author      David S. Miller <davem@davemloft.net>    2010-02-28 19:23:06 -0800
committer   David S. Miller <davem@davemloft.net>    2010-02-28 19:23:06 -0800
commit      47871889c601d8199c51a4086f77eebd77c29b0b (patch)
tree        40cdcac3bff0ee40cc33dcca61d0577cdf965f77 /drivers/staging
parent      c16cc0b464b8876cfd57ce1c1dbcb6f9a6a0bce3 (diff)
parent      30ff056c42c665b9ea535d8515890857ae382540 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts: drivers/firmware/iscsi_ibft.c
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/go7007/s2250-board.c          2
-rw-r--r--  drivers/staging/octeon/Makefile               1
-rw-r--r--  drivers/staging/octeon/ethernet-defines.h    34
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c        6
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.h        1
-rw-r--r--  drivers/staging/octeon/ethernet-mem.c       124
-rw-r--r--  drivers/staging/octeon/ethernet-proc.c      144
-rw-r--r--  drivers/staging/octeon/ethernet-proc.h       29
-rw-r--r--  drivers/staging/octeon/ethernet-rgmii.c      56
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c        384
-rw-r--r--  drivers/staging/octeon/ethernet-rx.h         25
-rw-r--r--  drivers/staging/octeon/ethernet-sgmii.c       1
-rw-r--r--  drivers/staging/octeon/ethernet-spi.c         1
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c        441
-rw-r--r--  drivers/staging/octeon/ethernet-tx.h         29
-rw-r--r--  drivers/staging/octeon/ethernet-util.h       13
-rw-r--r--  drivers/staging/octeon/ethernet-xaui.c        1
-rw-r--r--  drivers/staging/octeon/ethernet.c           254
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h     58
-rw-r--r--  drivers/staging/sm7xx/smtc2d.c                2
-rw-r--r--  drivers/staging/sm7xx/smtc2d.h                2
-rw-r--r--  drivers/staging/sm7xx/smtcfb.c                2
-rw-r--r--  drivers/staging/sm7xx/smtcfb.h                2
23 files changed, 725 insertions(+), 887 deletions(-)
diff --git a/drivers/staging/go7007/s2250-board.c b/drivers/staging/go7007/s2250-board.c
index 8cf7f2750b3f..c324f6ea002b 100644
--- a/drivers/staging/go7007/s2250-board.c
+++ b/drivers/staging/go7007/s2250-board.c
@@ -159,7 +159,7 @@ static int write_reg(struct i2c_client *client, u8 reg, u8 value)
struct go7007 *go = i2c_get_adapdata(client->adapter);
struct go7007_usb *usb;
int rc;
- int dev_addr = client->addr;
+ int dev_addr = client->addr << 1; /* firmware wants 8-bit address */
u8 *buf;
if (go == NULL)
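A note on the one-line fix above: Linux i2c_client structures store the 7-bit device address, while (per the new comment) the firmware expects the 8-bit form with the R/W bit in bit 0, hence the left shift. A standalone sketch of the conversion; the 0x43 value is a hypothetical example, not taken from this driver:

    #include <stdio.h>

    int main(void)
    {
        unsigned int addr7 = 0x43;       /* 7-bit address as i2c_client stores it */
        unsigned int addr8 = addr7 << 1; /* 8-bit form with the R/W bit cleared */

        printf("7-bit 0x%02x -> 8-bit 0x%02x\n", addr7, addr8);
        return 0;
    }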
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index c0a583cc2227..87447c102fa0 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -14,7 +14,6 @@ obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
octeon-ethernet-objs := ethernet.o
octeon-ethernet-objs += ethernet-mdio.o
octeon-ethernet-objs += ethernet-mem.o
-octeon-ethernet-objs += ethernet-proc.o
octeon-ethernet-objs += ethernet-rgmii.o
octeon-ethernet-objs += ethernet-rx.o
octeon-ethernet-objs += ethernet-sgmii.o
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index f13131b03c33..6a2cd50a17df 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -41,17 +41,10 @@
* Tells the driver to populate the packet buffers with kernel skbuffs.
* This allows the driver to receive packets without copying them. It also
* means that 32bit userspace can't access the packet buffers.
- * USE_32BIT_SHARED
- * This define tells the driver to allocate memory for buffers from the
- * 32bit shared region instead of the kernel memory space.
* USE_HW_TCPUDP_CHECKSUM
* Controls if the Octeon TCP/UDP checksum engine is used for packet
* output. If this is zero, the kernel will perform the checksum in
* software.
- * USE_MULTICORE_RECEIVE
- * Process receive interrupts on multiple cores. This spreads the network
- * load across the first 8 processors. If this is zero, only one core
- * processes incoming packets.
* USE_ASYNC_IOBDMA
* Use asynchronous IO access to hardware. This uses Octeon's asynchronous
* IOBDMAs to issue IO accesses without stalling. Set this to zero
@@ -75,29 +68,15 @@
#define CONFIG_CAVIUM_RESERVE32 0
#endif
-#if CONFIG_CAVIUM_RESERVE32
-#define USE_32BIT_SHARED 1
-#define USE_SKBUFFS_IN_HW 0
-#define REUSE_SKBUFFS_WITHOUT_FREE 0
-#else
-#define USE_32BIT_SHARED 0
#define USE_SKBUFFS_IN_HW 1
#ifdef CONFIG_NETFILTER
#define REUSE_SKBUFFS_WITHOUT_FREE 0
#else
#define REUSE_SKBUFFS_WITHOUT_FREE 1
#endif
-#endif
-
-/* Max interrupts per second per core */
-#define INTERRUPT_LIMIT 10000
-/* Don't limit the number of interrupts */
-/*#define INTERRUPT_LIMIT 0 */
#define USE_HW_TCPUDP_CHECKSUM 1
-#define USE_MULTICORE_RECEIVE 1
-
/* Enable Random Early Dropping under load */
#define USE_RED 1
#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
@@ -115,21 +94,12 @@
/* Use this to not have FPA frees control L2 */
/*#define DONT_WRITEBACK(x) 0 */
-/* Maximum number of packets to process per interrupt. */
-#define MAX_RX_PACKETS 120
/* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_SKB_TO_FREE 10
#define MAX_OUT_QUEUE_DEPTH 1000
-#ifndef CONFIG_SMP
-#undef USE_MULTICORE_RECEIVE
-#define USE_MULTICORE_RECEIVE 0
-#endif
-
-#define IP_PROTOCOL_TCP 6
-#define IP_PROTOCOL_UDP 0x11
+#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(uint32_t))
+#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(uint32_t))
-#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t))
#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
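The two FAU defines added above carve a pair of adjacent 32-bit counters out of the top of the fetch-and-add unit's register space: FAU_TOTAL_TX_TO_CLEAN occupies the last four bytes and FAU_NUM_PACKET_BUFFERS_TO_FREE the four bytes below it. A quick sketch of that layout; the CVMX_FAU_REG_END value here is a hypothetical stand-in for the SDK constant:

    #include <stdint.h>
    #include <stdio.h>

    #define CVMX_FAU_REG_END 2048 /* hypothetical; the real value comes from the SDK */
    #define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(uint32_t))
    #define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(uint32_t))

    int main(void)
    {
        /* Prints offsets 2044 and 2040: back to back in the last 8 bytes. */
        printf("TX_TO_CLEAN at %zu, BUFFERS_TO_FREE at %zu\n",
               FAU_TOTAL_TX_TO_CLEAN, FAU_NUM_PACKET_BUFFERS_TO_FREE);
        return 0;
    }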
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 05a5cc0f43ed..7e0be8d00dc3 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -96,11 +96,11 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
};
/**
- * IOCTL support for PHY control
- *
+ * cvm_oct_ioctl - IOCTL support for PHY control
* @dev: Device to change
* @rq: the request
* @cmd: the command
+ *
* Returns Zero on success
*/
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -153,7 +153,7 @@ static void cvm_oct_adjust_link(struct net_device *dev)
/**
- * Setup the PHY
+ * cvm_oct_phy_setup_device - setup the PHY
*
* @dev: Device to setup
*
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index 55d0614a7cd9..a417d4fce12c 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -32,7 +32,6 @@
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <net/dst.h>
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index b595903e2af1..00cc91df6b46 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2007 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -26,8 +26,6 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/mii.h>
-#include <net/dst.h>
#include <asm/octeon/octeon.h>
@@ -36,18 +34,19 @@
#include "cvmx-fpa.h"
/**
- * Fill the supplied hardware pool with skbuffs
- *
+ * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
* @pool: Pool to allocate an skbuff for
* @size: Size of the buffer needed for the pool
* @elements: Number of buffers to allocate
+ *
+ * Returns the actual number of buffers allocated.
*/
static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
{
int freed = elements;
while (freed) {
- struct sk_buff *skb = dev_alloc_skb(size + 128);
+ struct sk_buff *skb = dev_alloc_skb(size + 256);
if (unlikely(skb == NULL)) {
pr_warning
("Failed to allocate skb for hardware pool %d\n",
@@ -55,7 +54,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
break;
}
- skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f));
+ skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
freed--;
@@ -64,8 +63,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
}
/**
- * Free the supplied hardware pool of skbuffs
- *
+ * cvm_oct_free_hw_skbuff - free hardware pool skbuffs
* @pool: Pool to allocate an skbuff for
* @size: Size of the buffer needed for the pool
* @elements: Number of buffers to allocate
@@ -93,96 +91,76 @@ static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
}
/**
- * This function fills a hardware pool with memory. Depending
- * on the config defines, this memory might come from the
- * kernel or global 32bit memory allocated with
- * cvmx_bootmem_alloc.
- *
+ * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
* @pool: Pool to populate
* @size: Size of each buffer in the pool
* @elements: Number of buffers to allocate
+ *
+ * Returns the actual number of buffers allocated.
*/
static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
{
char *memory;
+ char *fpa;
int freed = elements;
- if (USE_32BIT_SHARED) {
- extern uint64_t octeon_reserve32_memory;
-
- memory =
- cvmx_bootmem_alloc_range(elements * size, 128,
- octeon_reserve32_memory,
- octeon_reserve32_memory +
- (CONFIG_CAVIUM_RESERVE32 << 20) -
- 1);
- if (memory == NULL)
- panic("Unable to allocate %u bytes for FPA pool %d\n",
- elements * size, pool);
-
- pr_notice("Memory range %p - %p reserved for "
- "hardware\n", memory,
- memory + elements * size - 1);
-
- while (freed) {
- cvmx_fpa_free(memory, pool, 0);
- memory += size;
- freed--;
- }
- } else {
- while (freed) {
- /* We need to force alignment to 128 bytes here */
- memory = kmalloc(size + 127, GFP_ATOMIC);
- if (unlikely(memory == NULL)) {
- pr_warning("Unable to allocate %u bytes for "
- "FPA pool %d\n",
- elements * size, pool);
- break;
- }
- memory = (char *)(((unsigned long)memory + 127) & -128);
- cvmx_fpa_free(memory, pool, 0);
- freed--;
+ while (freed) {
+ /*
+ * FPA memory must be 128 byte aligned. Since we are
+ * aligning we need to save the original pointer so we
+ * can feed it to kfree when the memory is returned to
+ * the kernel.
+ *
+ * We allocate an extra 256 bytes to allow for
+ * alignment and space for the original pointer saved
+ * just before the block.
+ */
+ memory = kmalloc(size + 256, GFP_ATOMIC);
+ if (unlikely(memory == NULL)) {
+ pr_warning("Unable to allocate %u bytes for FPA pool %d\n",
+ elements * size, pool);
+ break;
}
+ fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);
+ *((char **)fpa - 1) = memory;
+ cvmx_fpa_free(fpa, pool, 0);
+ freed--;
}
return elements - freed;
}
/**
- * Free memory previously allocated with cvm_oct_fill_hw_memory
- *
+ * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory
* @pool: FPA pool to free
* @size: Size of each buffer in the pool
* @elements: Number of buffers that should be in the pool
*/
static void cvm_oct_free_hw_memory(int pool, int size, int elements)
{
- if (USE_32BIT_SHARED) {
- pr_warning("Warning: 32 shared memory is not freeable\n");
- } else {
- char *memory;
- do {
- memory = cvmx_fpa_alloc(pool);
- if (memory) {
- elements--;
- kfree(phys_to_virt(cvmx_ptr_to_phys(memory)));
- }
- } while (memory);
+ char *memory;
+ char *fpa;
+ do {
+ fpa = cvmx_fpa_alloc(pool);
+ if (fpa) {
+ elements--;
+ fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa));
+ memory = *((char **)fpa - 1);
+ kfree(memory);
+ }
+ } while (fpa);
- if (elements < 0)
- pr_warning("Freeing of pool %u had too many "
- "buffers (%d)\n",
- pool, elements);
- else if (elements > 0)
- pr_warning("Warning: Freeing of pool %u is "
- "missing %d buffers\n",
- pool, elements);
- }
+ if (elements < 0)
+ pr_warning("Freeing of pool %u had too many buffers (%d)\n",
+ pool, elements);
+ else if (elements > 0)
+ pr_warning("Warning: Freeing of pool %u is missing %d buffers\n",
+ pool, elements);
}
int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
{
int freed;
- if (USE_SKBUFFS_IN_HW)
+ if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL)
freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
else
freed = cvm_oct_fill_hw_memory(pool, size, elements);
@@ -191,7 +169,7 @@ int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
{
- if (USE_SKBUFFS_IN_HW)
+ if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL)
cvm_oct_free_hw_skbuff(pool, size, elements);
else
cvm_oct_free_hw_memory(pool, size, elements);
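The rewritten fill/free pair above drops the 32-bit-shared path in favor of a single kmalloc-based scheme: over-allocate by 256 bytes, round up to a 128-byte boundary for the FPA, and stash the original pointer just below the aligned block so it can be recovered at free time. A self-contained userspace sketch of the same pattern, with malloc/free standing in for kmalloc/kfree:

    #include <stdint.h>
    #include <stdlib.h>

    static void *fpa_style_alloc(size_t size)
    {
        /* Extra 256 bytes: room to align plus space for the saved pointer. */
        char *memory = malloc(size + 256);
        char *fpa;

        if (!memory)
            return NULL;
        fpa = (char *)(((uintptr_t)memory + 256) & ~(uintptr_t)0x7f);
        *((char **)fpa - 1) = memory; /* original pointer, just below the block */
        return fpa;
    }

    static void fpa_style_free(void *fpa)
    {
        free(*((char **)fpa - 1)); /* recover the original pointer for free() */
    }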
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
deleted file mode 100644
index 16308d484d3b..000000000000
--- a/drivers/staging/octeon/ethernet-proc.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/**********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-**********************************************************************/
-#include <linux/kernel.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-#include <net/dst.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "octeon-ethernet.h"
-#include "ethernet-defines.h"
-
-#include "cvmx-helper.h"
-#include "cvmx-pip.h"
-
-/**
- * User is reading /proc/octeon_ethernet_stats
- *
- * @m:
- * @v:
- * Returns
- */
-static int cvm_oct_stats_show(struct seq_file *m, void *v)
-{
- struct octeon_ethernet *priv;
- int port;
-
- for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
-
- if (cvm_oct_device[port]) {
- priv = netdev_priv(cvm_oct_device[port]);
-
- seq_printf(m, "\nOcteon Port %d (%s)\n", port,
- cvm_oct_device[port]->name);
- seq_printf(m,
- "rx_packets: %12lu\t"
- "tx_packets: %12lu\n",
- priv->stats.rx_packets,
- priv->stats.tx_packets);
- seq_printf(m,
- "rx_bytes: %12lu\t"
- "tx_bytes: %12lu\n",
- priv->stats.rx_bytes, priv->stats.tx_bytes);
- seq_printf(m,
- "rx_errors: %12lu\t"
- "tx_errors: %12lu\n",
- priv->stats.rx_errors,
- priv->stats.tx_errors);
- seq_printf(m,
- "rx_dropped: %12lu\t"
- "tx_dropped: %12lu\n",
- priv->stats.rx_dropped,
- priv->stats.tx_dropped);
- seq_printf(m,
- "rx_length_errors: %12lu\t"
- "tx_aborted_errors: %12lu\n",
- priv->stats.rx_length_errors,
- priv->stats.tx_aborted_errors);
- seq_printf(m,
- "rx_over_errors: %12lu\t"
- "tx_carrier_errors: %12lu\n",
- priv->stats.rx_over_errors,
- priv->stats.tx_carrier_errors);
- seq_printf(m,
- "rx_crc_errors: %12lu\t"
- "tx_fifo_errors: %12lu\n",
- priv->stats.rx_crc_errors,
- priv->stats.tx_fifo_errors);
- seq_printf(m,
- "rx_frame_errors: %12lu\t"
- "tx_heartbeat_errors: %12lu\n",
- priv->stats.rx_frame_errors,
- priv->stats.tx_heartbeat_errors);
- seq_printf(m,
- "rx_fifo_errors: %12lu\t"
- "tx_window_errors: %12lu\n",
- priv->stats.rx_fifo_errors,
- priv->stats.tx_window_errors);
- seq_printf(m,
- "rx_missed_errors: %12lu\t"
- "multicast: %12lu\n",
- priv->stats.rx_missed_errors,
- priv->stats.multicast);
- }
- }
-
- return 0;
-}
-
-/**
- * /proc/octeon_ethernet_stats was opened. Use the single_open iterator
- *
- * @inode:
- * @file:
- * Returns
- */
-static int cvm_oct_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cvm_oct_stats_show, NULL);
-}
-
-static const struct file_operations cvm_oct_stats_operations = {
- .open = cvm_oct_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void cvm_oct_proc_initialize(void)
-{
- struct proc_dir_entry *entry =
- create_proc_entry("octeon_ethernet_stats", 0, NULL);
- if (entry)
- entry->proc_fops = &cvm_oct_stats_operations;
-}
-
-void cvm_oct_proc_shutdown(void)
-{
- remove_proc_entry("octeon_ethernet_stats", NULL);
-}
diff --git a/drivers/staging/octeon/ethernet-proc.h b/drivers/staging/octeon/ethernet-proc.h
deleted file mode 100644
index 82c7d9f78bc4..000000000000
--- a/drivers/staging/octeon/ethernet-proc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-*********************************************************************/
-
-void cvm_oct_proc_initialize(void);
-void cvm_oct_proc_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 3820f1ec11d1..a0d4d4b98bdc 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,7 +26,7 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
@@ -48,14 +48,20 @@ static int number_rgmii_ports;
static void cvm_oct_rgmii_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- unsigned long flags;
+ unsigned long flags = 0;
cvmx_helper_link_info_t link_info;
+ int use_global_register_lock = (priv->phydev == NULL);
- /*
- * Take the global register lock since we are going to touch
- * registers that affect more than one port.
- */
- spin_lock_irqsave(&global_register_lock, flags);
+ BUG_ON(in_interrupt());
+ if (use_global_register_lock) {
+ /*
+ * Take the global register lock since we are going to
+ * touch registers that affect more than one port.
+ */
+ spin_lock_irqsave(&global_register_lock, flags);
+ } else {
+ mutex_lock(&priv->phydev->bus->mdio_lock);
+ }
link_info = cvmx_helper_link_get(priv->port);
if (link_info.u64 == priv->link_info) {
@@ -115,7 +121,11 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
dev->name);
}
}
- spin_unlock_irqrestore(&global_register_lock, flags);
+
+ if (use_global_register_lock)
+ spin_unlock_irqrestore(&global_register_lock, flags);
+ else
+ mutex_unlock(&priv->phydev->bus->mdio_lock);
return;
}
@@ -151,7 +161,12 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
link_info = cvmx_helper_link_autoconf(priv->port);
priv->link_info = link_info.u64;
}
- spin_unlock_irqrestore(&global_register_lock, flags);
+
+ if (use_global_register_lock)
+ spin_unlock_irqrestore(&global_register_lock, flags);
+ else {
+ mutex_unlock(&priv->phydev->bus->mdio_lock);
+ }
if (priv->phydev == NULL) {
/* Tell core. */
@@ -213,8 +228,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
struct net_device *dev =
cvm_oct_device[cvmx_helper_get_ipd_port
(interface, index)];
- if (dev)
- cvm_oct_rgmii_poll(dev);
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_work(cvm_oct_poll_queue, &priv->port_work);
+
gmx_rx_int_reg.u64 = 0;
gmx_rx_int_reg.s.phy_dupx = 1;
gmx_rx_int_reg.s.phy_link = 1;
@@ -252,8 +270,11 @@ static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
struct net_device *dev =
cvm_oct_device[cvmx_helper_get_ipd_port
(interface, index)];
- if (dev)
- cvm_oct_rgmii_poll(dev);
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_work(cvm_oct_poll_queue, &priv->port_work);
+
gmx_rx_int_reg.u64 = 0;
gmx_rx_int_reg.s.phy_dupx = 1;
gmx_rx_int_reg.s.phy_link = 1;
@@ -302,6 +323,12 @@ int cvm_oct_rgmii_stop(struct net_device *dev)
return 0;
}
+static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
+{
+ struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
+ cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
+}
+
int cvm_oct_rgmii_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -309,7 +336,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
cvm_oct_common_init(dev);
dev->netdev_ops->ndo_stop(dev);
-
+ INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
/*
* Due to GMX errata in CN3XXX series chips, it is necessary
* to take the link down immediately when the PHY changes
@@ -397,4 +424,5 @@ void cvm_oct_rgmii_uninit(struct net_device *dev)
number_rgmii_ports--;
if (number_rgmii_ports == 0)
free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
+ cancel_work_sync(&priv->port_work);
}
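The RML interrupt handler above no longer calls cvm_oct_rgmii_poll() directly: the poll path may now take the MDIO bus mutex, which cannot be done in interrupt context, so the handler queues per-port work instead. A minimal sketch of that defer-to-workqueue pattern with the Octeon specifics stripped out; the names here are stand-ins:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct port_state {
        struct work_struct port_work;
        /* ... per-port data ... */
    };

    static void port_poll_work(struct work_struct *work)
    {
        struct port_state *p = container_of(work, struct port_state, port_work);

        /* Process context: taking a mutex (e.g. an MDIO bus lock) is legal. */
        (void)p;
    }

    static irqreturn_t port_irq(int irq, void *dev_id)
    {
        struct port_state *p = dev_id;

        schedule_work(&p->port_work); /* cannot sleep here; defer the poll */
        return IRQ_HANDLED;
    }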
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1b237b7e689d..cb38f9eb2cc0 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2007 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -27,16 +27,14 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
+#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
+#include <linux/smp.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
@@ -48,8 +46,9 @@
#include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
-#include "octeon-ethernet.h"
#include "ethernet-mem.h"
+#include "ethernet-rx.h"
+#include "octeon-ethernet.h"
#include "ethernet-util.h"
#include "cvmx-helper.h"
@@ -61,62 +60,88 @@
#include "cvmx-gmxx-defs.h"
-struct cvm_tasklet_wrapper {
- struct tasklet_struct t;
-};
+struct cvm_napi_wrapper {
+ struct napi_struct napi;
+} ____cacheline_aligned_in_smp;
-/*
- * Aligning the tasklet_struct on cacheline boundaries seems to decrease
- * throughput even though in theory it would reduce contention on the
- * cache lines containing the locks.
- */
+static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
-static struct cvm_tasklet_wrapper cvm_oct_tasklet[NR_CPUS];
+struct cvm_oct_core_state {
+ int baseline_cores;
+ /*
+ * The number of additional cores that could be processing
+ * input packets.
+ */
+ atomic_t available_cores;
+ cpumask_t cpu_state;
+} ____cacheline_aligned_in_smp;
-/**
- * Interrupt handler. The interrupt occurs whenever the POW
- * transitions from 0->1 packets in our group.
- *
- * @cpl:
- * @dev_id:
- * @regs:
- * Returns
- */
-irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
+static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
+
+static void cvm_oct_enable_napi(void *_)
{
- /* Acknowledge the interrupt */
- if (INTERRUPT_LIMIT)
- cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group);
- else
- cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group);
- preempt_disable();
- tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
- preempt_enable();
- return IRQ_HANDLED;
+ int cpu = smp_processor_id();
+ napi_schedule(&cvm_oct_napi[cpu].napi);
+}
+
+static void cvm_oct_enable_one_cpu(void)
+{
+ int v;
+ int cpu;
+
+ /* Check to see if more CPUs are available for receive processing... */
+ v = atomic_sub_if_positive(1, &core_state.available_cores);
+ if (v < 0)
+ return;
+
+ /* ... if a CPU is available, Turn on NAPI polling for that CPU. */
+ for_each_online_cpu(cpu) {
+ if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
+ v = smp_call_function_single(cpu, cvm_oct_enable_napi,
+ NULL, 0);
+ if (v)
+ panic("Can't enable NAPI.");
+ break;
+ }
+ }
+}
+
+static void cvm_oct_no_more_work(void)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * CPU zero is special. It always has the irq enabled when
+ * waiting for incoming packets.
+ */
+ if (cpu == 0) {
+ enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ return;
+ }
+
+ cpu_clear(cpu, core_state.cpu_state);
+ atomic_add(1, &core_state.available_cores);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * This is called when the kernel needs to manually poll the
- * device. For Octeon, this is simply calling the interrupt
- * handler. We actually poll all the devices, not just the
- * one supplied.
+ * cvm_oct_do_interrupt - interrupt handler.
+ *
+ * The interrupt occurs whenever the POW has packets in our group.
*
- * @dev: Device to poll. Unused
*/
-void cvm_oct_poll_controller(struct net_device *dev)
+static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
- preempt_disable();
- tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
- preempt_enable();
+ /* Disable the IRQ and start napi_poll. */
+ disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+ cvm_oct_enable_napi(NULL);
+
+ return IRQ_HANDLED;
}
-#endif
/**
- * This is called on receive errors, and determines if the packet
- * can be dropped early-on in cvm_oct_tasklet_rx().
- *
+ * cvm_oct_check_rcv_error - process receive errors
* @work: Work queue entry pointing to the packet.
+ *
* Returns Non-zero if the packet can be dropped, zero otherwise.
*/
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
@@ -199,19 +224,20 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
}
/**
- * Tasklet function that is scheduled on a core when an interrupt occurs.
+ * cvm_oct_napi_poll - the NAPI poll function.
+ * @napi: The NAPI instance, or NULL if called from cvm_oct_poll_controller
+ * @budget: Maximum number of packets to receive.
*
- * @unused:
+ * Returns the number of packets processed.
*/
-void cvm_oct_tasklet_rx(unsigned long unused)
+static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
- const int coreid = cvmx_get_core_num();
- uint64_t old_group_mask;
- uint64_t old_scratch;
- int rx_count = 0;
- int number_to_free;
- int num_freed;
- int packet_not_copied;
+ const int coreid = cvmx_get_core_num();
+ uint64_t old_group_mask;
+ uint64_t old_scratch;
+ int rx_count = 0;
+ int did_work_request = 0;
+ int packet_not_copied;
/* Prefetch cvm_oct_device since we know we need it soon */
prefetch(cvm_oct_device);
@@ -227,59 +253,63 @@ void cvm_oct_tasklet_rx(unsigned long unused)
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
(old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
- if (USE_ASYNC_IOBDMA)
+ if (USE_ASYNC_IOBDMA) {
cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+ did_work_request = 1;
+ }
- while (1) {
+ while (rx_count < budget) {
struct sk_buff *skb = NULL;
+ struct sk_buff **pskb = NULL;
int skb_in_hw;
cvmx_wqe_t *work;
- if (USE_ASYNC_IOBDMA) {
+ if (USE_ASYNC_IOBDMA && did_work_request)
work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
- } else {
- if ((INTERRUPT_LIMIT == 0)
- || likely(rx_count < MAX_RX_PACKETS))
- work =
- cvmx_pow_work_request_sync
- (CVMX_POW_NO_WAIT);
- else
- work = NULL;
- }
+ else
+ work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
+
prefetch(work);
- if (work == NULL)
+ did_work_request = 0;
+ if (work == NULL) {
+ union cvmx_pow_wq_int wq_int;
+ wq_int.u64 = 0;
+ wq_int.s.iq_dis = 1 << pow_receive_group;
+ wq_int.s.wq_int = 1 << pow_receive_group;
+ cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
break;
+ }
+ pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
+ prefetch(pskb);
- /*
- * Limit each core to processing MAX_RX_PACKETS
- * packets without a break. This way the RX can't
- * starve the TX task.
- */
- if (USE_ASYNC_IOBDMA) {
-
- if ((INTERRUPT_LIMIT == 0)
- || likely(rx_count < MAX_RX_PACKETS))
- cvmx_pow_work_request_async_nocheck
- (CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
- else {
- cvmx_scratch_write64(CVMX_SCR_SCRATCH,
- 0x8000000000000000ull);
- cvmx_pow_tag_sw_null_nocheck();
- }
+ if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
+ cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+ did_work_request = 1;
+ }
+
+ if (rx_count == 0) {
+ /*
+ * First time through, see if there is enough
+ * work waiting to merit waking another
+ * CPU.
+ */
+ union cvmx_pow_wq_int_cntx counts;
+ int backlog;
+ int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
+ counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
+ backlog = counts.s.iq_cnt + counts.s.ds_cnt;
+ if (backlog > budget * cores_in_use && napi != NULL)
+ cvm_oct_enable_one_cpu();
}
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
if (likely(skb_in_hw)) {
- skb =
- *(struct sk_buff
- **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
- sizeof(void *));
+ skb = *pskb;
prefetch(&skb->head);
prefetch(&skb->len);
}
prefetch(cvm_oct_device[work->ipprt]);
- rx_count++;
/* Immediately throw away all packets with receive errors */
if (unlikely(work->word2.snoip.rcv_error)) {
if (cvm_oct_check_rcv_error(work))
@@ -292,39 +322,27 @@ void cvm_oct_tasklet_rx(unsigned long unused)
* buffer.
*/
if (likely(skb_in_hw)) {
- /*
- * This calculation was changed in case the
- * skb header is using a different address
- * aliasing type than the buffer. It doesn't
- * make any difference now, but the new one is
- * more correct.
- */
- skb->data =
- skb->head + work->packet_ptr.s.addr -
- cvmx_ptr_to_phys(skb->head);
+ skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
prefetch(skb->data);
skb->len = work->len;
skb_set_tail_pointer(skb, skb->len);
packet_not_copied = 1;
} else {
-
/*
* We have to copy the packet. First allocate
* an skbuff for it.
*/
skb = dev_alloc_skb(work->len);
if (!skb) {
- DEBUGPRINT("Port %d failed to allocate "
- "skbuff, packet dropped\n",
- work->ipprt);
+ DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n",
+ work->ipprt);
cvm_oct_free_work(work);
continue;
}
/*
* Check if we've received a packet that was
- * entirely stored in the work entry. This is
- * untested.
+ * entirely stored in the work entry.
*/
if (unlikely(work->word2.s.bufs == 0)) {
uint8_t *ptr = work->packet_data;
@@ -343,15 +361,13 @@ void cvm_oct_tasklet_rx(unsigned long unused)
/* No packet buffers to free */
} else {
int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr =
- work->packet_ptr;
+ union cvmx_buf_ptr segment_ptr = work->packet_ptr;
int len = work->len;
while (segments--) {
union cvmx_buf_ptr next_ptr =
- *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(segment_ptr.s.
- addr - 8);
+ *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
+
/*
* Octeon Errata PKI-100: The segment size is
* wrong. Until it is fixed, calculate the
@@ -361,22 +377,18 @@ void cvm_oct_tasklet_rx(unsigned long unused)
* one: int segment_size =
* segment_ptr.s.size;
*/
- int segment_size =
- CVMX_FPA_PACKET_POOL_SIZE -
- (segment_ptr.s.addr -
- (((segment_ptr.s.addr >> 7) -
- segment_ptr.s.back) << 7));
- /* Don't copy more than what is left
- in the packet */
+ int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
+ (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
+ /*
+ * Don't copy more than what
+ * is left in the packet.
+ */
if (segment_size > len)
segment_size = len;
/* Copy the data into the packet */
memcpy(skb_put(skb, segment_size),
- cvmx_phys_to_ptr(segment_ptr.s.
- addr),
+ cvmx_phys_to_ptr(segment_ptr.s.addr),
segment_size);
- /* Reduce the amount of bytes left
- to copy */
len -= segment_size;
segment_ptr = next_ptr;
}
@@ -389,16 +401,15 @@ void cvm_oct_tasklet_rx(unsigned long unused)
struct net_device *dev = cvm_oct_device[work->ipprt];
struct octeon_ethernet *priv = netdev_priv(dev);
- /* Only accept packets for devices
- that are currently up */
+ /*
+ * Only accept packets for devices that are
+ * currently up.
+ */
if (likely(dev->flags & IFF_UP)) {
skb->protocol = eth_type_trans(skb, dev);
skb->dev = dev;
- if (unlikely
- (work->word2.s.not_IP
- || work->word2.s.IP_exc
- || work->word2.s.L4_error))
+ if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
skb->ip_summed = CHECKSUM_NONE;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -414,15 +425,13 @@ void cvm_oct_tasklet_rx(unsigned long unused)
#endif
}
netif_receive_skb(skb);
+ rx_count++;
} else {
+ /* Drop any packet received for a device that isn't up */
/*
- * Drop any packet received for a
- * device that isn't up.
- */
- /*
- DEBUGPRINT("%s: Device not up, packet dropped\n",
- dev->name);
- */
+ DEBUGPRINT("%s: Device not up, packet dropped\n",
+ dev->name);
+ */
#ifdef CONFIG_64BIT
atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
#else
@@ -435,9 +444,8 @@ void cvm_oct_tasklet_rx(unsigned long unused)
* Drop any packet received for a device that
* doesn't exist.
*/
- DEBUGPRINT("Port %d not controlled by Linux, packet "
- "dropped\n",
- work->ipprt);
+ DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n",
+ work->ipprt);
dev_kfree_skb_irq(skb);
}
/*
@@ -459,47 +467,93 @@ void cvm_oct_tasklet_rx(unsigned long unused)
cvm_oct_free_work(work);
}
}
-
/* Restore the original POW group mask */
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
if (USE_ASYNC_IOBDMA) {
/* Restore the scratch area */
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
}
+ cvm_oct_rx_refill_pool(0);
- if (USE_SKBUFFS_IN_HW) {
- /* Refill the packet buffer pool */
- number_to_free =
- cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-
- if (number_to_free > 0) {
- cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- -number_to_free);
- num_freed =
- cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
- CVMX_FPA_PACKET_POOL_SIZE,
- number_to_free);
- if (num_freed != number_to_free) {
- cvmx_fau_atomic_add32
- (FAU_NUM_PACKET_BUFFERS_TO_FREE,
- number_to_free - num_freed);
- }
- }
+ if (rx_count < budget && napi != NULL) {
+ /* No more work */
+ napi_complete(napi);
+ cvm_oct_no_more_work();
}
+ return rx_count;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * cvm_oct_poll_controller - poll for receive packets on a
+ * device.
+ *
+ * @dev: Device to poll. Unused
+ */
+void cvm_oct_poll_controller(struct net_device *dev)
+{
+ cvm_oct_napi_poll(NULL, 16);
+}
+#endif
+
void cvm_oct_rx_initialize(void)
{
int i;
- /* Initialize all of the tasklets */
- for (i = 0; i < NR_CPUS; i++)
- tasklet_init(&cvm_oct_tasklet[i].t, cvm_oct_tasklet_rx, 0);
+ struct net_device *dev_for_napi = NULL;
+ union cvmx_pow_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
+ for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
+ if (cvm_oct_device[i]) {
+ dev_for_napi = cvm_oct_device[i];
+ break;
+ }
+ }
+
+ if (NULL == dev_for_napi)
+ panic("No net_devices were allocated.");
+
+ if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
+ atomic_set(&core_state.available_cores, max_rx_cpus);
+ else
+ atomic_set(&core_state.available_cores, num_online_cpus());
+ core_state.baseline_cores = atomic_read(&core_state.available_cores);
+
+ core_state.cpu_state = CPU_MASK_NONE;
+ for_each_possible_cpu(i) {
+ netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
+ cvm_oct_napi_poll, rx_napi_weight);
+ napi_enable(&cvm_oct_napi[i].napi);
+ }
+ /* Register an IRQ handler to receive POW interrupts */
+ i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
+ cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
+
+ if (i)
+ panic("Could not acquire Ethernet IRQ %d\n",
+ OCTEON_IRQ_WORKQ0 + pow_receive_group);
+
+ disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ /* Enable POW interrupt when our port has at least one packet */
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
+
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+
+
+ /* Schedule NAPI now. This will indirectly enable interrupts. */
+ cvm_oct_enable_one_cpu();
}
void cvm_oct_rx_shutdown(void)
{
int i;
- /* Shutdown all of the tasklets */
- for (i = 0; i < NR_CPUS; i++)
- tasklet_kill(&cvm_oct_tasklet[i].t);
+ /* Shutdown all of the NAPIs */
+ for_each_possible_cpu(i)
+ netif_napi_del(&cvm_oct_napi[i].napi);
}
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a9b72b87a7a6..a0743b85d54e 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -24,10 +24,29 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
*********************************************************************/
+#include "cvmx-fau.h"
-irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id);
void cvm_oct_poll_controller(struct net_device *dev);
-void cvm_oct_tasklet_rx(unsigned long unused);
-
void cvm_oct_rx_initialize(void);
void cvm_oct_rx_shutdown(void);
+
+static inline void cvm_oct_rx_refill_pool(int fill_threshold)
+{
+ int number_to_free;
+ int num_freed;
+ /* Refill the packet buffer pool */
+ number_to_free =
+ cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+
+ if (number_to_free > fill_threshold) {
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ -number_to_free);
+ num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
+ CVMX_FPA_PACKET_POOL_SIZE,
+ number_to_free);
+ if (num_freed != number_to_free) {
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ number_to_free - num_freed);
+ }
+ }
+}
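cvm_oct_rx_refill_pool() above claims the whole outstanding count and then re-credits the shortfall if the FPA fill could not allocate everything, so a memory-pressure deficit is retried on the next pass rather than lost. A small model of that bookkeeping, with a plain integer standing in for the FAU counter:

    #include <stdio.h>

    int main(void)
    {
        int counter = 32;               /* buffers owed to the packet pool */
        int number_to_free = counter;   /* read the counter */
        int num_freed = 20;             /* pretend the allocator ran dry */

        counter -= number_to_free;      /* claim all 32 */
        if (num_freed != number_to_free)
            counter += number_to_free - num_freed; /* re-credit the 12 missed */

        printf("counter=%d\n", counter); /* prints 12: retried next time */
        return 0;
    }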
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 6061d01eca2d..2d8589eb461e 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -26,7 +26,6 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/mii.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 00dc0f4bad19..b58b8971f939 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -26,7 +26,6 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/mii.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 535294105f65..afc2b734d554 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2007 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -31,10 +31,6 @@
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
@@ -52,11 +48,14 @@
#include "cvmx-wqe.h"
#include "cvmx-fau.h"
+#include "cvmx-pip.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"
#include "cvmx-gmxx-defs.h"
+#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
+
/*
* You can define GET_SKBUFF_QOS() to override how the skbuff output
* function determines which output queue is used. The default
@@ -68,12 +67,81 @@
#define GET_SKBUFF_QOS(skb) 0
#endif
+static void cvm_oct_tx_do_cleanup(unsigned long arg);
+static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
+
+/* Maximum number of SKBs to try to free per xmit packet. */
+#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
+
+static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
+{
+ int32_t undo;
+ undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(fau, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
+ return skb_to_free;
+}
+
+static void cvm_oct_kick_tx_poll_watchdog(void)
+{
+ union cvmx_ciu_timx ciu_timx;
+ ciu_timx.u64 = 0;
+ ciu_timx.s.one_shot = 1;
+ ciu_timx.s.len = cvm_oct_tx_poll_interval;
+ cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
+}
+
+void cvm_oct_free_tx_skbs(struct net_device *dev)
+{
+ int32_t skb_to_free;
+ int qos, queues_per_port;
+ int total_freed = 0;
+ int total_remaining = 0;
+ unsigned long flags;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ queues_per_port = cvmx_pko_get_num_queues(priv->port);
+ /* Drain any pending packets in the free list */
+ for (qos = 0; qos < queues_per_port; qos++) {
+ if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
+ continue;
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+
+
+ total_freed += skb_to_free;
+ if (skb_to_free > 0) {
+ struct sk_buff *to_free_list = NULL;
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ while (skb_to_free > 0) {
+ struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+ t->next = to_free_list;
+ to_free_list = t;
+ skb_to_free--;
+ }
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ /* Do the actual freeing outside of the lock. */
+ while (to_free_list) {
+ struct sk_buff *t = to_free_list;
+ to_free_list = to_free_list->next;
+ dev_kfree_skb_any(t);
+ }
+ }
+ total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
+ }
+ if (total_freed >= 0 && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ if (total_remaining)
+ cvm_oct_kick_tx_poll_watchdog();
+}
+
/**
- * Packet transmit
- *
+ * cvm_oct_xmit - transmit a packet
* @skb: Packet to send
* @dev: Device info structure
- * Returns Always returns zero
+ *
+ * Returns Always returns NETDEV_TX_OK
*/
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -81,13 +149,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
union cvmx_buf_ptr hw_buffer;
uint64_t old_scratch;
uint64_t old_scratch2;
- int dropped;
int qos;
- int queue_it_up;
+ int i;
+ enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
struct octeon_ethernet *priv = netdev_priv(dev);
+ struct sk_buff *to_free_list;
int32_t skb_to_free;
- int32_t undo;
int32_t buffers_to_free;
+ u32 total_to_clean;
+ unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
unsigned char *fpa_head;
#endif
@@ -98,9 +168,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
*/
prefetch(priv);
- /* Start off assuming no drop */
- dropped = 0;
-
/*
* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
* completely remove "qos" in the event neither interface
@@ -135,6 +202,28 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
}
/*
+ * We have space for 6 segment pointers. If there are more
+ * than that, we must linearize.
+ */
+ if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
+ if (unlikely(__skb_linearize(skb))) {
+ queue_type = QUEUE_DROP;
+ if (USE_ASYNC_IOBDMA) {
+ /* Get the number of skbuffs in use by the hardware */
+ CVMX_SYNCIOBDMA;
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ } else {
+ /* Get the number of skbuffs in use by the hardware */
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
+ }
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ goto skip_xmit;
+ }
+ }
+
+ /*
* The CN3XXX series of parts has an errata (GMX-401) which
* causes the GMX block to hang if a collision occurs towards
* the end of a <68 byte packet. As a workaround for this, we
@@ -162,13 +251,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- /* Build the PKO buffer pointer */
- hw_buffer.u64 = 0;
- hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
- hw_buffer.s.pool = 0;
- hw_buffer.s.size =
- (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;
-
/* Build the PKO command */
pko_command.u64 = 0;
pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
@@ -178,7 +260,31 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
pko_command.s.subone0 = 1;
pko_command.s.dontfree = 1;
- pko_command.s.reg0 = priv->fau + qos * 4;
+
+ /* Build the PKO buffer pointer */
+ hw_buffer.u64 = 0;
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+ hw_buffer.s.pool = 0;
+ hw_buffer.s.size = skb->len;
+ } else {
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+ hw_buffer.s.pool = 0;
+ hw_buffer.s.size = skb_headlen(skb);
+ CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
+ hw_buffer.s.size = fs->size;
+ CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
+ }
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
+ hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
+ pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
+ pko_command.s.gather = 1;
+ goto dont_put_skbuff_in_hw;
+ }
+
/*
* See if we can put this skb in the FPA pool. Any strange
* behavior from the Linux networking stack will most likely
@@ -190,7 +296,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
* shown a 25% increase in performance under some loads.
*/
#if REUSE_SKBUFFS_WITHOUT_FREE
- fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f);
+ fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
if (unlikely(skb->data < fpa_head)) {
/*
* printk("TX buffer beginning can't meet FPA
@@ -248,10 +354,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
* We can use this buffer in the FPA. We don't need the FAU
* update anymore
*/
- pko_command.s.reg0 = 0;
pko_command.s.dontfree = 0;
- hw_buffer.s.back = (skb->data - fpa_head) >> 7;
+ hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
/*
@@ -272,16 +377,16 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */
+#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
dont_put_skbuff_in_hw:
-#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
/* Check if we can use the hardware checksumming */
if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
(ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
- && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
- || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) {
+ && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
+ || (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
/* Use hardware checksum calc */
pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
}
@@ -299,89 +404,116 @@ dont_put_skbuff_in_hw:
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
}
- /*
- * We try to claim MAX_SKB_TO_FREE buffers. If there were not
- * that many available, we have to un-claim (undo) any that
- * were in excess. If skb_to_free is positive we will free
- * that many buffers.
- */
- undo = skb_to_free > 0 ?
- MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
- if (undo > 0)
- cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
- skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
- MAX_SKB_TO_FREE : -skb_to_free;
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
/*
* If we're sending faster than the receive can free them then
* don't do the HW free.
*/
- if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
+ if ((buffers_to_free < -100) && !pko_command.s.dontfree)
pko_command.s.dontfree = 1;
- pko_command.s.reg0 = priv->fau + qos * 4;
+
+ if (pko_command.s.dontfree) {
+ queue_type = QUEUE_CORE;
+ pko_command.s.reg0 = priv->fau+qos*4;
+ } else {
+ queue_type = QUEUE_HW;
}
+ if (USE_ASYNC_IOBDMA)
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
- cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
- CVMX_PKO_LOCK_CMD_QUEUE);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
/* Drop this packet if we have too many already queued to the HW */
- if (unlikely
- (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
- /*
- DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name);
- */
- dropped = 1;
+ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
+ if (dev->tx_queue_len != 0) {
+ /* Drop the lock when notifying the core. */
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ netif_stop_queue(dev);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ } else {
+ /* If not using normal queueing. */
+ queue_type = QUEUE_DROP;
+ goto skip_xmit;
+ }
}
+
+ cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
+ CVMX_PKO_LOCK_NONE);
+
/* Send the packet to the output queue */
- else if (unlikely
- (cvmx_pko_send_packet_finish
- (priv->port, priv->queue + qos, pko_command, hw_buffer,
- CVMX_PKO_LOCK_CMD_QUEUE))) {
+ if (unlikely(cvmx_pko_send_packet_finish(priv->port,
+ priv->queue + qos,
+ pko_command, hw_buffer,
+ CVMX_PKO_LOCK_NONE))) {
DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
- dropped = 1;
+ queue_type = QUEUE_DROP;
+ }
+skip_xmit:
+ to_free_list = NULL;
+
+ switch (queue_type) {
+ case QUEUE_DROP:
+ skb->next = to_free_list;
+ to_free_list = skb;
+ priv->stats.tx_dropped++;
+ break;
+ case QUEUE_HW:
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
+ break;
+ case QUEUE_CORE:
+ __skb_queue_tail(&priv->tx_free_list[qos], skb);
+ break;
+ default:
+ BUG();
+ }
+
+ while (skb_to_free > 0) {
+ struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+ t->next = to_free_list;
+ to_free_list = t;
+ skb_to_free--;
+ }
+
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+
+ /* Do the actual freeing outside of the lock. */
+ while (to_free_list) {
+ struct sk_buff *t = to_free_list;
+ to_free_list = to_free_list->next;
+ dev_kfree_skb_any(t);
}
if (USE_ASYNC_IOBDMA) {
+ CVMX_SYNCIOBDMA;
+ total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
/* Restore the scratch area */
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
- }
-
- queue_it_up = 0;
- if (unlikely(dropped)) {
- dev_kfree_skb_any(skb);
- priv->stats.tx_dropped++;
} else {
- if (USE_SKBUFFS_IN_HW) {
- /* Put this packet on the queue to be freed later */
- if (pko_command.s.dontfree)
- queue_it_up = 1;
- else
- cvmx_fau_atomic_add32
- (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
- } else {
- /* Put this packet on the queue to be freed later */
- queue_it_up = 1;
- }
+ total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
}
- if (queue_it_up) {
- spin_lock(&priv->tx_free_list[qos].lock);
- __skb_queue_tail(&priv->tx_free_list[qos], skb);
- cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0);
- spin_unlock(&priv->tx_free_list[qos].lock);
- } else {
- cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
+ if (total_to_clean & 0x3ff) {
+ /*
+ * Schedule the cleanup tasklet every 1024 packets for
+ * the pathological case of high traffic on one port
+ * delaying clean up of packets on a different port
+ * that is blocked waiting for the cleanup.
+ */
+ tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
}
- return 0;
+ cvm_oct_kick_tx_poll_watchdog();
+
+ return NETDEV_TX_OK;
}
/**
- * Packet transmit to the POW
- *
+ * cvm_oct_xmit_pow - transmit a packet to the POW
* @skb: Packet to send
* @dev: Device info structure
+ *
* Returns Always returns zero
*/
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
@@ -459,8 +591,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
work->word2.s.dec_ipcomp = 0; /* FIXME */
#endif
work->word2.s.tcp_or_udp =
- (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
- || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP);
+ (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ || (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
/* FIXME */
work->word2.s.dec_ipsec = 0;
@@ -529,116 +661,63 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
}
/**
- * Transmit a work queue entry out of the ethernet port. Both
- * the work queue entry and the packet data can optionally be
- * freed. The work will be freed on error as well.
- *
- * @dev: Device to transmit out.
- * @work_queue_entry:
- * Work queue entry to send
- * @do_free: True if the work queue entry and packet data should be
- * freed. If false, neither will be freed.
- * @qos: Index into the queues for this port to transmit on. This
- * is used to implement QoS if their are multiple queues per
- * port. This parameter must be between 0 and the number of
- * queues per port minus 1. Values outside of this range will
- * be change to zero.
+ * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX.
+ * @dev: Device being shutdown
*
- * Returns Zero on success, negative on failure.
*/
-int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
- int do_free, int qos)
+void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
- unsigned long flags;
- union cvmx_buf_ptr hw_buffer;
- cvmx_pko_command_word0_t pko_command;
- int dropped;
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_wqe_t *work = work_queue_entry;
+ unsigned long flags;
+ int qos;
- if (!(dev->flags & IFF_UP)) {
- DEBUGPRINT("%s: Device not up\n", dev->name);
- if (do_free)
- cvm_oct_free_work(work);
- return -1;
+ for (qos = 0; qos < 16; qos++) {
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ while (skb_queue_len(&priv->tx_free_list[qos]))
+ dev_kfree_skb_any(__skb_dequeue
+ (&priv->tx_free_list[qos]));
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
}
+}
- /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
- remove "qos" in the event neither interface supports
- multiple queues per port */
- if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
- (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
- if (qos <= 0)
- qos = 0;
- else if (qos >= cvmx_pko_get_num_queues(priv->port))
- qos = 0;
- } else
- qos = 0;
-
- /* Start off assuming no drop */
- dropped = 0;
-
- local_irq_save(flags);
- cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
- CVMX_PKO_LOCK_CMD_QUEUE);
-
- /* Build the PKO buffer pointer */
- hw_buffer.u64 = 0;
- hw_buffer.s.addr = work->packet_ptr.s.addr;
- hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
- hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
- hw_buffer.s.back = work->packet_ptr.s.back;
+static void cvm_oct_tx_do_cleanup(unsigned long arg)
+{
+ int port;
- /* Build the PKO command */
- pko_command.u64 = 0;
- pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
- pko_command.s.dontfree = !do_free;
- pko_command.s.segs = work->word2.s.bufs;
- pko_command.s.total_bytes = work->len;
+ for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
+ if (cvm_oct_device[port]) {
+ struct net_device *dev = cvm_oct_device[port];
+ cvm_oct_free_tx_skbs(dev);
+ }
+ }
+}
- /* Check if we can use the hardware checksumming */
- if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc))
- pko_command.s.ipoffp1 = 0;
- else
- pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
+static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
+{
+ /* Disable the interrupt. */
+ cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
+ /* Do the work in the tasklet. */
+ tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
+ return IRQ_HANDLED;
+}
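The hard-IRQ handler above deliberately does the minimum: quiesce the timer and defer the actual freeing to a tasklet. Neither the tasklet object nor the timer re-arm appears in this hunk; the sketch below shows one plausible way they fit together, assuming a classic DECLARE_TASKLET binding to cvm_oct_tx_do_cleanup() and a one-shot CIU timer that is re-armed by writing cvm_oct_tx_poll_interval back to CVMX_CIU_TIMX(1). Both pieces are assumptions, not code from this patch.

    /* Sketch (assumed, not shown in this hunk): the tasklet that the
     * watchdog schedules, bound to cvm_oct_tx_do_cleanup() above. */
    static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet,
                           cvm_oct_tx_do_cleanup, 0);

    /* Sketch: re-arming the one-shot timer after a cleanup pass,
     * assuming CIU_TIMX(1) counts down cvm_oct_tx_poll_interval
     * core-clock ticks and then raises OCTEON_IRQ_TIMER1. */
    static void cvm_oct_rearm_tx_cleanup_timer(void)
    {
            cvmx_write_csr(CVMX_CIU_TIMX(1), cvm_oct_tx_poll_interval);
    }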
- /* Send the packet to the output queue */
- if (unlikely
- (cvmx_pko_send_packet_finish
- (priv->port, priv->queue + qos, pko_command, hw_buffer,
- CVMX_PKO_LOCK_CMD_QUEUE))) {
- DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
- dropped = -1;
- }
- local_irq_restore(flags);
+void cvm_oct_tx_initialize(void)
+{
+ int i;
- if (unlikely(dropped)) {
- if (do_free)
- cvm_oct_free_work(work);
- priv->stats.tx_dropped++;
- } else if (do_free)
- cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
+ /* Disable the interrupt. */
+ cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
+ /* Register an IRQ handler to receive CIU_TIMX(1) interrupts. */
+ i = request_irq(OCTEON_IRQ_TIMER1,
+ cvm_oct_tx_cleanup_watchdog, 0,
+ "Ethernet", cvm_oct_device);
- return dropped;
+ if (i)
+ panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}
-EXPORT_SYMBOL(cvm_oct_transmit_qos);
-/**
- * This function frees all skb that are currently queued for TX.
- *
- * @dev: Device being shutdown
- */
-void cvm_oct_tx_shutdown(struct net_device *dev)
+void cvm_oct_tx_shutdown(void)
{
- struct octeon_ethernet *priv = netdev_priv(dev);
- unsigned long flags;
- int qos;
-
- for (qos = 0; qos < 16; qos++) {
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
- while (skb_queue_len(&priv->tx_free_list[qos]))
- dev_kfree_skb_any(__skb_dequeue
- (&priv->tx_free_list[qos]));
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
- }
+ /* Free the interrupt handler */
+ free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index c0bebf750bc0..547680c6c371 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -29,29 +29,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
int do_free, int qos);
-void cvm_oct_tx_shutdown(struct net_device *dev);
-
-/**
- * Free dead transmit skbs.
- *
- * @priv: The driver data
- * @skb_to_free: The number of SKBs to free (free none if negative).
- * @qos: The queue to free from.
- * @take_lock: If true, acquire the skb list lock.
- */
-static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
- int skb_to_free,
- int qos, int take_lock)
-{
- /* Free skbuffs not in use by the hardware. */
- if (skb_to_free > 0) {
- if (take_lock)
- spin_lock(&priv->tx_free_list[qos].lock);
- while (skb_to_free > 0) {
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
- skb_to_free--;
- }
- if (take_lock)
- spin_unlock(&priv->tx_free_list[qos].lock);
- }
-}
+void cvm_oct_tx_initialize(void);
+void cvm_oct_tx_shutdown(void);
+void cvm_oct_tx_shutdown_dev(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 37b665918000..23467563fe57 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -30,10 +30,9 @@
} while (0)
/**
- * Given a packet data address, return a pointer to the
- * beginning of the packet buffer.
- *
+ * cvm_oct_get_buffer_ptr - convert packet data address to pointer
* @packet_ptr: Packet data hardware address
+ *
* Returns Packet buffer pointer
*/
static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
@@ -43,9 +42,7 @@ static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
}
/**
- * Given an IPD/PKO port number, return the logical interface it is
- * on.
- *
+ * INTERFACE - convert IPD port to logical interface
* @ipd_port: Port to check
*
* Returns Logical interface
@@ -65,9 +62,7 @@ static inline int INTERFACE(int ipd_port)
}
/**
- * Given an IPD/PKO port number, return the port's index on a
- * logical interface.
- *
+ * INDEX - convert IPD/PKO port number to the port's interface index
* @ipd_port: Port to check
*
* Returns Index into interface port list
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index ee3dc41b2c53..3fca1cc31ed8 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -26,7 +26,6 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/mii.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 220de133a6a5..4a2161f70c7f 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/delay.h>
#include <linux/phy.h>
#include <net/dst.h>
@@ -43,8 +42,6 @@
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include "ethernet-proc.h"
-
#include "cvmx-pip.h"
#include "cvmx-pko.h"
@@ -104,13 +101,15 @@ MODULE_PARM_DESC(pow_send_list, "\n"
"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
"\tusing the pow_send_group.");
-static int disable_core_queueing = 1;
-module_param(disable_core_queueing, int, 0444);
-MODULE_PARM_DESC(disable_core_queueing, "\n"
- "\tWhen set the networking core's tx_queue_len is set to zero. This\n"
- "\tallows packets to be sent without lock contention in the packet\n"
- "\tscheduler resulting in some cases in improved throughput.\n");
+int max_rx_cpus = -1;
+module_param(max_rx_cpus, int, 0444);
+MODULE_PARM_DESC(max_rx_cpus, "\n"
+ "\t\tThe maximum number of CPUs to use for packet reception.\n"
+ "\t\tUse -1 to use all available CPUs.");
+int rx_napi_weight = 32;
+module_param(rx_napi_weight, int, 0444);
+MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
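rx_napi_weight is the per-poll budget handed to the NAPI core. How ethernet-rx.c consumes it is not part of this diff; below is a minimal sketch under that assumption, using the stock netif_napi_add() API. The napi struct and the registration helper are hypothetical names for illustration only.

    /* Sketch (hypothetical names): how ethernet-rx.c would typically
     * hand the module parameter above to the NAPI core. */
    static struct napi_struct cvm_oct_napi_sketch;

    static void cvm_oct_napi_register(struct net_device *dev,
                                      int (*poll)(struct napi_struct *, int))
    {
            netif_napi_add(dev, &cvm_oct_napi_sketch, poll, rx_napi_weight);
            napi_enable(&cvm_oct_napi_sketch);
    }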
/*
* The offset from mac_addr_base that should be used for the next port
@@ -122,9 +121,16 @@ MODULE_PARM_DESC(disable_core_queueing, "\n"
static unsigned int cvm_oct_mac_addr_offset;
/**
- * Periodic timer to check auto negotiation
+ * cvm_oct_poll_queue - Workqueue for polling operations.
+ */
+struct workqueue_struct *cvm_oct_poll_queue;
+
+/**
+ * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
+ *
+ * Set to one right before cvm_oct_poll_queue is destroyed.
*/
-static struct timer_list cvm_oct_poll_timer;
+atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
/**
* Array of every ethernet device owned by this driver indexed by
@@ -132,65 +138,44 @@ static struct timer_list cvm_oct_poll_timer;
*/
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
-/**
- * Periodic timer tick for slow management operations
- *
- * @arg: Device to check
- */
-static void cvm_do_timer(unsigned long arg)
+u64 cvm_oct_tx_poll_interval;
+
+static void cvm_oct_rx_refill_worker(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
+
+static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
- int32_t skb_to_free, undo;
- int queues_per_port;
- int qos;
- struct octeon_ethernet *priv;
- static int port;
+ /*
+ * FPA 0 may have been drained, try to refill it if we need
+ * more than num_packet_buffers / 2, otherwise normal receive
+ * processing will refill it. If it were drained, no packets
+ * could be received so cvm_oct_napi_poll would never be
+ * invoked to do the refill.
+ */
+ cvm_oct_rx_refill_pool(num_packet_buffers / 2);
- if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
- /*
- * All ports have been polled. Start the next
- * iteration through the ports in one second.
- */
- port = 0;
- mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
- return;
- }
- if (!cvm_oct_device[port])
- goto out;
+ if (!atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_delayed_work(cvm_oct_poll_queue,
+ &cvm_oct_rx_refill_work, HZ);
+}
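The worker depends on cvm_oct_rx_refill_pool(), which lives in ethernet-rx.c and is not shown in this diff. One plausible shape is sketched below, assuming cvm_oct_mem_fill_fpa() returns the number of buffers it actually placed in the pool, and reusing the FAU_NUM_PACKET_BUFFERS_TO_FREE counter that this patch initializes in cvm_oct_init_module(). Treat it as an illustration, not the committed implementation.

    /* Sketch: top up FPA pool 0 once the outstanding-buffer count
     * crosses the caller's threshold. */
    void cvm_oct_rx_refill_pool(int fill_threshold)
    {
            int number_to_free =
                    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

            if (number_to_free > fill_threshold) {
                    int num_freed;
                    /* Charge the buffers before allocating them. */
                    cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
                                          -number_to_free);
                    num_freed = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
                                                     CVMX_FPA_PACKET_POOL_SIZE,
                                                     number_to_free);
                    /* Return credit for buffers we failed to allocate. */
                    if (num_freed != number_to_free)
                            cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
                                                  number_to_free - num_freed);
            }
    }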
+
+static void cvm_oct_periodic_worker(struct work_struct *work)
+{
+ struct octeon_ethernet *priv = container_of(work,
+ struct octeon_ethernet,
+ port_periodic_work.work);
- priv = netdev_priv(cvm_oct_device[port]);
if (priv->poll)
- priv->poll(cvm_oct_device[port]);
-
- queues_per_port = cvmx_pko_get_num_queues(port);
- /* Drain any pending packets in the free list */
- for (qos = 0; qos < queues_per_port; qos++) {
- if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
- continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- undo = skb_to_free > 0 ?
- MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
- if (undo > 0)
- cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
- skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
- MAX_SKB_TO_FREE : -skb_to_free;
- cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
- }
- cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
+ priv->poll(cvm_oct_device[priv->port]);
-out:
- port++;
- /* Poll the next port in a 50th of a second.
- This spreads the polling of ports out a little bit */
- mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
-}
+ cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
+
+ if (!atomic_read(&cvm_oct_poll_queue_stopping))
+ queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
+}
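The per-port delayed work introduced here has its lifecycle scattered across several hunks of this patch. Collected as a comment-only sketch, with every call taken from hunks below:

    /* Per-port periodic work lifecycle, as wired up by this patch:
     *
     * at init (cvm_oct_init_module):
     *      INIT_DELAYED_WORK(&priv->port_periodic_work,
     *                        cvm_oct_periodic_worker);
     *      queue_delayed_work(cvm_oct_poll_queue,
     *                         &priv->port_periodic_work, HZ);
     *
     * at module unload (cvm_oct_cleanup_module):
     *      atomic_inc_return(&cvm_oct_poll_queue_stopping);
     *      cancel_delayed_work_sync(&priv->port_periodic_work);
     *      destroy_workqueue(cvm_oct_poll_queue);
     */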
-/**
- * Configure common hardware for all interfaces
- */
static __init void cvm_oct_configure_common_hw(void)
{
- int r;
/* Setup the FPA */
cvmx_fpa_enable();
cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
@@ -205,28 +190,13 @@ static __init void cvm_oct_configure_common_hw(void)
cvmx_helper_setup_red(num_packet_buffers / 4,
num_packet_buffers / 8);
- /* Enable the MII interface */
- if (!octeon_is_simulation())
- cvmx_write_csr(CVMX_SMIX_EN(0), 1);
-
- /* Register an IRQ hander for to receive POW interrupts */
- r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
- cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
- cvm_oct_device);
-
-#if defined(CONFIG_SMP) && 0
- if (USE_MULTICORE_RECEIVE) {
- irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
- cpu_online_mask);
- }
-#endif
}
/**
- * Free a work queue entry received in a intercept callback.
+ * cvm_oct_free_work - free a work queue entry
+ *
+ * @work_queue_entry: Work queue entry to free
*
- * @work_queue_entry:
- * Work queue entry to free
* Returns Zero on success, Negative on failure.
*/
int cvm_oct_free_work(void *work_queue_entry)
@@ -253,9 +223,9 @@ int cvm_oct_free_work(void *work_queue_entry)
EXPORT_SYMBOL(cvm_oct_free_work);
/**
- * Get the low level ethernet statistics
- *
+ * cvm_oct_common_get_stats - get the low level ethernet statistics
* @dev: Device to get the statistics from
+ *
* Returns Pointer to the statistics
*/
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
@@ -299,8 +269,7 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
}
/**
- * Change the link MTU. Unimplemented
- *
+ * cvm_oct_common_change_mtu - change the link MTU
* @dev: Device to change
* @new_mtu: The new MTU
*
@@ -364,8 +333,7 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
}
/**
- * Set the multicast list. Currently unimplemented.
- *
+ * cvm_oct_common_set_multicast_list - set the multicast list
* @dev: Device to work on
*/
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
@@ -420,10 +388,10 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
}
/**
- * Set the hardware MAC address for a device
- *
- * @dev: Device to change the MAC address for
- * @addr: Address structure to change it too. MAC address is addr + 2.
+ * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
+ * @dev: The device in question.
+ * @addr: Address structure to change it to.
+ *
* Returns Zero on success
*/
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
@@ -470,9 +438,9 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
}
/**
- * Per network device initialization
- *
+ * cvm_oct_common_init - per network device initialization
* @dev: Device to initialize
+ *
* Returns Zero on success
*/
int cvm_oct_common_init(struct net_device *dev)
@@ -510,8 +478,11 @@ int cvm_oct_common_init(struct net_device *dev)
&& (always_use_pow || strstr(pow_send_list, dev->name)))
priv->queue = -1;
- if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
- dev->features |= NETIF_F_IP_CSUM;
+ if (priv->queue != -1) {
+ dev->features |= NETIF_F_SG;
+ if (USE_HW_TCPUDP_CHECKSUM)
+ dev->features |= NETIF_F_IP_CSUM;
+ }
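Advertising NETIF_F_SG commits the TX path to handling non-linear skbs, not just the linear head. The actual gather-list construction belongs to ethernet-tx.c and is outside this hunk; the sketch below only illustrates the fragment walk such a path must perform (the helper name is hypothetical).

    /* Sketch: what NETIF_F_SG obliges the xmit path to handle. */
    static int count_tx_segments(struct sk_buff *skb)
    {
            int i, segs = 1;        /* the linear part of the skb */

            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    skb_frag_t *fs = &skb_shinfo(skb)->frags[i];
                    if (fs->size)   /* each page fragment adds a segment */
                            segs++;
            }
            return segs;
    }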
/* We do our own locking, Linux doesn't need to */
dev->features |= NETIF_F_LLTX;
@@ -625,12 +596,6 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
extern void octeon_mdiobus_force_mod_depencency(void);
-/**
- * Module/ driver initialization. Creates the linux network
- * devices.
- *
- * Returns Zero on success
- */
static int __init cvm_oct_init_module(void)
{
int num_interfaces;
@@ -648,8 +613,12 @@ static int __init cvm_oct_init_module(void)
else
cvm_oct_mac_addr_offset = 0;
- cvm_oct_proc_initialize();
- cvm_oct_rx_initialize();
+ cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
+ if (cvm_oct_poll_queue == NULL) {
+ pr_err("octeon-ethernet: Cannot create workqueue");
+ return -ENOMEM;
+ }
+
cvm_oct_configure_common_hw();
cvmx_helper_initialize_packet_io_global();
@@ -682,6 +651,9 @@ static int __init cvm_oct_init_module(void)
*/
cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+ /* Initialize the FAU used for counting tx SKBs that need to be freed */
+ cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
+
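FAU_TOTAL_TX_TO_CLEAN is a hardware fetch-and-add counter, letting producers and the cleanup watchdog coordinate without a lock. Its users live outside this hunk; the fragment below is an assumed usage pattern for illustration, not code from this patch.

    /* Sketch (assumed usage, not in this hunk): the TX path counts
     * each queued skb so the cleanup watchdog knows work is pending. */
    cvmx_fau_atomic_add32(FAU_TOTAL_TX_TO_CLEAN, 1);  /* on each xmit */
    /* ... and a reader can sample the counter without modifying it: */
    int pending = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 0);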
if ((pow_send_group != -1)) {
struct net_device *dev;
pr_info("\tConfiguring device for POW only access\n");
@@ -689,7 +661,6 @@ static int __init cvm_oct_init_module(void)
if (dev) {
/* Initialize the device private structure. */
struct octeon_ethernet *priv = netdev_priv(dev);
- memset(priv, 0, sizeof(struct octeon_ethernet));
dev->netdev_ops = &cvm_oct_pow_netdev_ops;
priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
@@ -700,19 +671,16 @@ static int __init cvm_oct_init_module(void)
skb_queue_head_init(&priv->tx_free_list[qos]);
if (register_netdev(dev) < 0) {
- pr_err("Failed to register ethernet "
- "device for POW\n");
+ pr_err("Failed to register ethernet device for POW\n");
kfree(dev);
} else {
cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
- pr_info("%s: POW send group %d, receive "
- "group %d\n",
- dev->name, pow_send_group,
- pow_receive_group);
+ pr_info("%s: POW send group %d, receive group %d\n",
+ dev->name, pow_send_group,
+ pow_receive_group);
}
} else {
- pr_err("Failed to allocate ethernet device "
- "for POW\n");
+ pr_err("Failed to allocate ethernet device for POW\n");
}
}
@@ -730,17 +698,15 @@ static int __init cvm_oct_init_module(void)
struct net_device *dev =
alloc_etherdev(sizeof(struct octeon_ethernet));
if (!dev) {
- pr_err("Failed to allocate ethernet device "
- "for port %d\n", port);
+ pr_err("Failed to allocate ethernet device for port %d\n", port);
continue;
}
- if (disable_core_queueing)
- dev->tx_queue_len = 0;
/* Initialize the device private structure. */
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(struct octeon_ethernet));
+ INIT_DELAYED_WORK(&priv->port_periodic_work,
+ cvm_oct_periodic_worker);
priv->imode = imode;
priv->port = port;
priv->queue = cvmx_pko_get_base_queue(priv->port);
@@ -803,44 +769,25 @@ static int __init cvm_oct_init_module(void)
fau -=
cvmx_pko_get_num_queues(priv->port) *
sizeof(uint32_t);
+ queue_delayed_work(cvm_oct_poll_queue,
+ &priv->port_periodic_work, HZ);
}
}
}
- if (INTERRUPT_LIMIT) {
- /*
- * Set the POW timer rate to give an interrupt at most
- * INTERRUPT_LIMIT times per second.
- */
- cvmx_write_csr(CVMX_POW_WQ_INT_PC,
- octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
- 16 * 256) << 8);
+ cvm_oct_tx_initialize();
+ cvm_oct_rx_initialize();
- /*
- * Enable POW timer interrupt. It will count when
- * there are packets available.
- */
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
- 0x1ful << 24);
- } else {
- /* Enable POW interrupt when our port has at least one packet */
- cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
- }
+ /*
+ * 150 us: about 10 1500-byte packets at 1 Gbit/s.
+ */
+ cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
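The assignment converts a microsecond budget into core-clock ticks, since the CIU timer counts in clock cycles. A worked example with an assumed 500 MHz core clock:

    /* Worked example (assumed 500 MHz core clock):
     *   a 1500-byte frame takes ~12 us on the wire at 1 Gbit/s,
     *   so ten frames take ~120 us; 150 us adds a little slack.
     *   octeon_get_clock_rate() == 500000000
     *   => cvm_oct_tx_poll_interval = 150 * (500000000 / 1000000)
     *                               = 150 * 500 = 75000 clock ticks
     */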
- /* Enable the poll timer for checking RGMII status */
- init_timer(&cvm_oct_poll_timer);
- cvm_oct_poll_timer.data = 0;
- cvm_oct_poll_timer.function = cvm_do_timer;
- mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+ queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
return 0;
}
-/**
- * Module / driver shutdown
- *
- * Returns Zero on success
- */
static void __exit cvm_oct_cleanup_module(void)
{
int port;
@@ -853,22 +800,31 @@ static void __exit cvm_oct_cleanup_module(void)
/* Free the interrupt handler */
free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
- del_timer(&cvm_oct_poll_timer);
+ atomic_inc_return(&cvm_oct_poll_queue_stopping);
+ cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
+
cvm_oct_rx_shutdown();
+ cvm_oct_tx_shutdown();
+
cvmx_pko_disable();
/* Free the ethernet devices */
for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
if (cvm_oct_device[port]) {
- cvm_oct_tx_shutdown(cvm_oct_device[port]);
- unregister_netdev(cvm_oct_device[port]);
- kfree(cvm_oct_device[port]);
+ struct net_device *dev = cvm_oct_device[port];
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cancel_delayed_work_sync(&priv->port_periodic_work);
+
+ cvm_oct_tx_shutdown_dev(dev);
+ unregister_netdev(dev);
+ kfree(dev);
cvm_oct_device[port] = NULL;
}
}
+ destroy_workqueue(cvm_oct_poll_queue);
+
cvmx_pko_shutdown();
- cvm_oct_proc_shutdown();
cvmx_ipd_free_ptr();
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 402a15b9bb0e..d58192563552 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2007 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -57,58 +57,12 @@ struct octeon_ethernet {
uint64_t link_info;
/* Called periodically to check link status */
void (*poll) (struct net_device *dev);
+ struct delayed_work port_periodic_work;
+ struct work_struct port_work; /* may be unused. */
};
-/**
- * Free a work queue entry received in a intercept callback.
- *
- * @work_queue_entry:
- * Work queue entry to free
- * Returns Zero on success, Negative on failure.
- */
int cvm_oct_free_work(void *work_queue_entry);
-/**
- * Transmit a work queue entry out of the ethernet port. Both
- * the work queue entry and the packet data can optionally be
- * freed. The work will be freed on error as well.
- *
- * @dev: Device to transmit out.
- * @work_queue_entry:
- * Work queue entry to send
- * @do_free: True if the work queue entry and packet data should be
- * freed. If false, neither will be freed.
- * @qos: Index into the queues for this port to transmit on. This
- * is used to implement QoS if their are multiple queues per
- * port. This parameter must be between 0 and the number of
- * queues per port minus 1. Values outside of this range will
- * be change to zero.
- *
- * Returns Zero on success, negative on failure.
- */
-int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
- int do_free, int qos);
-
-/**
- * Transmit a work queue entry out of the ethernet port. Both
- * the work queue entry and the packet data can optionally be
- * freed. The work will be freed on error as well. This simply
- * wraps cvmx_oct_transmit_qos() for backwards compatability.
- *
- * @dev: Device to transmit out.
- * @work_queue_entry:
- * Work queue entry to send
- * @do_free: True if the work queue entry and packet data should be
- * freed. If false, neither will be freed.
- *
- * Returns Zero on success, negative on failure.
- */
-static inline int cvm_oct_transmit(struct net_device *dev,
- void *work_queue_entry, int do_free)
-{
- return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0);
-}
-
extern int cvm_oct_rgmii_init(struct net_device *dev);
extern void cvm_oct_rgmii_uninit(struct net_device *dev);
extern int cvm_oct_rgmii_open(struct net_device *dev);
@@ -134,5 +88,11 @@ extern int pow_send_group;
extern int pow_receive_group;
extern char pow_send_list[];
extern struct net_device *cvm_oct_device[];
+extern struct workqueue_struct *cvm_oct_poll_queue;
+extern atomic_t cvm_oct_poll_queue_stopping;
+extern u64 cvm_oct_tx_poll_interval;
+
+extern int max_rx_cpus;
+extern int rx_napi_weight;
#endif
diff --git a/drivers/staging/sm7xx/smtc2d.c b/drivers/staging/sm7xx/smtc2d.c
index 133b86c6a678..2fff0a0052d1 100644
--- a/drivers/staging/sm7xx/smtc2d.c
+++ b/drivers/staging/sm7xx/smtc2d.c
@@ -5,7 +5,7 @@
* Author: Boyod boyod.yang@siliconmotion.com.cn
*
* Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzj@lemote.com
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtc2d.h b/drivers/staging/sm7xx/smtc2d.h
index 38d0c335322b..02b4fa29136c 100644
--- a/drivers/staging/sm7xx/smtc2d.h
+++ b/drivers/staging/sm7xx/smtc2d.h
@@ -5,7 +5,7 @@
* Author: Ge Wang, gewang@siliconmotion.com
*
* Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzj@lemote.com
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 161dbc9c1397..a4f6f49aef48 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -6,7 +6,7 @@
* Boyod boyod.yang@siliconmotion.com.cn
*
* Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzj@lemote.com
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
index 7f2c34138215..7ee565c2c952 100644
--- a/drivers/staging/sm7xx/smtcfb.h
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -6,7 +6,7 @@
* Boyod boyod.yang@siliconmotion.com.cn
*
* Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzj@lemote.com
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for