From ca0c9584b1f16bd5911893647cb7f1be82e60554 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 3 Oct 2009 19:48:22 +0900 Subject: this_cpu: Straight transformations Use this_cpu_ptr and __this_cpu_ptr in locations where straight transformations are possible because per_cpu_ptr is used with either smp_processor_id() or raw_smp_processor_id(). cc: David Howells Acked-by: Tejun Heo cc: Ingo Molnar cc: Rusty Russell cc: Eric Dumazet Signed-off-by: Christoph Lameter Signed-off-by: Tejun Heo --- drivers/net/chelsio/sge.c | 5 ++--- drivers/net/loopback.c | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 8c658cf6f62f..109d2783e4d8 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) } __skb_pull(skb, sizeof(*p)); - st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); + st = this_cpu_ptr(sge->port_stats[p->iff]); skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && @@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct sge *sge = adapter->sge; - struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], - smp_processor_id()); + struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); struct cpl_tx_pkt *cpl; struct sk_buff *orig_skb = skb; int ret; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 1bc654a73c47..8ebeb76a373d 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, /* it's OK to use per_cpu_ptr() because BHs are off */ pcpu_lstats = dev->ml_priv; - lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); + lb_stats = this_cpu_ptr(pcpu_lstats); len = skb->len; if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { -- cgit v1.2.1 From e7dcaa4755e35d7540bf19f316f8798357c53fa0 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Sat, 3 Oct 2009 19:48:23 +0900 Subject: this_cpu: Eliminate get/put_cpu There are cases where we can use this_cpu_ptr and as the result of using this_cpu_ptr() we no longer need to determine the currently executing cpu. In those places no get/put_cpu combination is needed anymore. The local cpu variable can be eliminated. Preemption still needs to be disabled and enabled since the modifications of the per cpu variables is not atomic. There may be multiple per cpu variables modified and those must all be from the same processor. 
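For illustration, a minimal sketch of the two transformations described above, using a hypothetical per-cpu statistics structure (the struct, field and variable names are made up and are not taken from the drivers touched below):

	struct foo_stats *stats;		/* obtained from alloc_percpu(struct foo_stats) */

	/* Before: the CPU is looked up explicitly. */
	int cpu = get_cpu();			/* disables preemption, returns the cpu number */
	struct foo_stats *s = per_cpu_ptr(stats, cpu);
	s->packets++;
	s->bytes += len;
	put_cpu();				/* re-enables preemption */

	/* After: this_cpu_ptr() resolves the current CPU internally. */
	preempt_disable();			/* updates are not atomic, keep them all on one CPU */
	struct foo_stats *cur = this_cpu_ptr(stats);
	cur->packets++;
	cur->bytes += len;
	preempt_enable();

Where preemption is already excluded at the call site (for example the loopback xmit path, whose hunk below notes that BHs are off), the straight per_cpu_ptr() to this_cpu_ptr() replacement of the first patch needs no extra preempt_disable()/preempt_enable() pair.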
Acked-by: Maciej Sosnowski Acked-by: Dan Williams Acked-by: Tejun Heo cc: Eric Biederman cc: Stephen Hemminger cc: David L Stevens Signed-off-by: Christoph Lameter Signed-off-by: Tejun Heo --- drivers/net/veth.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ade5b344f75d..0c4a81124257 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -153,7 +153,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device *rcv = NULL; struct veth_priv *priv, *rcv_priv; struct veth_net_stats *stats, *rcv_stats; - int length, cpu; + int length; skb_orphan(skb); @@ -161,9 +161,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rcv = priv->peer; rcv_priv = netdev_priv(rcv); - cpu = smp_processor_id(); - stats = per_cpu_ptr(priv->stats, cpu); - rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu); + stats = this_cpu_ptr(priv->stats); + rcv_stats = this_cpu_ptr(rcv_priv->stats); if (!(rcv->flags & IFF_UP)) goto tx_drop; -- cgit v1.2.1 From 417608c20a4c8397bc5307d949ec01ea0a0dd8e5 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 12 Nov 2009 11:19:44 -0800 Subject: IB/mlx4: Remove limitation on LSO header size Current code has a limitation: an LSO header is not allowed to cross a 64 byte boundary. This patch removes this limitation by setting the WQE RR for large headers thus allowing LSO headers of any size. The extra buffer reserved for MLX4_IB_QP_LSO QPs has been doubled, from 64 to 128 bytes, assuming this is reasonable upper limit for header length. Also, this patch will cause IB_DEVICE_UD_TSO to be set only for HCA FW versions that set MLX4_DEV_CAP_FLAG_BLH; e.g. FW version 2.6.000 and higher. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/net/mlx4/fw.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 3c16602172fc..7194be3a2894 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -90,6 +90,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) [ 9] = "Q_Key violation counter", [10] = "VMM", [12] = "DPDP", + [15] = "Big LSO headers", [16] = "MW support", [17] = "APM support", [18] = "Atomic ops support", -- cgit v1.2.1 From be504b0b9fbe9ba447c93ef0f5789f377102d555 Mon Sep 17 00:00:00 2001 From: Yevgeny Petrilin Date: Thu, 12 Nov 2009 15:51:16 -0800 Subject: mlx4_core: Fix parsing of reserved EQ cap Value returned by firmware is the actual value, not a log. 
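As a worked example (with a made-up register value): if the byte read at QUERY_DEV_CAP_RSVD_EQ_OFFSET holds 0x04, the old expression 1 << (field & 0xf) reported 16 reserved EQs, while the corrected field & 0xf reports 4. The neighbouring max_eqs capability really is log2-encoded and therefore keeps its 1 << (field & 0xf) form in the hunk below.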
Signed-off-by: Liran Liss Signed-off-by: Roland Dreier --- drivers/net/mlx4/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 7194be3a2894..04f42ae1eda0 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -236,7 +236,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); dev_cap->max_mpts = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); - dev_cap->reserved_eqs = 1 << (field & 0xf); + dev_cap->reserved_eqs = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); dev_cap->max_eqs = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); -- cgit v1.2.1 From d95cacc599a79d87052b3975f60cae62c04dc01f Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 20 Oct 2009 20:06:21 +0000 Subject: powerpc: Move ehea hcall definitions into hvcall.h Move ehea hcall definitions into hvcall.h. Signed-off-by: Anton Blanchard Acked-by: Thomas Klein Signed-off-by: Benjamin Herrenschmidt --- drivers/net/ehea/ehea_hcall.h | 51 ------------------------------------------- drivers/net/ehea/ehea_phyp.h | 1 - 2 files changed, 52 deletions(-) delete mode 100644 drivers/net/ehea/ehea_hcall.h (limited to 'drivers/net') diff --git a/drivers/net/ehea/ehea_hcall.h b/drivers/net/ehea/ehea_hcall.h deleted file mode 100644 index 8e7d1c3edc60..000000000000 --- a/drivers/net/ehea/ehea_hcall.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * linux/drivers/net/ehea/ehea_hcall.h - * - * eHEA ethernet device driver for IBM eServer System p - * - * (C) Copyright IBM Corp. 2006 - * - * Authors: - * Christoph Raisch - * Jan-Bernd Themann - * Thomas Klein - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#ifndef __EHEA_HCALL_H__ -#define __EHEA_HCALL_H__ - -/** - * This file contains HCALL defines that are to be included in the appropriate - * kernel files later - */ - -#define H_ALLOC_HEA_RESOURCE 0x278 -#define H_MODIFY_HEA_QP 0x250 -#define H_QUERY_HEA_QP 0x254 -#define H_QUERY_HEA 0x258 -#define H_QUERY_HEA_PORT 0x25C -#define H_MODIFY_HEA_PORT 0x260 -#define H_REG_BCMC 0x264 -#define H_DEREG_BCMC 0x268 -#define H_REGISTER_HEA_RPAGES 0x26C -#define H_DISABLE_AND_GET_HEA 0x270 -#define H_GET_HEA_INFO 0x274 -#define H_ADD_CONN 0x284 -#define H_DEL_CONN 0x288 - -#endif /* __EHEA_HCALL_H__ */ diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h index f3628c803567..2f8174c248bc 100644 --- a/drivers/net/ehea/ehea_phyp.h +++ b/drivers/net/ehea/ehea_phyp.h @@ -33,7 +33,6 @@ #include #include "ehea.h" #include "ehea_hw.h" -#include "ehea_hcall.h" /* Some abbreviations used here: * -- cgit v1.2.1 From 8ae45a535b172d350759eec2fcecf4368d4c13d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Ha=C5=82asa?= Date: Sun, 18 Oct 2009 21:03:08 +0200 Subject: IXP4xx: Fix normally-disabled debugging text in drivers/net/arm/ixp4xx_eth.c. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Krzysztof HaƂasa --- drivers/net/arm/ixp4xx_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 691b81eb0f46..c3dfbdd2cdcf 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -322,7 +322,7 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location, ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val); spin_unlock_irqrestore(&mdio_lock, flags); #if DEBUG_MDIO - printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n", + printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n", bus->name, phy_id, location, val, ret); #endif return ret; -- cgit v1.2.1 From e15c1c1f3f903f679c9782b540f9d52c80c99610 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sat, 28 Nov 2009 18:12:06 +0100 Subject: pcmcia: remove unused IRQ_FIRST_SHARED Komuro pointed out that IRQ_FIRST_SHARED is not used at all in the PCMCIA subsystem, so remove it. Also, remove two bogus assignments. 
CC: Karsten Keil CC: netdev@vger.kernel.org CC: alsa-devel@alsa-project.org CC: Komuro Signed-off-by: Dominik Brodowski --- drivers/net/pcmcia/axnet_cs.c | 2 +- drivers/net/pcmcia/fmvj18x_cs.c | 2 +- drivers/net/pcmcia/pcnet_cs.c | 2 +- drivers/net/pcmcia/smc91c92_cs.c | 2 +- drivers/net/pcmcia/xirc2ps_cs.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 800597b82d18..cb2ce03bd681 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -270,7 +270,7 @@ static int try_io_port(struct pcmcia_device *link) /* for master/slave multifunction cards */ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; } } else { /* This should be two 16-port windows */ diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 6e3e1ced6db4..fc5a890ba0a9 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -426,7 +426,7 @@ static int fmvj18x_config(struct pcmcia_device *link) if (link->io.NumPorts2 != 0) { link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; ret = mfc_try_io_port(link); if (ret != 0) goto failed; } else if (cardtype == UNGERMANN) { diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index cbe462ed221f..f41fbcc34327 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link) /* for master/slave multifunction cards */ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; } } else { /* This should be two 16-port windows */ diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 9e0da370912e..49185c3c1201 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -454,7 +454,7 @@ static int mhz_mfc_config(struct pcmcia_device *link) link->conf.Attributes |= CONF_ENABLE_SPKR; link->conf.Status = CCSR_AUDIO_ENA; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; link->io.IOAddrLines = 16; link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->io.NumPorts2 = 8; diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index fe504b7f369f..55fa5ca245d1 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -841,7 +841,7 @@ xirc2ps_config(struct pcmcia_device * link) link->conf.Attributes |= CONF_ENABLE_SPKR; link->conf.Status |= CCSR_AUDIO_ENA; } - link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED ; + link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING; link->io.NumPorts2 = 8; link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; if (local->dingo) { -- cgit v1.2.1 From e2b18e3018630d80eda54508e697d613283d57ac Mon Sep 17 00:00:00 2001 From: Ladislav Michl Date: Fri, 11 Dec 2009 16:16:33 -0800 Subject: smc91x: remove OMAP specific bits Now that all OMAP boards are using the board resources, we don't need to keep the arch/board specific crap in the driver header. 
Cc: linux-net@vger.kernel.org Acked-by: Nicolas Pitre Signed-off-by: Ladislav Michl Signed-off-by: Tony Lindgren --- drivers/net/smc91x.h | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 7815bfc300f5..54799544bda3 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -206,21 +206,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) } } -#elif defined(CONFIG_ARCH_OMAP) - -/* We can only do 16-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - #elif defined(CONFIG_SH_SH4202_MICRODEV) #define SMC_CAN_USE_8BIT 0 -- cgit v1.2.1 From 46e75f66677f5094bb51e91f9473128c4e907c7d Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 7 Dec 2009 19:56:42 +1100 Subject: net: fix for utsrelease.h moving to generated Signed-off-by: Stephen Rothwell Signed-off-by: Michal Marek --- drivers/net/wireless/iwlwifi/iwl-core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 675b7df632fc..27ca859e7453 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -63,7 +63,7 @@ #ifndef __iwl_core_h__ #define __iwl_core_h__ -#include +#include /************************ * forward declarations * -- cgit v1.2.1 From 471452104b8520337ae2fb48c4e61cd4896e025d Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 14 Dec 2009 18:00:08 -0800 Subject: const: constify remaining dev_pm_ops Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/3c59x.c | 2 +- drivers/net/dm9000.c | 2 +- drivers/net/r8169.c | 2 +- drivers/net/smsc911x.c | 2 +- drivers/net/vmxnet3/vmxnet3_drv.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 78b7167a8ce3..39db0e96815d 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -837,7 +837,7 @@ static int vortex_resume(struct device *dev) return 0; } -static struct dev_pm_ops vortex_pm_ops = { +static const struct dev_pm_ops vortex_pm_ops = { .suspend = vortex_suspend, .resume = vortex_resume, .freeze = vortex_suspend, diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 0cbe3c0e7c06..b37730065688 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -1646,7 +1646,7 @@ dm9000_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops dm9000_drv_pm_ops = { +static const struct dev_pm_ops dm9000_drv_pm_ops = { .suspend = dm9000_drv_suspend, .resume = dm9000_drv_resume, }; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index acfc5a3aa490..60f96c468a24 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -4859,7 +4859,7 @@ out: return 0; } -static struct dev_pm_ops rtl8169_pm_ops = { +static const struct dev_pm_ops rtl8169_pm_ops = { .suspend = rtl8169_suspend, .resume = rtl8169_resume, .freeze = rtl8169_suspend, diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 20d6095cf411..494cd91ea39c 100644 --- a/drivers/net/smsc911x.c +++ 
b/drivers/net/smsc911x.c @@ -2154,7 +2154,7 @@ static int smsc911x_resume(struct device *dev) return (to == 0) ? -EIO : 0; } -static struct dev_pm_ops smsc911x_pm_ops = { +static const struct dev_pm_ops smsc911x_pm_ops = { .suspend = smsc911x_suspend, .resume = smsc911x_resume, }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 1ceb9d0f8b97..9cc438282d77 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2689,7 +2689,7 @@ vmxnet3_resume(struct device *device) return 0; } -static struct dev_pm_ops vmxnet3_pm_ops = { +static const struct dev_pm_ops vmxnet3_pm_ops = { .suspend = vmxnet3_suspend, .resume = vmxnet3_resume, }; -- cgit v1.2.1 From 43ff8b60853793fb0155b3e465739d2170c3aa2f Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 15 Dec 2009 16:48:29 -0800 Subject: mlx4: use bitmap_find_next_zero_area Signed-off-by: Akinobu Mita Reviewed-by: Roland Dreier Cc: Yevgeny Petrilin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/mlx4/alloc.c | 37 ++++--------------------------------- 1 file changed, 4 insertions(+), 33 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index ad95d5f7b630..8c8515619b8e 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c @@ -72,35 +72,6 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) mlx4_bitmap_free_range(bitmap, obj, 1); } -static unsigned long find_aligned_range(unsigned long *bitmap, - u32 start, u32 nbits, - int len, int align) -{ - unsigned long end, i; - -again: - start = ALIGN(start, align); - - while ((start < nbits) && test_bit(start, bitmap)) - start += align; - - if (start >= nbits) - return -1; - - end = start+len; - if (end > nbits) - return -1; - - for (i = start + 1; i < end; i++) { - if (test_bit(i, bitmap)) { - start = i + 1; - goto again; - } - } - - return start; -} - u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) { u32 obj, i; @@ -110,13 +81,13 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) spin_lock(&bitmap->lock); - obj = find_aligned_range(bitmap->table, bitmap->last, - bitmap->max, cnt, align); + obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, + bitmap->last, cnt, align - 1); if (obj >= bitmap->max) { bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) & bitmap->mask; - obj = find_aligned_range(bitmap->table, 0, bitmap->max, - cnt, align); + obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, + 0, cnt, align - 1); } if (obj < bitmap->max) { -- cgit v1.2.1 From 25d967b72a92d72b6e0263a0337dfc940bd6c044 Mon Sep 17 00:00:00 2001 From: David Daney Date: Wed, 14 Oct 2009 12:04:38 -0700 Subject: NET: Add driver for Octeon MDIO buses. The Octeon SOC has two types of Ethernet ports, each type with its own driver. However, the PHYs for all the ports are controlled by a common MDIO bus. Because the mdio driver is not associated with a particular driver, but is instead a system level resource, we create s stand-alone driver for it. As for the driver, we put the register definitions in arch/mips/include/asm/octeon where most of the other Octeon register definitions live. This is a platform driver with the platform device for "mdio-octeon" being registered in the platform startup code. Signed-off-by: David Daney Acked-by: David S. 
Miller Signed-off-by: Ralf Baechle --- drivers/net/phy/Kconfig | 11 +++ drivers/net/phy/Makefile | 1 + drivers/net/phy/mdio-octeon.c | 180 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 192 insertions(+) create mode 100644 drivers/net/phy/mdio-octeon.c (limited to 'drivers/net') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index d5d8e1c5bc91..fc5938ba3d78 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -115,4 +115,15 @@ config MDIO_GPIO To compile this driver as a module, choose M here: the module will be called mdio-gpio. +config MDIO_OCTEON + tristate "Support for MDIO buses on Octeon SOCs" + depends on CPU_CAVIUM_OCTEON + default y + help + + This module provides a driver for the Octeon MDIO busses. + It is required by the Octeon Ethernet device drivers. + + If in doubt, say Y. + endif # PHYLIB diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index edfaac48cbd5..1342585af381 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -20,3 +20,4 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_STE10XP) += ste10Xp.o +obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c new file mode 100644 index 000000000000..61a4461cbda5 --- /dev/null +++ b/drivers/net/phy/mdio-octeon.c @@ -0,0 +1,180 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009 Cavium Networks + */ + +#include +#include +#include +#include + +#include +#include + +#define DRV_VERSION "1.0" +#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver" + +struct octeon_mdiobus { + struct mii_bus *mii_bus; + int unit; + int phy_irq[PHY_MAX_ADDR]; +}; + +static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct octeon_mdiobus *p = bus->priv; + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_rd_dat smi_rd; + int timeout = 1000; + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */ + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64); + + do { + /* + * Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. + */ + cvmx_wait(1000); + smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit)); + } while (smi_rd.s.pending && --timeout); + + if (smi_rd.s.val) + return smi_rd.s.dat; + else + return -EIO; +} + +static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, + int regnum, u16 val) +{ + struct octeon_mdiobus *p = bus->priv; + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_wr_dat smi_wr; + int timeout = 1000; + + smi_wr.u64 = 0; + smi_wr.s.dat = val; + cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64); + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */ + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64); + + do { + /* + * Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. 
+ */ + cvmx_wait(1000); + smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit)); + } while (smi_wr.s.pending && --timeout); + + if (timeout <= 0) + return -EIO; + + return 0; +} + +static int __init octeon_mdiobus_probe(struct platform_device *pdev) +{ + struct octeon_mdiobus *bus; + int i; + int err = -ENOENT; + + bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + /* The platform_device id is our unit number. */ + bus->unit = pdev->id; + + bus->mii_bus = mdiobus_alloc(); + + if (!bus->mii_bus) + goto err; + + /* + * Standard Octeon evaluation boards don't support phy + * interrupts, we need to poll. + */ + for (i = 0; i < PHY_MAX_ADDR; i++) + bus->phy_irq[i] = PHY_POLL; + + bus->mii_bus->priv = bus; + bus->mii_bus->irq = bus->phy_irq; + bus->mii_bus->name = "mdio-octeon"; + snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit); + bus->mii_bus->parent = &pdev->dev; + + bus->mii_bus->read = octeon_mdiobus_read; + bus->mii_bus->write = octeon_mdiobus_write; + + dev_set_drvdata(&pdev->dev, bus); + + err = mdiobus_register(bus->mii_bus); + if (err) + goto err_register; + + dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); + + return 0; +err_register: + mdiobus_free(bus->mii_bus); + +err: + devm_kfree(&pdev->dev, bus); + return err; +} + +static int __exit octeon_mdiobus_remove(struct platform_device *pdev) +{ + struct octeon_mdiobus *bus; + + bus = dev_get_drvdata(&pdev->dev); + + mdiobus_unregister(bus->mii_bus); + mdiobus_free(bus->mii_bus); + return 0; +} + +static struct platform_driver octeon_mdiobus_driver = { + .driver = { + .name = "mdio-octeon", + .owner = THIS_MODULE, + }, + .probe = octeon_mdiobus_probe, + .remove = __exit_p(octeon_mdiobus_remove), +}; + +void octeon_mdiobus_force_mod_depencency(void) +{ + /* Let ethernet drivers force us to be loaded. */ +} +EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency); + +static int __init octeon_mdiobus_mod_init(void) +{ + return platform_driver_register(&octeon_mdiobus_driver); +} + +static void __exit octeon_mdiobus_mod_exit(void) +{ + platform_driver_unregister(&octeon_mdiobus_driver); +} + +module_init(octeon_mdiobus_mod_init); +module_exit(octeon_mdiobus_mod_exit); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); -- cgit v1.2.1 From d6aa60a10b2f5068e331ca2936b1e6c248ae37c1 Mon Sep 17 00:00:00 2001 From: David Daney Date: Wed, 14 Oct 2009 12:04:41 -0700 Subject: NET: Add Ethernet driver for Octeon MGMT devices. The Octeon MGMT Ethernet ports are present in some members of the Octeon SOC family (cn52XX and cn56XX have them). The mdio bus connected to the MGMT PHYs is shared with the main octeon-ethernet driver, we force it to be loaded first by calling octeon_mdiobus_force_mod_depencency. The platform devices for the MGMT Ethernet ports are added in arch/mips/cavium-octeon/octeon-platform.c, and the register definitions for the ports live in arch/mips/include/asm/octeon/ along with their ilk. Although it currently is the only driver in drivers/net/octeon, the directory was created looking forward to the day that octeon-ethernet will move there from its current home in drivers/staging. Signed-off-by: David Daney Acked-by: David S. 
Miller Signed-off-by: Ralf Baechle --- drivers/net/Kconfig | 2 + drivers/net/Makefile | 2 + drivers/net/octeon/Kconfig | 10 + drivers/net/octeon/Makefile | 2 + drivers/net/octeon/octeon_mgmt.c | 1176 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 1192 insertions(+) create mode 100644 drivers/net/octeon/Kconfig create mode 100644 drivers/net/octeon/Makefile create mode 100644 drivers/net/octeon/octeon_mgmt.c (limited to 'drivers/net') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index a5be9ac6405c..e58a65391ad2 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1953,6 +1953,8 @@ config BCM63XX_ENET source "drivers/net/fs_enet/Kconfig" +source "drivers/net/octeon/Kconfig" + endif # NET_ETHERNET # diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 246323d7f161..ad1346dd9da9 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -285,3 +285,5 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o obj-$(CONFIG_SFC) += sfc/ obj-$(CONFIG_WIMAX) += wimax/ + +obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig new file mode 100644 index 000000000000..1e56bbf3f5c0 --- /dev/null +++ b/drivers/net/octeon/Kconfig @@ -0,0 +1,10 @@ +config OCTEON_MGMT_ETHERNET + tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" + depends on CPU_CAVIUM_OCTEON + select PHYLIB + select MDIO_OCTEON + default y + help + This option enables the ethernet driver for the management + port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX, + CN54XX, CN52XX, and CN6XXX chips. diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile new file mode 100644 index 000000000000..906edecacfd3 --- /dev/null +++ b/drivers/net/octeon/Makefile @@ -0,0 +1,2 @@ + +obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c new file mode 100644 index 000000000000..050538bf155a --- /dev/null +++ b/drivers/net/octeon/octeon_mgmt.c @@ -0,0 +1,1176 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009 Cavium Networks + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DRV_NAME "octeon_mgmt" +#define DRV_VERSION "2.0" +#define DRV_DESCRIPTION \ + "Cavium Networks Octeon MII (management) port Network Driver" + +#define OCTEON_MGMT_NAPI_WEIGHT 16 + +/* + * Ring sizes that are powers of two allow for more efficient modulo + * opertions. + */ +#define OCTEON_MGMT_RX_RING_SIZE 512 +#define OCTEON_MGMT_TX_RING_SIZE 128 + +/* Allow 8 bytes for vlan and FCS. 
*/ +#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + +union mgmt_port_ring_entry { + u64 d64; + struct { + u64 reserved_62_63:2; + /* Length of the buffer/packet in bytes */ + u64 len:14; + /* For TX, signals that the packet should be timestamped */ + u64 tstamp:1; + /* The RX error code */ + u64 code:7; +#define RING_ENTRY_CODE_DONE 0xf +#define RING_ENTRY_CODE_MORE 0x10 + /* Physical address of the buffer */ + u64 addr:40; + } s; +}; + +struct octeon_mgmt { + struct net_device *netdev; + int port; + int irq; + u64 *tx_ring; + dma_addr_t tx_ring_handle; + unsigned int tx_next; + unsigned int tx_next_clean; + unsigned int tx_current_fill; + /* The tx_list lock also protects the ring related variables */ + struct sk_buff_head tx_list; + + /* RX variables only touched in napi_poll. No locking necessary. */ + u64 *rx_ring; + dma_addr_t rx_ring_handle; + unsigned int rx_next; + unsigned int rx_next_fill; + unsigned int rx_current_fill; + struct sk_buff_head rx_list; + + spinlock_t lock; + unsigned int last_duplex; + unsigned int last_link; + struct device *dev; + struct napi_struct napi; + struct tasklet_struct tx_clean_tasklet; + struct phy_device *phydev; +}; + +static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) +{ + int port = p->port; + union cvmx_mixx_intena mix_intena; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); + mix_intena.s.ithena = enable ? 1 : 0; + cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + spin_unlock_irqrestore(&p->lock, flags); +} + +static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) +{ + int port = p->port; + union cvmx_mixx_intena mix_intena; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); + mix_intena.s.othena = enable ? 1 : 0; + cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + spin_unlock_irqrestore(&p->lock, flags); +} + +static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_rx_irq(p, 1); +} + +static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_rx_irq(p, 0); +} + +static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_tx_irq(p, 1); +} + +static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_tx_irq(p, 0); +} + +static unsigned int ring_max_fill(unsigned int ring_size) +{ + return ring_size - 8; +} + +static unsigned int ring_size_to_bytes(unsigned int ring_size) +{ + return ring_size * sizeof(union mgmt_port_ring_entry); +} + +static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + + while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { + unsigned int size; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + + /* CN56XX pass 1 needs 8 bytes of padding. */ + size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN; + + skb = netdev_alloc_skb(netdev, size); + if (!skb) + break; + skb_reserve(skb, NET_IP_ALIGN); + __skb_queue_tail(&p->rx_list, skb); + + re.d64 = 0; + re.s.len = size; + re.s.addr = dma_map_single(p->dev, skb->data, + size, + DMA_FROM_DEVICE); + + /* Put it in the ring. 
*/ + p->rx_ring[p->rx_next_fill] = re.d64; + dma_sync_single_for_device(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + p->rx_next_fill = + (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; + p->rx_current_fill++; + /* Ring the bell. */ + cvmx_write_csr(CVMX_MIXX_IRING2(port), 1); + } +} + +static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) +{ + int port = p->port; + union cvmx_mixx_orcnt mix_orcnt; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + int cleaned = 0; + unsigned long flags; + + mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + while (mix_orcnt.s.orcnt) { + dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + + spin_lock_irqsave(&p->tx_list.lock, flags); + + re.d64 = p->tx_ring[p->tx_next_clean]; + p->tx_next_clean = + (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; + skb = __skb_dequeue(&p->tx_list); + + mix_orcnt.u64 = 0; + mix_orcnt.s.orcnt = 1; + + /* Acknowledge to hardware that we have the buffer. */ + cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64); + p->tx_current_fill--; + + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_unmap_single(p->dev, re.s.addr, re.s.len, + DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + cleaned++; + + mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + } + + if (cleaned && netif_queue_stopped(p->netdev)) + netif_wake_queue(p->netdev); +} + +static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) +{ + struct octeon_mgmt *p = (struct octeon_mgmt *)arg; + octeon_mgmt_clean_tx_buffers(p); + octeon_mgmt_enable_tx_irq(p); +} + +static void octeon_mgmt_update_rx_stats(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + unsigned long flags; + u64 drop, bad; + + /* These reads also clear the count registers. */ + drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port)); + bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port)); + + if (drop || bad) { + /* Do an atomic update. */ + spin_lock_irqsave(&p->lock, flags); + netdev->stats.rx_errors += bad; + netdev->stats.rx_dropped += drop; + spin_unlock_irqrestore(&p->lock, flags); + } +} + +static void octeon_mgmt_update_tx_stats(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + unsigned long flags; + + union cvmx_agl_gmx_txx_stat0 s0; + union cvmx_agl_gmx_txx_stat1 s1; + + /* These reads also clear the count registers. */ + s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port)); + s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port)); + + if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { + /* Do an atomic update. */ + spin_lock_irqsave(&p->lock, flags); + netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol; + netdev->stats.collisions += s1.s.scol + s1.s.mcol; + spin_unlock_irqrestore(&p->lock, flags); + } +} + +/* + * Dequeue a receive skb and its corresponding ring entry. The ring + * entry is returned, *pskb is updated to point to the skb. 
+ */ +static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, + struct sk_buff **pskb) +{ + union mgmt_port_ring_entry re; + + dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + + re.d64 = p->rx_ring[p->rx_next]; + p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; + p->rx_current_fill--; + *pskb = __skb_dequeue(&p->rx_list); + + dma_unmap_single(p->dev, re.s.addr, + ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, + DMA_FROM_DEVICE); + + return re.d64; +} + + +static int octeon_mgmt_receive_one(struct octeon_mgmt *p) +{ + int port = p->port; + struct net_device *netdev = p->netdev; + union cvmx_mixx_ircnt mix_ircnt; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + struct sk_buff *skb2; + struct sk_buff *skb_new; + union mgmt_port_ring_entry re2; + int rc = 1; + + + re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); + if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { + /* A good packet, send it up. */ + skb_put(skb, re.s.len); +good: + skb->protocol = eth_type_trans(skb, netdev); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + netdev->last_rx = jiffies; + netif_receive_skb(skb); + rc = 0; + } else if (re.s.code == RING_ENTRY_CODE_MORE) { + /* + * Packet split across skbs. This can happen if we + * increase the MTU. Buffers that are already in the + * rx ring can then end up being too small. As the rx + * ring is refilled, buffers sized for the new MTU + * will be used and we should go back to the normal + * non-split case. + */ + skb_put(skb, re.s.len); + do { + re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); + if (re2.s.code != RING_ENTRY_CODE_MORE + && re2.s.code != RING_ENTRY_CODE_DONE) + goto split_error; + skb_put(skb2, re2.s.len); + skb_new = skb_copy_expand(skb, 0, skb2->len, + GFP_ATOMIC); + if (!skb_new) + goto split_error; + if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new), + skb2->len)) + goto split_error; + skb_put(skb_new, skb2->len); + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb2); + skb = skb_new; + } while (re2.s.code == RING_ENTRY_CODE_MORE); + goto good; + } else { + /* Some other error, discard it. */ + dev_kfree_skb_any(skb); + /* + * Error statistics are accumulated in + * octeon_mgmt_update_rx_stats. + */ + } + goto done; +split_error: + /* Discard the whole mess. */ + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb2); + while (re2.s.code == RING_ENTRY_CODE_MORE) { + re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); + dev_kfree_skb_any(skb2); + } + netdev->stats.rx_errors++; + +done: + /* Tell the hardware we processed a packet. */ + mix_ircnt.u64 = 0; + mix_ircnt.s.ircnt = 1; + cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64); + return rc; + +} + +static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) +{ + int port = p->port; + unsigned int work_done = 0; + union cvmx_mixx_ircnt mix_ircnt; + int rc; + + + mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); + while (work_done < budget && mix_ircnt.s.ircnt) { + + rc = octeon_mgmt_receive_one(p); + if (!rc) + work_done++; + + /* Check for more packets. 
*/ + mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); + } + + octeon_mgmt_rx_fill_ring(p->netdev); + + return work_done; +} + +static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); + struct net_device *netdev = p->netdev; + unsigned int work_done = 0; + + work_done = octeon_mgmt_receive_packets(p, budget); + + if (work_done < budget) { + /* We stopped because no more packets were available. */ + napi_complete(napi); + octeon_mgmt_enable_rx_irq(p); + } + octeon_mgmt_update_rx_stats(netdev); + + return work_done; +} + +/* Reset the hardware to clean state. */ +static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) +{ + union cvmx_mixx_ctl mix_ctl; + union cvmx_mixx_bist mix_bist; + union cvmx_agl_gmx_bist agl_gmx_bist; + + mix_ctl.u64 = 0; + cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); + do { + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port)); + } while (mix_ctl.s.busy); + mix_ctl.s.reset = 1; + cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); + cvmx_read_csr(CVMX_MIXX_CTL(p->port)); + cvmx_wait(64); + + mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port)); + if (mix_bist.u64) + dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", + (unsigned long long)mix_bist.u64); + + agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST); + if (agl_gmx_bist.u64) + dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", + (unsigned long long)agl_gmx_bist.u64); +} + +struct octeon_mgmt_cam_state { + u64 cam[6]; + u64 cam_mask; + int cam_index; +}; + +static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, + unsigned char *addr) +{ + int i; + + for (i = 0; i < 6; i++) + cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); + cs->cam_mask |= (1ULL << cs->cam_index); + cs->cam_index++; +} + +static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + int i; + union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; + union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; + unsigned long flags; + unsigned int prev_packet_enable; + unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ + unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ + struct octeon_mgmt_cam_state cam_state; + struct dev_addr_list *list; + struct list_head *pos; + int available_cam_entries; + + memset(&cam_state, 0, sizeof(cam_state)); + + if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) { + cam_mode = 0; + available_cam_entries = 8; + } else { + /* + * One CAM entry for the primary address, leaves seven + * for the secondary addresses. + */ + available_cam_entries = 7 - netdev->dev_addrs.count; + } + + if (netdev->flags & IFF_MULTICAST) { + if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) + || netdev->mc_count > available_cam_entries) + multicast_mode = 2; /* 1 - Accept all multicast. */ + else + multicast_mode = 0; /* 0 - Use CAM. */ + } + + if (cam_mode == 1) { + /* Add primary address. */ + octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); + list_for_each(pos, &netdev->dev_addrs.list) { + struct netdev_hw_addr *hw_addr; + hw_addr = list_entry(pos, struct netdev_hw_addr, list); + octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr); + list = list->next; + } + } + if (multicast_mode == 0) { + i = netdev->mc_count; + list = netdev->mc_list; + while (i--) { + octeon_mgmt_cam_state_add(&cam_state, list->da_addr); + list = list->next; + } + } + + + spin_lock_irqsave(&p->lock, flags); + + /* Disable packet I/O. 
*/ + agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prev_packet_enable = agl_gmx_prtx.s.en; + agl_gmx_prtx.s.en = 0; + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); + + + adr_ctl.u64 = 0; + adr_ctl.s.cam_mode = cam_mode; + adr_ctl.s.mcst = multicast_mode; + adr_ctl.s.bcst = 1; /* Allow broadcast */ + + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64); + + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask); + + /* Restore packet I/O. */ + agl_gmx_prtx.s.en = prev_packet_enable; + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); + + spin_unlock_irqrestore(&p->lock, flags); +} + +static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) +{ + struct sockaddr *sa = addr; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN); + + octeon_mgmt_set_rx_filtering(netdev); + + return 0; +} + +static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; + + /* + * Limit the MTU to make sure the ethernet packets are between + * 64 bytes and 16383 bytes. + */ + if (size_without_fcs < 64 || size_without_fcs > 16383) { + dev_warn(p->dev, "MTU must be between %d and %d.\n", + 64 - OCTEON_MGMT_RX_HEADROOM, + 16383 - OCTEON_MGMT_RX_HEADROOM); + return -EINVAL; + } + + netdev->mtu = new_mtu; + + cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs); + cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), + (size_without_fcs + 7) & 0xfff8); + + return 0; +} + +static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + union cvmx_mixx_isr mixx_isr; + + mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port)); + + /* Clear any pending interrupts */ + cvmx_write_csr(CVMX_MIXX_ISR(port), + cvmx_read_csr(CVMX_MIXX_ISR(port))); + cvmx_read_csr(CVMX_MIXX_ISR(port)); + + if (mixx_isr.s.irthresh) { + octeon_mgmt_disable_rx_irq(p); + napi_schedule(&p->napi); + } + if (mixx_isr.s.orthresh) { + octeon_mgmt_disable_tx_irq(p); + tasklet_schedule(&p->tx_clean_tasklet); + } + + return IRQ_HANDLED; +} + +static int octeon_mgmt_ioctl(struct net_device *netdev, + struct ifreq *rq, int cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!p->phydev) + return -EINVAL; + + return phy_mii_ioctl(p->phydev, if_mii(rq), cmd); +} + +static void octeon_mgmt_adjust_link(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + unsigned long flags; + int link_changed = 0; + + spin_lock_irqsave(&p->lock, flags); + if (p->phydev->link) { + if (!p->last_link) + link_changed = 1; + if (p->last_duplex != p->phydev->duplex) { + p->last_duplex = p->phydev->duplex; + prtx_cfg.u64 = + cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.s.duplex = p->phydev->duplex; + 
cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), + prtx_cfg.u64); + } + } else { + if (p->last_link) + link_changed = -1; + } + p->last_link = p->phydev->link; + spin_unlock_irqrestore(&p->lock, flags); + + if (link_changed != 0) { + if (link_changed > 0) { + netif_carrier_on(netdev); + pr_info("%s: Link is up - %d/%s\n", netdev->name, + p->phydev->speed, + DUPLEX_FULL == p->phydev->duplex ? + "Full" : "Half"); + } else { + netif_carrier_off(netdev); + pr_info("%s: Link is down\n", netdev->name); + } + } +} + +static int octeon_mgmt_init_phy(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + char phy_id[20]; + + if (octeon_is_simulation()) { + /* No PHYs in the simulator. */ + netif_carrier_on(netdev); + return 0; + } + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port); + + p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(p->phydev)) { + p->phydev = NULL; + return -1; + } + + phy_start_aneg(p->phydev); + + return 0; +} + +static int octeon_mgmt_open(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + union cvmx_mixx_ctl mix_ctl; + union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; + union cvmx_mixx_oring1 oring1; + union cvmx_mixx_iring1 iring1; + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + union cvmx_mixx_irhwm mix_irhwm; + union cvmx_mixx_orhwm mix_orhwm; + union cvmx_mixx_intena mix_intena; + struct sockaddr sa; + + /* Allocate ring buffers. */ + p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + GFP_KERNEL); + if (!p->tx_ring) + return -ENOMEM; + p->tx_ring_handle = + dma_map_single(p->dev, p->tx_ring, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + p->tx_next = 0; + p->tx_next_clean = 0; + p->tx_current_fill = 0; + + + p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + GFP_KERNEL); + if (!p->rx_ring) + goto err_nomem; + p->rx_ring_handle = + dma_map_single(p->dev, p->rx_ring, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + + p->rx_next = 0; + p->rx_next_fill = 0; + p->rx_current_fill = 0; + + octeon_mgmt_reset_hw(p); + + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); + + /* Bring it out of reset if needed. */ + if (mix_ctl.s.reset) { + mix_ctl.s.reset = 0; + cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); + do { + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); + } while (mix_ctl.s.reset); + } + + agl_gmx_inf_mode.u64 = 0; + agl_gmx_inf_mode.s.en = 1; + cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); + + oring1.u64 = 0; + oring1.s.obase = p->tx_ring_handle >> 3; + oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; + cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64); + + iring1.u64 = 0; + iring1.s.ibase = p->rx_ring_handle >> 3; + iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; + cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64); + + /* Disable packet I/O. */ + prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.s.en = 0; + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); + + memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); + octeon_mgmt_set_mac_address(netdev, &sa); + + octeon_mgmt_change_mtu(netdev, netdev->mtu); + + /* + * Enable the port HW. Packets are not allowed until + * cvmx_mgmt_port_enable() is called. 
+ */ + mix_ctl.u64 = 0; + mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */ + mix_ctl.s.en = 1; /* Enable the port */ + mix_ctl.s.nbtarb = 0; /* Arbitration mode */ + /* MII CB-request FIFO programmable high watermark */ + mix_ctl.s.mrq_hwm = 1; + cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); + + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + /* + * Force compensation values, as they are not + * determined properly by HW + */ + union cvmx_agl_gmx_drv_ctl drv_ctl; + + drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); + if (port) { + drv_ctl.s.byp_en1 = 1; + drv_ctl.s.nctl1 = 6; + drv_ctl.s.pctl1 = 6; + } else { + drv_ctl.s.byp_en = 1; + drv_ctl.s.nctl = 6; + drv_ctl.s.pctl = 6; + } + cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); + } + + octeon_mgmt_rx_fill_ring(netdev); + + /* Clear statistics. */ + /* Clear on read. */ + cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1); + cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0); + cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0); + + cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1); + cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0); + cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0); + + /* Clear any pending interrupts */ + cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port))); + + if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, + netdev)) { + dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); + goto err_noirq; + } + + /* Interrupt every single RX packet */ + mix_irhwm.u64 = 0; + mix_irhwm.s.irhwm = 0; + cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64); + + /* Interrupt when we have 5 or more packets to clean. */ + mix_orhwm.u64 = 0; + mix_orhwm.s.orhwm = 5; + cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64); + + /* Enable receive and transmit interrupts */ + mix_intena.u64 = 0; + mix_intena.s.ithena = 1; + mix_intena.s.othena = 1; + cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + + + /* Enable packet I/O. */ + + rxx_frm_ctl.u64 = 0; + rxx_frm_ctl.s.pre_align = 1; + /* + * When set, disables the length check for non-min sized pkts + * with padding in the client data. + */ + rxx_frm_ctl.s.pad_len = 1; + /* When set, disables the length check for VLAN pkts */ + rxx_frm_ctl.s.vlan_len = 1; + /* When set, PREAMBLE checking is less strict */ + rxx_frm_ctl.s.pre_free = 1; + /* Control Pause Frames can match station SMAC */ + rxx_frm_ctl.s.ctl_smac = 0; + /* Control Pause Frames can match globally assign Multicast address */ + rxx_frm_ctl.s.ctl_mcst = 1; + /* Forward pause information to TX block */ + rxx_frm_ctl.s.ctl_bck = 1; + /* Drop Control Pause Frames */ + rxx_frm_ctl.s.ctl_drp = 1; + /* Strip off the preamble */ + rxx_frm_ctl.s.pre_strp = 1; + /* + * This port is configured to send PREAMBLE+SFD to begin every + * frame. GMX checks that the PREAMBLE is sent correctly. 
+ */ + rxx_frm_ctl.s.pre_chk = 1; + cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64); + + /* Enable the AGL block */ + agl_gmx_inf_mode.u64 = 0; + agl_gmx_inf_mode.s.en = 1; + cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); + + /* Configure the port duplex and enables */ + prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.s.tx_en = 1; + prtx_cfg.s.rx_en = 1; + prtx_cfg.s.en = 1; + p->last_duplex = 1; + prtx_cfg.s.duplex = p->last_duplex; + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); + + p->last_link = 0; + netif_carrier_off(netdev); + + if (octeon_mgmt_init_phy(netdev)) { + dev_err(p->dev, "Cannot initialize PHY.\n"); + goto err_noirq; + } + + netif_wake_queue(netdev); + napi_enable(&p->napi); + + return 0; +err_noirq: + octeon_mgmt_reset_hw(p); + dma_unmap_single(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->rx_ring); +err_nomem: + dma_unmap_single(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->tx_ring); + return -ENOMEM; +} + +static int octeon_mgmt_stop(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + napi_disable(&p->napi); + netif_stop_queue(netdev); + + if (p->phydev) + phy_disconnect(p->phydev); + + netif_carrier_off(netdev); + + octeon_mgmt_reset_hw(p); + + + free_irq(p->irq, netdev); + + /* dma_unmap is a nop on Octeon, so just free everything. */ + skb_queue_purge(&p->tx_list); + skb_queue_purge(&p->rx_list); + + dma_unmap_single(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->rx_ring); + + dma_unmap_single(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->tx_ring); + + + return 0; +} + +static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + union mgmt_port_ring_entry re; + unsigned long flags; + + re.d64 = 0; + re.s.len = skb->len; + re.s.addr = dma_map_single(p->dev, skb->data, + skb->len, + DMA_TO_DEVICE); + + spin_lock_irqsave(&p->tx_list.lock, flags); + + if (unlikely(p->tx_current_fill >= + ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_unmap_single(p->dev, re.s.addr, re.s.len, + DMA_TO_DEVICE); + + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + __skb_queue_tail(&p->tx_list, skb); + + /* Put it in the ring. */ + p->tx_ring[p->tx_next] = re.d64; + p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; + p->tx_current_fill++; + + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_sync_single_for_device(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + + /* Ring the bell. 
*/ + cvmx_write_csr(CVMX_MIXX_ORING2(port), 1); + + netdev->trans_start = jiffies; + octeon_mgmt_clean_tx_buffers(p); + octeon_mgmt_update_tx_stats(netdev); + return NETDEV_TX_OK; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void octeon_mgmt_poll_controller(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + octeon_mgmt_receive_packets(p, 16); + octeon_mgmt_update_rx_stats(netdev); + return; +} +#endif + +static void octeon_mgmt_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strncpy(info->driver, DRV_NAME, sizeof(info->driver)); + strncpy(info->version, DRV_VERSION, sizeof(info->version)); + strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strncpy(info->bus_info, "N/A", sizeof(info->bus_info)); + info->n_stats = 0; + info->testinfo_len = 0; + info->regdump_len = 0; + info->eedump_len = 0; +} + +static int octeon_mgmt_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (p->phydev) + return phy_ethtool_gset(p->phydev, cmd); + + return -EINVAL; +} + +static int octeon_mgmt_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (p->phydev) + return phy_ethtool_sset(p->phydev, cmd); + + return -EINVAL; +} + +static const struct ethtool_ops octeon_mgmt_ethtool_ops = { + .get_drvinfo = octeon_mgmt_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_settings = octeon_mgmt_get_settings, + .set_settings = octeon_mgmt_set_settings +}; + +static const struct net_device_ops octeon_mgmt_ops = { + .ndo_open = octeon_mgmt_open, + .ndo_stop = octeon_mgmt_stop, + .ndo_start_xmit = octeon_mgmt_xmit, + .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, + .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering, + .ndo_set_mac_address = octeon_mgmt_set_mac_address, + .ndo_do_ioctl = octeon_mgmt_ioctl, + .ndo_change_mtu = octeon_mgmt_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = octeon_mgmt_poll_controller, +#endif +}; + +static int __init octeon_mgmt_probe(struct platform_device *pdev) +{ + struct resource *res_irq; + struct net_device *netdev; + struct octeon_mgmt *p; + int i; + + netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); + if (netdev == NULL) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, netdev); + p = netdev_priv(netdev); + netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, + OCTEON_MGMT_NAPI_WEIGHT); + + p->netdev = netdev; + p->dev = &pdev->dev; + + p->port = pdev->id; + snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + goto err; + + p->irq = res_irq->start; + spin_lock_init(&p->lock); + + skb_queue_head_init(&p->tx_list); + skb_queue_head_init(&p->rx_list); + tasklet_init(&p->tx_clean_tasklet, + octeon_mgmt_clean_tx_tasklet, (unsigned long)p); + + netdev->netdev_ops = &octeon_mgmt_ops; + netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; + + + /* The mgmt ports get the first N MACs. 
 */ + for (i = 0; i < 6; i++) + netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i]; + netdev->dev_addr[5] += p->port; + + if (p->port >= octeon_bootinfo->mac_addr_count) + dev_err(&pdev->dev, + "Error %s: Using MAC outside of the assigned range: " + "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name, + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + + if (register_netdev(netdev)) + goto err; + + dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); + return 0; +err: + free_netdev(netdev); + return -ENOENT; +} + +static int __exit octeon_mgmt_remove(struct platform_device *pdev) +{ + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + + unregister_netdev(netdev); + free_netdev(netdev); + return 0; +} + +static struct platform_driver octeon_mgmt_driver = { + .driver = { + .name = "octeon_mgmt", + .owner = THIS_MODULE, + }, + .probe = octeon_mgmt_probe, + .remove = __exit_p(octeon_mgmt_remove), +}; + +extern void octeon_mdiobus_force_mod_depencency(void); + +static int __init octeon_mgmt_mod_init(void) +{ + /* Force our mdiobus driver module to be loaded first. */ + octeon_mdiobus_force_mod_depencency(); + return platform_driver_register(&octeon_mgmt_driver); +} + +static void __exit octeon_mgmt_mod_exit(void) +{ + platform_driver_unregister(&octeon_mgmt_driver); +} + +module_init(octeon_mgmt_mod_init); +module_exit(octeon_mgmt_mod_exit); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); -- cgit v1.2.1 From 45465487897a1c6d508b14b904dc5777f7ec7e04 Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Mon, 21 Dec 2009 14:37:26 -0800 Subject: kfifo: move struct kfifo in place This is a new generic kernel FIFO implementation. The current kernel fifo API is not very widely used, because it has too many constraints. Only 17 files in the current 2.6.31-rc5 use it. FIFOs are, like lists, a very basic thing, and a kfifo API which handles the most common use cases would save a lot of development time and memory resources. I think these are the reasons why kfifo is not in use: - The API is too simple; important functions are missing - A fifo can only be allocated dynamically - There is a requirement of a spinlock whether you need it or not - There is no support for data records inside a fifo So I decided to extend the kfifo in a more generic way without blowing up the API too much. The new API has the following benefits: - Generic usage: for kernel internal use and/or device drivers. - Provides an API for the most common use cases. - Slim API: the whole API provides 25 functions. - Follows Linux style habits. - DECLARE_KFIFO, DEFINE_KFIFO and INIT_KFIFO macros. - Direct copy_to_user from the fifo and copy_from_user into the fifo. - The kfifo itself is an in-place member of the using data structure; this saves an indirection and avoids a separate kernel allocation. - Lockless access: if only one reader and one writer are active on the fifo, which is the common use case, no additional locking is necessary. - Removes the spinlock - gives the user the freedom to choose what kind of locking to use, if any is required. - Ability to handle records. Three types of records are supported: - Variable length records between 0-255 bytes, with a record size field of 1 byte. - Variable length records between 0-65535 bytes, with a record size field of 2 bytes. - Fixed size records, which have no record size field. - Preserves memory resources. - Performance! - Easy to use!
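As a rough sketch of how an embedded fifo is meant to be used once this whole series is applied (the my_dev structure and its helpers are hypothetical, not code from these patches; the calls mirror the libertas conversion in the hunks below, i.e. kfifo_alloc without a lock argument and the kfifo_in/kfifo_out names introduced further down, and locking is omitted since a single reader and single writer can run locklessly):

#include <linux/kfifo.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct my_dev {
	struct kfifo events;		/* fifo embedded in the owning object */
};

static int my_dev_init(struct my_dev *d)
{
	/* room for 16 u32 events; the size must be a power of two */
	return kfifo_alloc(&d->events, 16 * sizeof(u32), GFP_KERNEL);
}

static void my_dev_queue_event(struct my_dev *d, u32 event)
{
	kfifo_in(&d->events, (unsigned char *) &event, sizeof(event));
}

static int my_dev_pop_event(struct my_dev *d, u32 *event)
{
	/* kfifo_out() returns the number of bytes actually copied */
	return kfifo_out(&d->events, (unsigned char *) event,
			 sizeof(*event)) == sizeof(*event);
}

static void my_dev_exit(struct my_dev *d)
{
	kfifo_free(&d->events);
}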
This patch: Since most users want to have the kfifo as part of another object, reorganize the code to allow including struct kfifo in another data structure. This requires changing the kfifo_alloc and kfifo_init prototypes so that we pass an existing kfifo pointer into them. This patch changes the implementation and all existing users. [akpm@linux-foundation.org: fix warning] Signed-off-by: Stefani Seibold Acked-by: Greg Kroah-Hartman Acked-by: Mauro Carvalho Chehab Acked-by: Andi Kleen Acked-by: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/wireless/libertas/cmd.c | 4 ++-- drivers/net/wireless/libertas/dev.h | 4 ++-- drivers/net/wireless/libertas/main.c | 16 +++++++--------- 3 files changed, 11 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index b9b371bfa30f..ffed17f4f506 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -1365,7 +1365,7 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) priv->dnld_sent = DNLD_RES_RECEIVED; /* If nothing to do, go back to sleep (?) */ - if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) + if (!__kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx]) priv->psstate = PS_STATE_SLEEP; spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -1439,7 +1439,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv) } /* Pending events or command responses? */ - if (__kfifo_len(priv->event_fifo) || priv->resp_len[priv->resp_idx]) { + if (__kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) { allowed = 0; lbs_deb_host("pending events or command responses\n"); } diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 6a8d2b291d8c..05bb298dfae9 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -10,7 +10,7 @@ #include "scan.h" #include "assoc.h" - +#include /** sleep_params */ struct sleep_params { @@ -120,7 +120,7 @@ struct lbs_private { u32 resp_len[2]; /* Events sent from hardware to driver */ - struct kfifo *event_fifo; + struct kfifo event_fifo; /** thread to service interrupts */ struct task_struct *main_thread; diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index db38a5a719fa..403909287414 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -459,7 +459,7 @@ static int lbs_thread(void *data) else if (!list_empty(&priv->cmdpendingq) && !(priv->wakeup_dev_required)) shouldsleep = 0; /* We have a command to send */ - else if (__kfifo_len(priv->event_fifo)) + else if (__kfifo_len(&priv->event_fifo)) shouldsleep = 0; /* We have an event to process */ else shouldsleep = 1; /* No command */ @@ -511,9 +511,9 @@ static int lbs_thread(void *data) /* Process hardware events, e.g. 
card removed, link lost */ spin_lock_irq(&priv->driver_lock); - while (__kfifo_len(priv->event_fifo)) { + while (__kfifo_len(&priv->event_fifo)) { u32 event; - __kfifo_get(priv->event_fifo, (unsigned char *) &event, + __kfifo_get(&priv->event_fifo, (unsigned char *) &event, sizeof(event)); spin_unlock_irq(&priv->driver_lock); lbs_process_event(priv, event); @@ -883,10 +883,9 @@ static int lbs_init_adapter(struct lbs_private *priv) priv->resp_len[0] = priv->resp_len[1] = 0; /* Create the event FIFO */ - priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL); - if (IS_ERR(priv->event_fifo)) { + ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL, NULL); + if (ret) { lbs_pr_err("Out of memory allocating event FIFO buffer\n"); - ret = -ENOMEM; goto out; } @@ -901,8 +900,7 @@ static void lbs_free_adapter(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_MAIN); lbs_free_cmd_buffer(priv); - if (priv->event_fifo) - kfifo_free(priv->event_fifo); + kfifo_free(&priv->event_fifo); del_timer(&priv->command_timer); del_timer(&priv->auto_deepsleep_timer); kfree(priv->networks); @@ -1177,7 +1175,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event) if (priv->psstate == PS_STATE_SLEEP) priv->psstate = PS_STATE_AWAKE; - __kfifo_put(priv->event_fifo, (unsigned char *) &event, sizeof(u32)); + __kfifo_put(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); wake_up_interruptible(&priv->waitq); -- cgit v1.2.1 From c1e13f25674ed564948ecb7dfe5f83e578892896 Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Mon, 21 Dec 2009 14:37:27 -0800 Subject: kfifo: move out spinlock Move the pointer to the spinlock out of struct kfifo. Most users in tree do not actually use a spinlock, so the few exceptions now have to call kfifo_{get,put}_locked, which takes an extra argument to a spinlock. Signed-off-by: Stefani Seibold Acked-by: Greg Kroah-Hartman Acked-by: Mauro Carvalho Chehab Acked-by: Andi Kleen Acked-by: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/wireless/libertas/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 403909287414..2cc7ecd8d123 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -883,7 +883,7 @@ static int lbs_init_adapter(struct lbs_private *priv) priv->resp_len[0] = priv->resp_len[1] = 0; /* Create the event FIFO */ - ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL, NULL); + ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); if (ret) { lbs_pr_err("Out of memory allocating event FIFO buffer\n"); goto out; -- cgit v1.2.1 From e64c026dd09b73faf20707711402fc5ed55a8e70 Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Mon, 21 Dec 2009 14:37:28 -0800 Subject: kfifo: cleanup namespace change name of __kfifo_* functions to kfifo_*, because the prefix __kfifo should be reserved for internal functions only. 
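As a rough sketch of what the last two changes mean for callers, here is a hypothetical producer that supplies its own lock around the renamed calls, following the pattern the libertas hunks use (the fifo, lock and helper are assumptions, not code from these patches; kfifo_put_locked()/kfifo_get_locked(), mentioned above, are the packaged alternative that takes the spinlock as an extra argument):

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical producer: with the spinlock gone from struct kfifo, the
 * caller decides how (and whether) to serialize access. */
static void queue_event(struct kfifo *fifo, spinlock_t *lock, u32 event)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	kfifo_put(fifo, (unsigned char *) &event, sizeof(event));
	spin_unlock_irqrestore(lock, flags);
}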
Signed-off-by: Stefani Seibold Acked-by: Greg Kroah-Hartman Acked-by: Mauro Carvalho Chehab Acked-by: Andi Kleen Acked-by: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/wireless/libertas/cmd.c | 4 ++-- drivers/net/wireless/libertas/main.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index ffed17f4f506..42611bea76a3 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -1365,7 +1365,7 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) priv->dnld_sent = DNLD_RES_RECEIVED; /* If nothing to do, go back to sleep (?) */ - if (!__kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx]) + if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx]) priv->psstate = PS_STATE_SLEEP; spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -1439,7 +1439,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv) } /* Pending events or command responses? */ - if (__kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) { + if (kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) { allowed = 0; lbs_deb_host("pending events or command responses\n"); } diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 2cc7ecd8d123..0622104f0a03 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -459,7 +459,7 @@ static int lbs_thread(void *data) else if (!list_empty(&priv->cmdpendingq) && !(priv->wakeup_dev_required)) shouldsleep = 0; /* We have a command to send */ - else if (__kfifo_len(&priv->event_fifo)) + else if (kfifo_len(&priv->event_fifo)) shouldsleep = 0; /* We have an event to process */ else shouldsleep = 1; /* No command */ @@ -511,9 +511,9 @@ static int lbs_thread(void *data) /* Process hardware events, e.g. card removed, link lost */ spin_lock_irq(&priv->driver_lock); - while (__kfifo_len(&priv->event_fifo)) { + while (kfifo_len(&priv->event_fifo)) { u32 event; - __kfifo_get(&priv->event_fifo, (unsigned char *) &event, + kfifo_get(&priv->event_fifo, (unsigned char *) &event, sizeof(event)); spin_unlock_irq(&priv->driver_lock); lbs_process_event(priv, event); @@ -1175,7 +1175,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event) if (priv->psstate == PS_STATE_SLEEP) priv->psstate = PS_STATE_AWAKE; - __kfifo_put(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); + kfifo_put(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); wake_up_interruptible(&priv->waitq); -- cgit v1.2.1 From 7acd72eb85f1c7a15e8b5eb554994949241737f1 Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Mon, 21 Dec 2009 14:37:28 -0800 Subject: kfifo: rename kfifo_put... into kfifo_in... and kfifo_get... into kfifo_out... Rename kfifo_put... to kfifo_in... to prevent misuse by old out-of-tree drivers; ditto for kfifo_get... -> kfifo_out... Improve the prototypes of kfifo_in and kfifo_out to make the kerneldoc annotations more readable.
Add a mini "howto porting to the new API" in kfifo.h Signed-off-by: Stefani Seibold Acked-by: Greg Kroah-Hartman Acked-by: Mauro Carvalho Chehab Acked-by: Andi Kleen Acked-by: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/wireless/libertas/main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 0622104f0a03..2bcfa745524a 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -513,7 +513,8 @@ static int lbs_thread(void *data) spin_lock_irq(&priv->driver_lock); while (kfifo_len(&priv->event_fifo)) { u32 event; - kfifo_get(&priv->event_fifo, (unsigned char *) &event, + + kfifo_out(&priv->event_fifo, (unsigned char *) &event, sizeof(event)); spin_unlock_irq(&priv->driver_lock); lbs_process_event(priv, event); @@ -1175,7 +1176,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event) if (priv->psstate == PS_STATE_SLEEP) priv->psstate = PS_STATE_AWAKE; - kfifo_put(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); + kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); wake_up_interruptible(&priv->waitq); -- cgit v1.2.1 From 9842c38e917636fa7dc6b88aff17a8f1fd7f0cc0 Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Mon, 21 Dec 2009 14:37:29 -0800 Subject: kfifo: fix warn_unused_result Fix the "ignoring return value of '...', declared with attribute warn_unused_result" compiler warning in several users of the new kfifo API. It removes the __must_check attribute from kfifo_in() and kfifo_in_locked(), whose return values do not necessarily need to be checked. Fix the allocation bug in the nozomi driver by moving the kfifo_alloc call out of the interrupt handler and into the probe function. Fix the kfifo_out() and kfifo_out_locked() users to handle an unexpected end of fifo. Signed-off-by: Stefani Seibold Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/wireless/libertas/main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 2bcfa745524a..c2975c8e2f21 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -514,8 +514,10 @@ static int lbs_thread(void *data) while (kfifo_len(&priv->event_fifo)) { u32 event; - kfifo_out(&priv->event_fifo, (unsigned char *) &event, - sizeof(event)); + if (kfifo_out(&priv->event_fifo, + (unsigned char *) &event, sizeof(event)) != + sizeof(event)) + break; spin_unlock_irq(&priv->driver_lock); lbs_process_event(priv, event); spin_lock_irq(&priv->driver_lock); -- cgit v1.2.1 From 1ae861e652b5457e7fa98ccbc55abea1e207916e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 31 Dec 2009 12:15:54 +0100 Subject: PCI/PM: Use per-device D3 delays It turns out that some PCI devices require extra delays when changing power state from D3 to D0 (and the other way around). Although this is against the PCI specification, we can handle it quite easily by allowing drivers to define arbitrary D3 delays for devices known to require extra time for switching power states. Introduce an additional field, d3_delay, in struct pci_dev and use it to store the value of the device's D0->D3 delay, in milliseconds. Make the PCI PM core code use the per-device d3_delay unless pci_pm_d3_delay is greater (in which case the latter is used).
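As a sketch of how a driver would use the new field (my_probe is hypothetical; the sky2 hunk below does the same thing):

#include <linux/pci.h>

/* Hypothetical probe: request a 150 ms delay around D0<->D3 transitions
 * for this device; the PM core uses this value unless the global
 * pci_pm_d3_delay is larger. */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;

	pdev->d3_delay = 150;	/* milliseconds */
	return 0;
}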
[This also allows the driver to specify d3_delay shorter than the 10 ms required by the PCI standard if the device is known to be able to handle that.] Make the sky2 driver set d3_delay to 150 for devices handled by it. Fixes http://bugzilla.kernel.org/show_bug.cgi?id=14730 which is a listed regression from 2.6.30. Signed-off-by: Rafael J. Wysocki Signed-off-by: Jesse Barnes --- drivers/net/sky2.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 1c01b96c9611..2d28d58200d0 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -4684,6 +4684,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); + pdev->d3_delay = 150; return 0; -- cgit v1.2.1 From dc2f9c5a13de4f9fd63f49f54add40b2924f66cd Mon Sep 17 00:00:00 2001 From: Li Jie Date: Thu, 31 Dec 2009 16:03:16 +0100 Subject: ARM: 5865/1: nuc900 ethernet driver needs mii The nuc900 ethernet driver uses the mii_xx_xx series of API functions, so the MII module should be selected. Signed-off-by: lijie Acked-by: Wan ZongShun Signed-off-by: Russell King --- drivers/net/arm/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index c37ee9e6b67b..39e1c0d39476 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig @@ -68,6 +68,7 @@ config W90P910_ETH tristate "Nuvoton w90p910 Ethernet support" depends on ARM && ARCH_W90X900 select PHYLIB + select MII help Say Y here if you want to use built-in Ethernet ports on w90p910 processor. -- cgit v1.2.1 From b4f77264cd1a858ee09da8dba5a2711a649adbf3 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 6 Jan 2010 12:54:39 -0800 Subject: mlx4_core: Fix cleanup in __mlx4_init_one() error path If mlx4_init_port_info() fails, clean up only the ports that were already initialized. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/net/mlx4/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 291a505fd4fc..3cf56d90d859 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -1174,7 +1174,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_port: - for (port = 1; port <= dev->caps.num_ports; port++) + for (--port; port >= 1; --port) mlx4_cleanup_port_info(&priv->port[port]); mlx4_cleanup_mcg_table(dev); -- cgit v1.2.1
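For reference, the unwind idiom this fix restores, sketched with hypothetical init_one()/cleanup_one() helpers (not the mlx4 functions themselves): on failure, only the entries that were set up successfully are torn down, in reverse order.

int init_one(void *ctx, int port);	/* hypothetical */
void cleanup_one(void *ctx, int port);	/* hypothetical */

int init_all_ports(void *ctx, int num_ports)
{
	int port, err;

	for (port = 1; port <= num_ports; port++) {
		err = init_one(ctx, port);
		if (err)
			goto err_port;
	}
	return 0;

err_port:
	/* port indexes the entry that failed; step back past it and undo
	 * only the earlier, successfully initialized entries. */
	for (--port; port >= 1; --port)
		cleanup_one(ctx, port);
	return err;
}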