From 013f6579c6e4f9517127a176bfc37bbac0b766cb Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 22 Oct 2014 20:06:29 -0700 Subject: i40e: _MASK vs _SHIFT typo in i40e_handle_mdd_event() We accidentally mask by the _SHIFT variable. It means that "event" is always zero. Signed-off-by: Dan Carpenter Tested-by: Jim Young Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ed5f1c15fb0f..c3a7f4a4b775 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6151,7 +6151,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_TX_PF_NUM_SHIFT; u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; - u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >> + u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; @@ -6165,7 +6165,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; - u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >> + u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; -- cgit v1.2.1 From b71e821de50f0ff92f10f33064ee1713e9014158 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 23 Oct 2014 10:25:53 +0200 Subject: drivers: net: xgene: Rewrite buggy loop in xgene_enet_ecc_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c: In function ‘xgene_enet_ecc_init’: drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c:126: warning: ‘data’ may be used uninitialized in this function Depending on the arbitrary value on the stack, the loop may terminate too early, and cause a bogus -ENODEV failure. Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index e6d24c210198..c22f32622fa9 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) { struct net_device *ndev = p->ndev; u32 data; - int i; + int i = 0; xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); - for (i = 0; i < 10 && data != ~0U ; i++) { + do { usleep_range(100, 110); data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); - } + if (data == ~0U) + return 0; + } while (++i < 10); - if (data != ~0U) { - netdev_err(ndev, "Failed to release memory from shutdown\n"); - return -ENODEV; - } - - return 0; + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; } static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) -- cgit v1.2.1 From 96e4be06cbfcb8c9c2da7c77bacce0e56b581c0b Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 23 Oct 2014 15:57:26 +0300 Subject: net/mlx5_core: Call synchronize_irq() before freeing EQ buffer After destroying the EQ, the object responsible for generating interrupts, call synchronize_irq() to ensure that any handler routines running on other CPU cores finish execution. Only then free the EQ buffer. This patch solves a very rare case when we get panic on driver unload. The same thing is done when we destroy a CQ which is one of the sources generating interrupts. In the case of CQ we want to avoid completion handlers on a CQ that was destroyed. In the case we do the same to avoid receiving asynchronous events after the EQ has been destroyed and its buffers freed. Signed-off-by: Eli Cohen Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ed53291468f3..a278238a2db6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -420,6 +420,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); + synchronize_irq(table->msix_arr[eq->irqn].vector); mlx5_buf_free(dev, &eq->buf); return err; -- cgit v1.2.1 From bf1bac5b7882daa41249f85fbc97828f0597de5c Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 23 Oct 2014 15:57:27 +0300 Subject: net/mlx4_core: Call synchronize_irq() before freeing EQ buffer After moving the EQ ownership to software effectively destroying it, call synchronize_irq() to ensure that any handler routines running on other CPU cores finish execution. Only then free the EQ buffer. The same thing is done when we destroy a CQ which is one of the sources generating interrupts. In the case of CQ we want to avoid completion handlers on a CQ that was destroyed. In the case we do the same to avoid receiving asynchronous events after the EQ has been destroyed and its buffers freed. Signed-off-by: Eli Cohen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/eq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index a49c9d11d8a5..49290a405903 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev, pr_cont("\n"); } } + synchronize_irq(eq->irq); mlx4_mtt_cleanup(dev, &eq->mtt); for (i = 0; i < npages; ++i) -- cgit v1.2.1 From 2376c879b80c83424a3013834be97fb9fe2d4180 Mon Sep 17 00:00:00 2001 From: Anish Bhatt Date: Thu, 23 Oct 2014 14:37:30 -0700 Subject: cxgb4 : Improve handling of DCB negotiation or loss thereof Clear out any DCB apps we might have added to kernel table when we lose DCB sync (or IEEE equivalent event). These were previously left behind and not cleaned up correctly. IEEE allows individual components to work independently, so improve check for IEEE completion by specifying individual components. Fixes: 10b0046685ab ("cxgb4: IEEE fixes for DCBx state machine") Signed-off-by: Anish Bhatt Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 48 ++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 8edf0f5bd679..ee819fd12bd2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -60,6 +60,42 @@ void cxgb4_dcb_version_init(struct net_device *dev) dcb->dcb_version = FW_PORT_DCB_VER_AUTO; } +static void cxgb4_dcb_cleanup_apps(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + struct port_dcb_info *dcb = &pi->dcb; + struct dcb_app app; + int i, err; + + /* zero priority implies remove */ + app.priority = 0; + + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + /* Check if app list is exhausted */ + if (!dcb->app_priority[i].protocolid) + break; + + app.protocol = dcb->app_priority[i].protocolid; + + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + app.selector = dcb->app_priority[i].sel_field + 1; + err = dcb_ieee_setapp(dev, &app); + } else { + app.selector = !!(dcb->app_priority[i].sel_field); + err = dcb_setapp(dev, &app); + } + + if (err) { + dev_err(adap->pdev_dev, + "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n", + dcb_ver_array[dcb->dcb_version], app.selector, + app.protocol, -err); + break; + } + } +} + /* Finite State machine for Data Center Bridging. */ void cxgb4_dcb_state_fsm(struct net_device *dev, @@ -145,6 +181,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, * state. We need to reset back to a ground state * of incomplete. */ + cxgb4_dcb_cleanup_apps(dev); cxgb4_dcb_state_init(dev); dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; dcb->supported = CXGB4_DCBX_FW_SUPPORT; @@ -833,11 +870,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, /* Return whether IEEE Data Center Bridging has been negotiated. 
*/ -static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev) +static inline int +cxgb4_ieee_negotiation_complete(struct net_device *dev, + enum cxgb4_dcb_fw_msgs dcb_subtype) { struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; + if (dcb_subtype && !(dcb->msgs & dcb_subtype)) + return 0; + return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); } @@ -850,7 +892,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app) { int prio; - if (!cxgb4_ieee_negotiation_complete(dev)) + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) return -EINVAL; if (!(app->selector && app->protocol)) return -EINVAL; @@ -872,7 +914,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app) { int ret; - if (!cxgb4_ieee_negotiation_complete(dev)) + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) return -EINVAL; if (!(app->selector && app->protocol)) return -EINVAL; -- cgit v1.2.1 From 3bb062613b1ecbd0c388106f61344d699f7859ec Mon Sep 17 00:00:00 2001 From: Anish Bhatt Date: Thu, 23 Oct 2014 14:37:31 -0700 Subject: cxgb4 : Handle dcb enable correctly Disabling DCBx in firmware automatically enables DCBx for control via host lldp agents. Wait for an explicit setstate call from an lldp agents to enable DCBx instead. Fixes: 76bcb31efc06 ("cxgb4 : Add DCBx support codebase and dcbnl_ops") Signed-off-by: Anish Bhatt Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 7 ++++++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index ee819fd12bd2..6fe300e316c3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -116,7 +116,6 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, /* we're going to use Host DCB */ dcb->state = CXGB4_DCB_STATE_HOST; dcb->supported = CXGB4_DCBX_HOST_SUPPORT; - dcb->enabled = 1; break; } @@ -386,6 +385,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled) { struct port_info *pi = netdev2pinfo(dev); + /* If DCBx is host-managed, dcb is enabled by outside lldp agents */ + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) { + pi->dcb.enabled = enabled; + return 0; + } + /* Firmware doesn't provide any mechanism to control the DCB state. 
	 */
	if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3f60070f2519..97683c1c5b69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
 #ifdef CONFIG_CHELSIO_T4_DCB
 	struct port_info *pi = netdev_priv(dev);
 
-	return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+	if (!pi->dcb.enabled)
+		return 0;
+
+	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
 #else
 	return 0;
 #endif
-- cgit v1.2.1

From 47276fcc2d542e7b15e384c08b1709c1921b06c1 Mon Sep 17 00:00:00 2001
From: Mugunthan V N
Date: Fri, 24 Oct 2014 18:51:33 +0530
Subject: drivers: net:cpsw: fix probe_dt when only slave 1 is pinned out

When slave 0 has no phy and slave 1 is connected to a phy, driver probe
will fail because there is no phy_id property for slave 0 in the device
tree. Continue even though no phy_id is found, and move the mac-id read
later so that the MAC address is still read from the device tree even
when the phy_id entry is not found.

Signed-off-by: Mugunthan V N
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/ti/cpsw.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 952e1e4764b7..d81b84b5e3df 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2006,7 +2006,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		parp = of_get_property(slave_node, "phy_id", &lenp);
 		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
 			dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
-			return -EINVAL;
+			goto no_phy_slave;
 		}
 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 		phyid = be32_to_cpup(parp+1);
@@ -2019,6 +2019,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
 			 PHY_ID_FMT, mdio->name, phyid);
 
+		slave_data->phy_if = of_get_phy_mode(slave_node);
+		if (slave_data->phy_if < 0) {
+			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+				i);
+			return slave_data->phy_if;
+		}
+
+no_phy_slave:
 		mac_addr = of_get_mac_address(slave_node);
 		if (mac_addr) {
 			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
@@ -2030,14 +2038,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 				return ret;
 			}
 		}
-
-		slave_data->phy_if = of_get_phy_mode(slave_node);
-		if (slave_data->phy_if < 0) {
-			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
-				i);
-			return slave_data->phy_if;
-		}
-
 		if (data->dual_emac) {
 			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
 						 &prop)) {
-- cgit v1.2.1

From 8edf0047f4b8e03d94ef88f5a7dec146cce03a06 Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Tue, 28 Oct 2014 11:12:00 -0700
Subject: net: systemport: enable RX interrupts after NAPI

There is currently a small window during which the SYSTEMPORT adapter
enables its RX interrupts without having enabled its NAPI handler, which
can result in packets being discarded during interface bringup.

A similar but more serious window exists in bcm_sysport_resume() during
which we can have the RDMA engine not fully prepared to receive packets
and yet have RX interrupts enabled.
Fix this by moving the RX interrupt enable down to
bcm_sysport_netif_start() after napi_enable() for the RX path is called,
which fixes both call sites: bcm_sysport_open() and bcm_sysport_resume().

Fixes: b02e6d9ba7ad ("net: systemport: add bcm_sysport_netif_{enable,stop}")
Signed-off-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bcmsysport.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9ae36979bdee..7dce91189e51 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1397,6 +1397,9 @@ static void bcm_sysport_netif_start(struct net_device *dev)
 	/* Enable NAPI */
 	napi_enable(&priv->napi);
 
+	/* Enable RX interrupt and TX ring full interrupt */
+	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
 	phy_start(priv->phydev);
 
 	/* Enable TX interrupts for the 32 TXQs */
@@ -1499,9 +1502,6 @@ static int bcm_sysport_open(struct net_device *dev)
 	if (ret)
 		goto out_free_rx_ring;
 
-	/* Enable RX interrupt and TX ring full interrupt */
-	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
 	/* Turn on TDMA */
 	ret = tdma_enable_set(priv, 1);
 	if (ret)
@@ -1885,9 +1885,6 @@ static int bcm_sysport_resume(struct device *d)
 
 	netif_device_attach(dev);
 
-	/* Enable RX interrupt and TX ring full interrupt */
-	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
 	/* RX pipe enable */
 	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
-- cgit v1.2.1

From 704d33e7006f20f9b4fa7d24a0f08c4b5919b131 Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Tue, 28 Oct 2014 11:12:01 -0700
Subject: net: systemport: reset UniMAC coming out of a suspend cycle

bcm_sysport_resume() was missing a UniMAC reset, which can lead to various
receive FIFO corruptions coming out of a suspend cycle. If the RX FIFO is
stuck, it will deliver corrupted/duplicate packets towards the host CPU
interface.

This could be reproduced on a crowded network and when Wake-on-LAN is
enabled for this particular interface, because the switch still forwards
packets towards the host CPU interface (SYSTEMPORT), and we had to leave
the UniMAC RX enable bit on to allow matching MagicPackets.

Once we re-enter the resume function, there is a small window during which
the UniMAC receive is still enabled, and we start queueing packets, but the
RDMA and RBUF engines are not ready, which leads to having packets stuck
in the UniMAC RX FIFO, ultimately delivered towards the host CPU as
corrupted.

Fixes: 40755a0fce17 ("net: systemport: add suspend and resume support")
Signed-off-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bcmsysport.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7dce91189e51..3a6778a667f4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1858,6 +1858,8 @@ static int bcm_sysport_resume(struct device *d)
 	if (!netif_running(dev))
 		return 0;
 
+	umac_reset(priv);
+
 	/* We may have been suspended and never received a WOL event that
 	 * would turn off MPD detection, take care of that now
 	 */
-- cgit v1.2.1

From cd03cf0158449f9f4c19ecb54dfc97d9bd86eeeb Mon Sep 17 00:00:00 2001
From: Hariprasad Shenai
Date: Mon, 27 Oct 2014 23:22:10 +0530
Subject: cxgb4vf: Replace repetitive pci device IDs with right ones

Replaced repetitive device IDs which got added in commit b961f9a48844ecf3
("cxgb4vf: Remove superfluous "idx" parameter of CH_DEVICE() macro")

Signed-off-by: Hariprasad Shenai
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index bfa398d91826..0b42bddaf284 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
 	CH_DEVICE(0x480d),	/* T480-cr */
 	CH_DEVICE(0x480e),	/* T440-lp-cr */
 	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
-	CH_DEVICE(0x4880),
+	CH_DEVICE(0x4881),
+	CH_DEVICE(0x4882),
+	CH_DEVICE(0x4883),
+	CH_DEVICE(0x4884),
+	CH_DEVICE(0x4885),
+	CH_DEVICE(0x4886),
+	CH_DEVICE(0x4887),
+	CH_DEVICE(0x4888),
 	CH_DEVICE(0x5801),	/* T520-cr */
 	CH_DEVICE(0x5802),	/* T522-cr */
 	CH_DEVICE(0x5803),	/* T540-cr */
-- cgit v1.2.1

From 8f4eb70059ee834522ce90a6fce0aa3078c18620 Mon Sep 17 00:00:00 2001
From: Tej Parkash
Date: Tue, 28 Oct 2014 01:18:15 -0400
Subject: cnic: Update the rcu_access_pointer() usages

1. Remove the rcu_read_lock/unlock around rcu_access_pointer
2. Replace the rcu_dereference with rcu_access_pointer

Signed-off-by: Tej Parkash
Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/cnic.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 23f23c97c2ad..f05fab65d78a 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, if (l5_cid >= MAX_CM_SK_TBL_SZ) break; - rcu_read_lock(); if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { rc = -ENODEV; - rcu_read_unlock(); break; } csk = &cp->csk_tbl[l5_cid]; @@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, } } csk_put(csk); - rcu_read_unlock(); rc = 0; } } @@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); mutex_lock(&cnic_lock); - if (rcu_dereference(cp->ulp_ops[ulp_type])) { + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); cnic_put(dev); } else { -- cgit v1.2.1 From a22bb0b9b9b09b4cc711f6d577679773e074dde9 Mon Sep 17 00:00:00 2001 From: Francesco Ruggeri Date: Wed, 22 Oct 2014 15:29:24 +0000 Subject: e1000: unset IFF_UNICAST_FLT on WMware 82545EM VMWare's e1000 implementation does not seem to support unicast filtering. This can be observed by configuring a macvlan interface on eth0 in a VM in VMWare Fusion 5.0.5, and trying to use that interface instead of eth0. Tested on 3.16. Signed-off-by: Francesco Ruggeri Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000/e1000_main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 5f6aded512f5..24f3986cfae2 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_CSUM | NETIF_F_SG); - netdev->priv_flags |= IFF_UNICAST_FLT; + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); -- cgit v1.2.1 From bc16e47f03a7dce9ad68029b21519265c334eb12 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 23 Oct 2014 03:32:27 +0000 Subject: igb: don't reuse pages with pfmemalloc flag Incoming packet is dropped silently by sk_filter(), if the skb was allocated from pfmemalloc reserves and the corresponding socket is not marked with the SOCK_MEMALLOC flag. Igb driver allocates pages for DMA with __skb_alloc_page(), which calls alloc_pages_node() with the __GFP_MEMALLOC flag. So, in case of OOM condition, igb can get pages with pfmemalloc flag set. If an incoming packet hits the pfmemalloc page and is large enough (small packets are copying into the memory, allocated with netdev_alloc_skb_ip_align(), so they are not affected), it will be dropped. This behavior is ok under high memory pressure, but the problem is that the igb driver reuses these mapped pages. So, packets are still dropping even if all memory issues are gone and there is a plenty of free memory. In my case, some TCP sessions hang on a small percentage (< 0.1%) of machines days after OOMs. 
Fix this by avoiding reuse of such pages. Signed-off-by: Roman Gushchin Tested-by: Aaron Brown "aaron.f.brown@intel.com" Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a21b14495ebd..a2d72a87cbde 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6537,6 +6537,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, if (unlikely(page_to_nid(page) != numa_node_id())) return false; + if (unlikely(page->pfmemalloc)) + return false; + #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely(page_count(page) != 1)) @@ -6603,7 +6606,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* we can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(page) == numa_node_id())) + if (likely((page_to_nid(page) == numa_node_id()) && + !page->pfmemalloc)) return true; /* this page cannot be reused so discard it */ -- cgit v1.2.1 From 4d2fcfbcf8141cdf70245a0c0612b8076f4b7e32 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Wed, 22 Oct 2014 15:29:03 +0000 Subject: ixgbe: need not repeat init skb with NULL Signed-off-by: Martin Zhang Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fec5212d4337..d2df4e3d1032 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) IXGBE_CB(skb)->page_released = false; } dev_kfree_skb(skb); + rx_buffer->skb = NULL; } - rx_buffer->skb = NULL; if (rx_buffer->dma) dma_unmap_page(dev, rx_buffer->dma, ixgbe_rx_pg_size(rx_ring), -- cgit v1.2.1 From e3215f0ac77ec23b052cb0bf511143038ac2ad7b Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Tue, 28 Oct 2014 05:50:03 +0000 Subject: ixgbe: fix race when setting advertised speed Following commands: modprobe ixgbe ifconfig ethX up ethtool -s ethX advertise 0x020 can lead to "setup link failed with code -14" error due to the setup_link call racing with the SFP detection routine in the watchdog. This patch resolves this issue by protecting the setup_link call with check for __IXGBE_IN_SFP_INIT. 
Reported-by: Scott Harrison Signed-off-by: Emil Tantilov Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 3ce4a258f945..0ae038b9af90 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev, if (old == advertised) return err; /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + hw->mac.autotry_restart = true; err = hw->mac.ops.setup_link(hw, advertised, true); if (err) { e_info(probe, "setup link failed with code %d\n", err); hw->mac.ops.setup_link(hw, old, true); } + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } else { /* in this case we currently only support 10Gb/FULL */ u32 speed = ethtool_cmd_speed(ecmd); -- cgit v1.2.1 From e327c225c911529898ec300cb96d2088893de3df Mon Sep 17 00:00:00 2001 From: Anish Bhatt Date: Wed, 29 Oct 2014 17:54:03 -0700 Subject: cxgb4 : Fix missing initialization of win0_lock win0_lock was being used un-initialized, resulting in warning traces being seen when lock debugging is enabled (and just wrong) Fixes : fc5ab0209650 ('cxgb4: Replaced the backdoor mechanism to access the HW memory with PCIe Window method') Signed-off-by: Anish Bhatt Signed-off-by: Casey Leedom Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 97683c1c5b69..8520d5529df8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6614,6 +6614,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); + spin_lock_init(&adapter->win0_lock); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); INIT_WORK(&adapter->db_full_task, process_db_full); -- cgit v1.2.1 From a4f2dacbf2a5045e34b98a35d9a3857800f25a7b Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 30 Oct 2014 15:59:27 +0200 Subject: net/mlx4_en: Don't attempt to TX offload the outer UDP checksum for VXLAN For VXLAN/NVGRE encapsulation, the current HW doesn't support offloading both the outer UDP TX checksum and the inner TCP/UDP TX checksum. The driver doesn't advertize SKB_GSO_UDP_TUNNEL_CSUM, however we are wrongly telling the HW to offload the outer UDP checksum for encapsulated packets, fix that. Fixes: 837052d0ccc5 ('net/mlx4_en: Add netdev support for TCP/IP offloads of vxlan tunneling') Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 34c137878545..454d9fea640e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) * whether LSO is used */ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | - MLX4_WQE_CTRL_TCP_UDP_CSUM); + if (!skb->encapsulation) + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + else + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM); ring->tx_csum++; } -- cgit v1.2.1 From 571e1b2c7a4c2fd5faa1648462a6b65fa26530d7 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 30 Oct 2014 15:59:28 +0200 Subject: mlx4: Avoid leaking steering rules on flow creation error flow If mlx4_ib_create_flow() attempts to create > 1 rules with the firmware, and one of these registrations fail, we leaked the already created flow rules. One example of the leak is when the registration of the VXLAN ghost steering rule fails, we didn't unregister the original rule requested by the user, introduced in commit d2fce8a9060d "mlx4: Set user-space raw Ethernet QPs to properly handle VXLAN traffic". While here, add dump of the VXLAN portion of steering rules so it can actually be seen when flow creation fails. Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/mcg.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index ca0f98c95105..872843179f44 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, cur->ib.dst_gid_msk); break; + case MLX4_NET_TRANS_RULE_ID_VXLAN: + len += snprintf(buf + len, BUF_SIZE - len, + "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); + break; case MLX4_NET_TRANS_RULE_ID_IPV6: break; -- cgit v1.2.1 From 7d2911c4381555b31ef0bcae42a0dbf9ade7426e Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 30 Oct 2014 09:59:27 -0700 Subject: net: smc91x: Fix gpios for device tree based booting With legacy booting, the platform init code was taking care of the configuring of GPIOs. With device tree based booting, things may or may not work depending what bootloader has configured or if the legacy platform code gets called. Let's add support for the pwrdn and reset GPIOs to the smc91x driver to fix the issues of smc91x not working properly when booted in device tree mode. And let's change n900 to use these settings as some versions of the bootloader do not configure things properly causing errors. Reported-by: Kevin Hilman Signed-off-by: Tony Lindgren Signed-off-by: David S. 
Miller --- drivers/net/ethernet/smsc/smc91x.c | 58 ++++++++++++++++++++++++++++++++++++++ drivers/net/ethernet/smsc/smc91x.h | 3 ++ 2 files changed, 61 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 5e94d00b96b3..2c62208077fe 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -81,6 +81,7 @@ static const char version[] = #include #include #include +#include #include #include @@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = { {}, }; MODULE_DEVICE_TABLE(of, smc91x_match); + +/** + * of_try_set_control_gpio - configure a gpio if it exists + */ +static int try_toggle_control_gpio(struct device *dev, + struct gpio_desc **desc, + const char *name, int index, + int value, unsigned int nsdelay) +{ + struct gpio_desc *gpio = *desc; + int res; + + gpio = devm_gpiod_get_index(dev, name, index); + if (IS_ERR(gpio)) { + if (PTR_ERR(gpio) == -ENOENT) { + *desc = NULL; + return 0; + } + + return PTR_ERR(gpio); + } + res = gpiod_direction_output(gpio, !value); + if (res) { + dev_err(dev, "unable to toggle gpio %s: %i\n", name, res); + devm_gpiod_put(dev, gpio); + gpio = NULL; + return res; + } + if (nsdelay) + usleep_range(nsdelay, 2 * nsdelay); + gpiod_set_value_cansleep(gpio, value); + *desc = gpio; + + return 0; +} #endif /* @@ -2237,6 +2273,28 @@ static int smc_drv_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; u32 val; + /* Optional pwrdwn GPIO configured? */ + ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, + "power", 0, 0, 100); + if (ret) + return ret; + + /* + * Optional reset GPIO configured? Minimum 100 ns reset needed + * according to LAN91C96 datasheet page 14. + */ + ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, + "reset", 0, 0, 100); + if (ret) + return ret; + + /* + * Need to wait for optional EEPROM to load, max 750 us according + * to LAN91C96 datasheet page 55. + */ + if (lp->reset_gpio) + usleep_range(750, 1000); + /* Combination of IO widths supported, default to 16-bit */ if (!of_property_read_u32(np, "reg-io-width", &val)) { if (val & 1) diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 47dce918eb0f..2a38dacbbd27 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -298,6 +298,9 @@ struct smc_local { struct sk_buff *pending_tx_skb; struct tasklet_struct tx_task; + struct gpio_desc *power_gpio; + struct gpio_desc *reset_gpio; + /* version/revision of the SMC91x chip */ int version; -- cgit v1.2.1 From 1e19e084eae727654052339757ab7f1eaff58bad Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 31 Oct 2014 18:28:03 +0200 Subject: stmmac: pci: set default of the filter bins The commit 3b57de958e2a brought the support for a different amount of the filter bins, but didn't update the PCI driver accordingly. This patch appends the default values when the device is enumerated via PCI bus. Fixes: 3b57de958e2a (net: stmmac: Support devicetree configs for mcast and ucast filter entries) Signed-off-by: Andy Shevchenko Cc: stable@vger.kernel.org Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 655a23bbc451..e17a970eaf2b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg; static void stmmac_default_data(void) { memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); + plat_dat.bus_id = 1; plat_dat.phy_addr = 0; plat_dat.interface = PHY_INTERFACE_MODE_GMII; @@ -47,6 +48,12 @@ static void stmmac_default_data(void) dma_cfg.pbl = 32; dma_cfg.burst_len = DMA_AXI_BLEN_256; plat_dat.dma_cfg = &dma_cfg; + + /* Set default value for multicast hash bins */ + plat_dat.multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat.unicast_filter_entries = 1; } /** -- cgit v1.2.1 From 6f979eb3fcfb4c3f42f230d174db4bbad0080710 Mon Sep 17 00:00:00 2001 From: Lennart Sorensen Date: Fri, 31 Oct 2014 13:28:54 -0400 Subject: drivers: net: cpsw: Fix broken loop condition in switch mode 0d961b3b52f566f823070ce2366511a7f64b928c (drivers: net: cpsw: fix buggy loop condition) accidentally fixed a loop comparison in too many places while fixing a real bug. It was correct to fix the dual_emac mode section since there 'i' is used as an index into priv->slaves which is a 0 based array. However the other two changes (which are only used in switch mode) are wrong since there 'i' is actually the ALE port number, and port 0 is the host port, while port 1 and up are the slave ports. Putting the loop condition back in the switch mode section fixes it. A comment has been added to point out the intent clearly to avoid future confusion. Also a comment is fixed that said the opposite of what was actually happening. Signed-off-by: Len Sorensen Acked-by: Heiko Schocher Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index d81b84b5e3df..fd4577d30c5d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -591,8 +591,8 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) if (enable) { unsigned long timeout = jiffies + HZ; - /* Disable Learn for all ports */ - for (i = 0; i < priv->data.slaves; i++) { + /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */ + for (i = 0; i <= priv->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 1); cpsw_ale_control_set(ale, i, @@ -616,11 +616,11 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); dev_dbg(&ndev->dev, "promiscuity enabled\n"); } else { - /* Flood All Unicast Packets to Host port */ + /* Don't Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); - /* Enable Learn for all ports */ - for (i = 0; i < priv->data.slaves; i++) { + /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */ + for (i = 0; i <= priv->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 0); cpsw_ale_control_set(ale, i, -- cgit v1.2.1 From 1e5c4bc497c0a96e1ad2974539d353870f2cb0b6 Mon Sep 17 00:00:00 2001 From: Lennart Sorensen Date: Fri, 31 Oct 2014 13:38:52 -0400 Subject: drivers: net: cpsw: Support ALLMULTI and fix IFF_PROMISC in switch mode The cpsw driver did not support the IFF_ALLMULTI flag which makes dynamic multicast routing not work. Related to this, when enabling IFF_PROMISC in switch mode, all registered multicast addresses are flushed, resulting in only broadcast and unicast traffic being received. A new cpsw_ale_set_allmulti function now scans through the ALE entry table and adds/removes the host port from the unregistered multicast port mask of each vlan entry depending on the state of IFF_ALLMULTI. In promiscious mode, cpsw_ale_set_allmulti is used to force reception of all multicast traffic in addition to the unicast and broadcast traffic. With this change dynamic multicast and promiscious mode both work in switch mode. Signed-off-by: Len Sorensen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 20 ++++++++++++++++++-- drivers/net/ethernet/ti/cpsw_ale.c | 29 +++++++++++++++++++++++++++++ drivers/net/ethernet/ti/cpsw_ale.h | 2 ++ 3 files changed, 49 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index fd4577d30c5d..d8794488f80a 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -638,12 +638,16 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ cpsw_set_promiscious(ndev, true); + cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI); return; } else { /* Disable promiscuous mode */ cpsw_set_promiscious(ndev, false); } + /* Restore allmulti on vlans if necessary */ + cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); + /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); @@ -1149,6 +1153,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) const int port = priv->host_port; u32 reg; int i; + int unreg_mcast_mask; reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : CPSW2_PORT_VLAN; @@ -1158,9 +1163,14 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) for (i = 0; i < priv->data.slaves; i++) slave_write(priv->slaves + i, vlan, reg); + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, - (ALE_PORT_1 | ALE_PORT_2) << port); + unreg_mcast_mask << port); } static void cpsw_init_host_port(struct cpsw_priv *priv) @@ -1620,11 +1630,17 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, unsigned short vid) { int ret; + int unreg_mcast_mask; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; ret = cpsw_ale_add_vlan(priv->ale, vid, ALE_ALL_PORTS << priv->host_port, 0, ALE_ALL_PORTS << priv->host_port, - (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); + unreg_mcast_mask << priv->host_port); if (ret != 0) return ret; diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 0579b2243bb6..3ae83879a75f 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -443,6 +443,35 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) return 0; } +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + int unreg_mcast = 0; + + /* Only bother doing the work if the setting is actually changing */ + if (ale->allmulti == allmulti) + return; + + /* Remember the new setting to check against next time */ + ale->allmulti = allmulti; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + + unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry); + if (allmulti) + unreg_mcast |= 1; + else + unreg_mcast &= ~1; + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_write(ale, idx, ale_entry); + } +} + struct ale_control_info { const char *name; int offset, port_offset; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 31cf43cab42e..c0d4127aa549 100644 --- 
a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -27,6 +27,7 @@ struct cpsw_ale { struct cpsw_ale_params params; struct timer_list timer; unsigned long ageout; + int allmulti; }; enum cpsw_ale_control { @@ -103,6 +104,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast); int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, -- cgit v1.2.1 From ec1f1276022e4e3ca40871810217d513e39ff250 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 31 Oct 2014 13:43:06 -0400 Subject: sunhme: Add DMA mapping error checks. Reported-by: Meelis Roos Tested-by: Meelis Roos Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sunhme.c | 62 +++++++++++++++++++++++++++++++++++---- 1 file changed, 57 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 72c8525d5457..9c014803b03b 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp) HMD(("init rxring, ")); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; + u32 mapping; skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (!skb) { @@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp) /* Because we reserve afterwards. */ skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); + mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(hp->dma_dev, mapping)) { + dev_kfree_skb_any(skb); + hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); + continue; + } hme_write_rxd(hp, &hb->happy_meal_rxd[i], (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), - dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, - DMA_FROM_DEVICE)); + mapping); skb_reserve(skb, RX_OFFSET); } @@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) skb = hp->rx_skbs[elem]; if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; + u32 mapping; /* Now refill the entry, if we can. */ new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); @@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) drops++; goto drop_it; } + skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); + mapping = dma_map_single(hp->dma_dev, new_skb->data, + RX_BUF_ALLOC_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { + dev_kfree_skb_any(new_skb); + drops++; + goto drop_it; + } + dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); hp->rx_skbs[elem] = new_skb; - skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), - dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, - DMA_FROM_DEVICE)); + mapping); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. 
*/ @@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } +static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping, + u32 first_len, u32 first_entry, u32 entry) +{ + struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; + + dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE); + + first_entry = NEXT_TX(first_entry); + while (first_entry != entry) { + struct happy_meal_txd *this = &txbase[first_entry]; + u32 addr, len; + + addr = hme_read_desc32(hp, &this->tx_addr); + len = hme_read_desc32(hp, &this->tx_flags); + len &= TXFLAG_SIZE; + dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE); + } +} + static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, len = skb->len; mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) + goto out_dma_error; tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], (tx_flags | (len & TXFLAG_SIZE)), @@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, first_len = skb_headlen(skb); first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping))) + goto out_dma_error; entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { @@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, len = skb_frag_size(this_frag); mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 0, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { + unmap_partial_tx_skb(hp, first_mapping, first_len, + first_entry, entry); + goto out_dma_error; + } this_txflags = tx_flags; if (frag == skb_shinfo(skb)->nr_frags - 1) this_txflags |= TXFLAG_EOP; @@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); return NETDEV_TX_OK; + +out_dma_error: + hp->tx_skbs[hp->tx_new] = NULL; + spin_unlock_irq(&hp->happy_lock); + + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; } static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) -- cgit v1.2.1
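[Editor's note] The sunhme change above follows the standard DMA API rule: every dma_map_single()/skb_frag_dma_map() result must be checked with dma_mapping_error() before the address is handed to hardware, and any partially mapped frames must be unwound. The fragment below is a minimal, generic sketch of that pattern for a transmit path; it is not code from the sunhme driver, and the my_priv structure and my_xmit() function are made-up names used only for illustration.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver-private data; only the DMA device is shown. */
struct my_priv {
	struct device *dma_dev;
};

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	dma_addr_t mapping;

	/* Map the linear part of the skb for device reads (TX). */
	mapping = dma_map_single(priv->dma_dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dma_dev, mapping))) {
		/* Mapping failed: drop the packet rather than posting a
		 * bogus DMA address to the hardware descriptor ring.
		 */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* ... write "mapping" and skb->len into a TX descriptor here ... */

	return NETDEV_TX_OK;
}

For multi-fragment skbs the same check applies to every fragment, and on failure the fragments already mapped must be unmapped again, which is what the unmap_partial_tx_skb() helper in the patch above does for sunhme.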