From 3132d2827d92c2ee47fdf4dbec75bba0a2f291cb Mon Sep 17 00:00:00 2001
From: Ben Hutchings
Date: Sat, 5 May 2012 02:31:23 +0100
Subject: sfc: Fix division by zero when using one RX channel and no SR-IOV

If RSS is disabled on the PF (efx->n_rx_channels == 1) we try to set up
the indirection table so that VFs can use it, setting
efx->rss_spread = efx_vf_size(efx).  But if SR-IOV was disabled at
compile time, this evaluates to 0 and we end up dividing by zero when
initialising the table.

I considered changing the fallback definition of efx_vf_size() to
return 1, but its value is really meaningless if we are not going to
enable VFs.  Therefore add a condition of efx_sriov_wanted(efx) in
efx_probe_interrupts().

Signed-off-by: Ben Hutchings
---
 drivers/net/ethernet/sfc/efx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cbfbffe3f00..4a0005342e65 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1349,7 +1349,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 	}
 
 	/* RSS might be usable on VFs even if it is disabled on the PF */
-	efx->rss_spread = (efx->n_rx_channels > 1 ?
+	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
 			   efx->n_rx_channels : efx_vf_size(efx));
 
 	return 0;
-- cgit v1.2.1
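
For readers who want to see the failure mode in isolation, the sketch below is a
minimal userspace model rather than sfc driver code: the modulo fill merely stands
in for the RSS indirection-table initialisation, and the constants and variable
names only mirror the fields named in the commit message.

/* Minimal userspace model of the rss_spread fallback -- not driver code. */
#include <stdio.h>

#define TABLE_SIZE 128

static unsigned int rx_indir_table[TABLE_SIZE];

/* Stand-in for the indirection table fill; a spread of 0 would divide by zero. */
static void fill_indir_table(unsigned int rss_spread)
{
	unsigned int i;

	for (i = 0; i < TABLE_SIZE; i++)
		rx_indir_table[i] = i % rss_spread;
}

int main(void)
{
	unsigned int n_rx_channels = 1;	/* RSS disabled on the PF */
	unsigned int vf_size = 0;	/* efx_vf_size() fallback with SR-IOV compiled out */
	int sriov_wanted = 0;		/* efx_sriov_wanted() is false in that build */

	/* Old expression: falls back to the VF size, i.e. 0. */
	unsigned int old_spread = n_rx_channels > 1 ? n_rx_channels : vf_size;

	/* Patched expression: also stays on n_rx_channels when no VFs are wanted. */
	unsigned int new_spread = (n_rx_channels > 1 || !sriov_wanted) ?
				  n_rx_channels : vf_size;

	printf("old rss_spread = %u (the table fill would divide by zero)\n", old_spread);
	printf("new rss_spread = %u\n", new_spread);
	fill_indir_table(new_spread);	/* safe: the spread is 1, never 0 */
	return 0;
}

With these inputs the old ternary yields a spread of 0, which is exactly the value
the indirection-table fill divides by; the patched condition keeps the spread at 1.
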
From 477206a018f902895bfcd069dd820bfe94c187b1 Mon Sep 17 00:00:00 2001
From: Julien Ducourthial
Date: Wed, 9 May 2012 00:00:06 +0200
Subject: r8169: fix unsigned int wraparound with TSO

The r8169 may get stuck or show bad behaviour after activating TSO:
the net_device is not stopped when it has no more TX descriptors.

This problem comes from TX_BUFFS_AVAIL, which may reach -1 (wrapping to a
huge unsigned value) when all transmit descriptors are in use.  The patch
simply tries to keep positive values.

Tested with 8111d(onboard) on a D510MO, and with 8111e(onboard) on a
Zotac 890GXITX.

Signed-off-by: Julien Ducourthial
Acked-by: Francois Romieu
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/realtek/r8169.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f54509377efa..ce6b44d1f252 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -61,8 +61,12 @@
 #define R8169_MSG_DEFAULT \
 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
-#define TX_BUFFS_AVAIL(tp) \
-	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@ -5115,7 +5119,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	u32 opts[2];
 	int frags;
 
-	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
 	}
@@ -5169,7 +5173,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	mmiowb();
 
-	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
@@ -5183,7 +5187,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 		 * can't.
 		 */
 		smp_mb();
-		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
 			netif_wake_queue(dev);
 	}
 
@@ -5306,7 +5310,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 		 */
 		smp_mb();
 		if (netif_queue_stopped(dev) &&
-		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 			netif_wake_queue(dev);
 		}
 		/*
-- cgit v1.2.1
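
The wraparound is easy to demonstrate outside the driver.  The following
userspace sketch restates the old and new macros with the ring indices passed
in directly (the real macros take the driver's tp pointer) and made-up index
values; it illustrates the arithmetic only and is not r8169 code.

/* Userspace sketch of the r8169 TX slot accounting -- not driver code. */
#include <stdio.h>

#define NUM_TX_DESC	64
#define MAX_SKB_FRAGS	17	/* illustrative value */

/* Old macro: unsigned arithmetic wraps to a huge value when the ring is full. */
#define TX_BUFFS_AVAIL(dirty_tx, cur_tx) \
	((dirty_tx) + NUM_TX_DESC - (cur_tx) - 1)

/* New macros from the patch, adapted to take the indices directly. */
#define TX_SLOTS_AVAIL(dirty_tx, cur_tx) \
	((dirty_tx) + NUM_TX_DESC - (cur_tx))
#define TX_FRAGS_READY_FOR(dirty_tx, cur_tx, nr_frags) \
	(TX_SLOTS_AVAIL(dirty_tx, cur_tx) >= (unsigned int)((nr_frags) + 1))

int main(void)
{
	/* All descriptors in use: cur_tx has advanced NUM_TX_DESC past dirty_tx. */
	unsigned int dirty_tx = 100;
	unsigned int cur_tx = dirty_tx + NUM_TX_DESC;

	printf("old TX_BUFFS_AVAIL = %u (wrapped, looks plentiful)\n",
	       TX_BUFFS_AVAIL(dirty_tx, cur_tx));
	printf("new TX_SLOTS_AVAIL = %u\n", TX_SLOTS_AVAIL(dirty_tx, cur_tx));
	printf("ready for %d frags? %s\n", MAX_SKB_FRAGS,
	       TX_FRAGS_READY_FOR(dirty_tx, cur_tx, MAX_SKB_FRAGS) ? "yes" : "no");
	return 0;
}

With these sample indices the old macro reports 4294967295 free slots while the
new one reports 0, which is exactly the condition under which the queue used to
stay awake with no descriptors left.
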
From cfb8c3aa59302636c69890be10b2ef23a7ca83b2 Mon Sep 17 00:00:00 2001
From: Benjamin Poirier
Date: Thu, 10 May 2012 15:38:37 +0000
Subject: igb: fix rtnl race in PM resume path

Since the caller (PM resume code) is not the one holding rtnl, when taking
the 'else' branch, rtnl may be released at any moment, thereby defeating
the whole purpose of this code block.

Signed-off-by: Benjamin Poirier
Tested-by: Jeff Pieper
Signed-off-by: Jeff Kirsher
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/intel/igb/igb_main.c | 20 ++++++--------------
 1 file changed, 6 insertions(+), 14 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d22350055285..8683ca4748c8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1111,9 +1111,12 @@ msi_only:
 	adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
 	/* Notify the stack of the (possibly) reduced queue counts. */
+	rtnl_lock();
 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-	return netif_set_real_num_rx_queues(adapter->netdev,
-					    adapter->num_rx_queues);
+	err = netif_set_real_num_rx_queues(adapter->netdev,
+					   adapter->num_rx_queues);
+	rtnl_unlock();
+	return err;
 }
 
 /**
@@ -6796,18 +6799,7 @@ static int igb_resume(struct device *dev)
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	if (!rtnl_is_locked()) {
-		/*
-		 * shut up ASSERT_RTNL() warning in
-		 * netif_set_real_num_tx/rx_queues.
-		 */
-		rtnl_lock();
-		err = igb_init_interrupt_scheme(adapter);
-		rtnl_unlock();
-	} else {
-		err = igb_init_interrupt_scheme(adapter);
-	}
-	if (err) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
-- cgit v1.2.1

From 380ec964bc19f865af70c0339dff1cb75dc4f8f2 Mon Sep 17 00:00:00 2001
From: Thadeu Lima de Souza Cascardo
Date: Thu, 10 May 2012 04:00:53 +0000
Subject: ehea: fix losing of NEQ events when one event occurred early

The NEQ interrupt is only triggered when there was no previous pending
interrupt.  If we request irq handling after an interrupt has occurred,
we will never get an interrupt until we call H_RESET_EVENTS.

Events seem to be cleared when we first register the NEQ.  So, when we
requested irq handling right after registering it, a possible race with
an interrupt was much less likely.  Now, there is a chance we may lose
this race and never get any events.

The fix here is to poll and acknowledge any events that might have
happened right after registering the irq handler.

Signed-off-by: Thadeu Lima de Souza Cascardo
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/ibm/ehea/ehea_main.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index c9069a28832b..f4d2da0db1b1 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3335,6 +3335,8 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
 		goto out_shutdown_ports;
 	}
 
+	/* Handle any events that might be pending. */
+	tasklet_hi_schedule(&adapter->neq_tasklet);
 	ret = 0;
 	goto out;
-- cgit v1.2.1

From 062e55e3960062fc2fb62a7274b4c253003eba73 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Thu, 10 May 2012 12:51:30 +0000
Subject: ks8851: Update link status during link change interrupt

If a link change interrupt comes in, we just clear the interrupt and
continue along without notifying the upper networking layers that the
link has changed.  Use the mii_check_link() function to update the link
status whenever a link change interrupt occurs.

Cc: Ben Dooks
Signed-off-by: Stephen Boyd
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/micrel/ks8851.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'drivers/net/ethernet')

diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index f8dda009d3c0..5e313e9a252f 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -618,10 +618,8 @@ static void ks8851_irq_work(struct work_struct *work)
 	netif_dbg(ks, intr, ks->netdev,
 		  "%s: status 0x%04x\n", __func__, status);
 
-	if (status & IRQ_LCI) {
-		/* should do something about checking link status */
+	if (status & IRQ_LCI)
 		handled |= IRQ_LCI;
-	}
 
 	if (status & IRQ_LDI) {
 		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
@@ -684,6 +682,9 @@ static void ks8851_irq_work(struct work_struct *work)
 
 	mutex_unlock(&ks->lock);
 
+	if (status & IRQ_LCI)
+		mii_check_link(&ks->mii);
+
 	if (status & IRQ_TXI)
 		netif_wake_queue(ks->netdev);
-- cgit v1.2.1
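
The ks8851 change above follows a common driver pattern: latch the interrupt
status while holding the device lock, and only call back into the MII/netdev
layer once the lock has been dropped.  The skeleton below is a hedged userspace
rendering of that pattern, with a pthread mutex standing in for ks->lock and
stub handlers in place of mii_check_link()/netif_wake_queue(); it is not
ks8851 code.

/* Userspace skeleton of "latch under lock, act after unlock" -- not ks8851 code. */
#include <pthread.h>
#include <stdio.h>

#define IRQ_LCI	0x0001	/* link change */
#define IRQ_TXI	0x0002	/* transmit done */

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int read_and_clear_status(void)
{
	/* Stand-in for reading/acking the chip's interrupt status register. */
	return IRQ_LCI | IRQ_TXI;
}

static void check_link(void)    { printf("link state re-checked\n"); }
static void wake_tx_queue(void) { printf("tx queue woken\n"); }

static void irq_work(void)
{
	unsigned int status;

	pthread_mutex_lock(&dev_lock);
	status = read_and_clear_status();
	/* ... register access that genuinely needs the lock goes here ... */
	pthread_mutex_unlock(&dev_lock);

	/* Notify upper layers only after the lock has been released. */
	if (status & IRQ_LCI)
		check_link();
	if (status & IRQ_TXI)
		wake_tx_queue();
}

int main(void)
{
	irq_work();
	return 0;
}
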
From 3ab77bf271e6a41512e366dfa5110edb981ed1d3 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 14 May 2012 09:26:06 +0000
Subject: pch_gbe: fix transmit races

Andy reported pch_gbe triggered "NETDEV WATCHDOG" errors.

May 11 11:06:09 kontron kernel: WARNING: at net/sched/sch_generic.c:261 dev_watchdog+0x1ec/0x200() (Not tainted)
May 11 11:06:09 kontron kernel: Hardware name: N/A
May 11 11:06:09 kontron kernel: NETDEV WATCHDOG: eth0 (pch_gbe): transmit queue 0 timed out

It seems pch_gbe has a racy tx path (it races with the TX completion path).

Remove the tx_queue_lock since it serves no purpose; we must use tx_lock
instead.

Signed-off-by: Eric Dumazet
Reported-by: Andy Cress
Tested-by: Andy Cress
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h    |  2 --
 .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c   | 25 ++++++++++------------
 2 files changed, 11 insertions(+), 16 deletions(-)

(limited to 'drivers/net/ethernet')
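
Reduced to its essentials, the fix makes both the transmit and the completion
path serialise on the one remaining tx_lock, and keeps the transmit path inside
the lock until the descriptor has actually been queued.  The sketch below is a
simplified userspace model of that discipline (pthread mutex and stub helpers
with assumed names, not the driver itself); the diff that follows is the real
change.

/* Simplified sketch of the single-lock TX discipline -- not pch_gbe code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct tx_ring {
	pthread_mutex_t tx_lock;
	unsigned int next_to_use;
	unsigned int next_to_clean;
};

static unsigned int slots_free(const struct tx_ring *r)
{
	return RING_SIZE - 1 - (r->next_to_use - r->next_to_clean) % RING_SIZE;
}

static void queue_descriptor(struct tx_ring *r)
{
	/* Stand-in for pch_gbe_tx_queue(): consumes one slot. */
	r->next_to_use = (r->next_to_use + 1) % RING_SIZE;
}

static bool xmit_frame(struct tx_ring *r)
{
	pthread_mutex_lock(&r->tx_lock);
	if (!slots_free(r)) {
		pthread_mutex_unlock(&r->tx_lock);
		return false;			/* would be NETDEV_TX_BUSY */
	}
	queue_descriptor(r);			/* still under tx_lock, unlike the old code */
	pthread_mutex_unlock(&r->tx_lock);
	return true;
}

static void clean_tx(struct tx_ring *r, unsigned int completed)
{
	pthread_mutex_lock(&r->tx_lock);	/* same lock as the xmit path */
	r->next_to_clean = (r->next_to_clean + completed) % RING_SIZE;
	/* the queue wake-up would happen here, still under tx_lock */
	pthread_mutex_unlock(&r->tx_lock);
}

int main(void)
{
	struct tx_ring ring = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	unsigned int sent = 0;

	while (xmit_frame(&ring))
		sent++;
	printf("queued %u frames before the ring filled\n", sent);
	clean_tx(&ring, sent);
	printf("after cleaning, %u slots free\n", slots_free(&ring));
	return 0;
}
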
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index dd14915f54bb..ba781747d174 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -584,7 +584,6 @@ struct pch_gbe_hw_stats {
 /**
  * struct pch_gbe_adapter - board specific private data structure
  * @stats_lock:	Spinlock structure for status
- * @tx_queue_lock:	Spinlock structure for transmit
  * @ethtool_lock:	Spinlock structure for ethtool
  * @irq_sem:		Semaphore for interrupt
  * @netdev:		Pointer of network device structure
@@ -609,7 +608,6 @@ struct pch_gbe_hw_stats {
 struct pch_gbe_adapter {
 	spinlock_t stats_lock;
-	spinlock_t tx_queue_lock;
 	spinlock_t ethtool_lock;
 	atomic_t irq_sem;
 	struct net_device *netdev;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8035e5ff6e06..1e38d502a062 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -640,14 +640,11 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
  */
 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 {
-	int size;
-
-	size = (int)sizeof(struct pch_gbe_tx_ring);
-	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+	adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
 		return -ENOMEM;
-	size = (int)sizeof(struct pch_gbe_rx_ring);
-	adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+
+	adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
 	if (!adapter->rx_ring) {
 		kfree(adapter->tx_ring);
 		return -ENOMEM;
@@ -1162,7 +1159,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
 	struct sk_buff *tmp_skb;
 	unsigned int frame_ctrl;
 	unsigned int ring_num;
-	unsigned long flags;
 
 	/*-- Set frame control --*/
 	frame_ctrl = 0;
@@ -1211,14 +1207,14 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
 			}
 		}
 	}
 
-	spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
 	ring_num = tx_ring->next_to_use;
 	if (unlikely((ring_num + 1) == tx_ring->count))
 		tx_ring->next_to_use = 0;
 	else
 		tx_ring->next_to_use = ring_num + 1;
 
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
 	buffer_info = &tx_ring->buffer_info[ring_num];
 	tmp_skb = buffer_info->skb;
@@ -1518,7 +1514,7 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
 					  &rx_ring->rx_buff_pool_logic,
 					  GFP_KERNEL);
 	if (!rx_ring->rx_buff_pool) {
-		pr_err("Unable to allocate memory for the receive poll buffer\n");
+		pr_err("Unable to allocate memory for the receive pool buffer\n");
 		return -ENOMEM;
 	}
 	memset(rx_ring->rx_buff_pool, 0, size);
@@ -1637,15 +1633,17 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
 		 cleaned_count);
 	/* Recover from running out of Tx resources in xmit_frame */
+	spin_lock(&tx_ring->tx_lock);
 	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
 		netif_wake_queue(adapter->netdev);
 		adapter->stats.tx_restart_count++;
 		pr_debug("Tx wake queue\n");
 	}
-	spin_lock(&adapter->tx_queue_lock);
+
 	tx_ring->next_to_clean = i;
-	spin_unlock(&adapter->tx_queue_lock);
+
 	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+	spin_unlock(&tx_ring->tx_lock);
 	return cleaned;
 }
@@ -2037,7 +2035,6 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
 		return -ENOMEM;
 	}
 	spin_lock_init(&adapter->hw.miim_lock);
-	spin_lock_init(&adapter->tx_queue_lock);
 	spin_lock_init(&adapter->stats_lock);
 	spin_lock_init(&adapter->ethtool_lock);
 	atomic_set(&adapter->irq_sem, 0);
@@ -2142,10 +2139,10 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 			 tx_ring->next_to_use, tx_ring->next_to_clean);
 		return NETDEV_TX_BUSY;
 	}
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 
 	/* CRC,ITAG no support */
 	pch_gbe_tx_queue(adapter, tx_ring, skb);
+	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 	return NETDEV_TX_OK;
 }
-- cgit v1.2.1
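
Both the r8169 hunks above and pch_gbe_clean_tx() rely on the same TX stop/wake
handshake: the transmit path stops the queue and then re-checks ring space after
a barrier, while the completion path retires descriptors before testing whether
the queue is stopped.  The single-threaded sketch below outlines that ordering
with plain variables, and comments in place of netif_stop_queue(),
netif_wake_queue() and smp_mb(); it is a pattern sketch under those assumptions,
not code from either driver.

/* Single-threaded sketch of the TX stop/wake handshake -- a pattern outline,
 * not r8169 or pch_gbe code.  smp_mb() is modelled by comments only. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	16
#define MAX_FRAGS	4

static unsigned int cur_tx, dirty_tx;	/* producer / consumer indices */
static bool queue_stopped;

static unsigned int slots_avail(void)
{
	return dirty_tx + RING_SIZE - cur_tx;
}

static void xmit_one(void)
{
	cur_tx++;				/* queue one descriptor */
	if (slots_avail() < MAX_FRAGS + 1) {
		queue_stopped = true;		/* netif_stop_queue() */
		/* smp_mb() here: make the stop visible before re-reading dirty_tx,
		 * so a completion that raced with us is not missed. */
		if (slots_avail() >= MAX_FRAGS + 1)
			queue_stopped = false;	/* netif_wake_queue() */
	}
}

static void tx_complete(unsigned int n)
{
	dirty_tx += n;				/* retire descriptors */
	/* smp_mb() here: publish dirty_tx before testing the stopped flag. */
	if (queue_stopped && slots_avail() >= MAX_FRAGS + 1) {
		queue_stopped = false;		/* netif_wake_queue() */
		printf("queue woken\n");
	}
}

int main(void)
{
	while (!queue_stopped)
		xmit_one();
	printf("queue stopped with %u slots left\n", slots_avail());
	tx_complete(8);
	return 0;
}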