| field | value | date |
|---|---|---|
| author | David Woodhouse <dwmw2@infradead.org> | 2007-07-04 20:24:57 -0400 |
| committer | David Woodhouse <dwmw2@infradead.org> | 2007-07-04 20:24:57 -0400 |
| commit | 9c5ef0fbfa0b0be219290b05a39135b957479251 (patch) | |
| tree | deacd1ff7238ed0faf6a5d90f816e3135774b63d /net/core/netpoll.c | |
| parent | 8f1a866fc6831f13593fae6194e3150d45976628 (diff) | |
| parent | 190045d53b9a8341e8600d6eb468b6081e903afb (diff) | |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'net/core/netpoll.c')

 net/core/netpoll.c | 30 +++++++++++++++++++-----------

1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 758dafe284c0..cf40ff91ac01 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -72,7 +72,8 @@ static void queue_process(struct work_struct *work)
 			netif_tx_unlock(dev);
 			local_irq_restore(flags);
 
-			schedule_delayed_work(&npinfo->tx_work, HZ/10);
+			if (atomic_read(&npinfo->refcnt))
+				schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
 		netif_tx_unlock(dev);
@@ -250,22 +251,23 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		unsigned long flags;
 
 		local_irq_save(flags);
-		if (netif_tx_trylock(dev)) {
-			/* try until next clock tick */
-			for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
-					tries > 0; --tries) {
+		/* try until next clock tick */
+		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
+		     tries > 0; --tries) {
+			if (netif_tx_trylock(dev)) {
 				if (!netif_queue_stopped(dev))
 					status = dev->hard_start_xmit(skb, dev);
+				netif_tx_unlock(dev);
 
 				if (status == NETDEV_TX_OK)
 					break;
 
-				/* tickle device maybe there is some cleanup */
-				netpoll_poll(np);
-
-				udelay(USEC_PER_POLL);
 			}
-			netif_tx_unlock(dev);
+
+			/* tickle device maybe there is some cleanup */
+			netpoll_poll(np);
+
+			udelay(USEC_PER_POLL);
 		}
 		local_irq_restore(flags);
 	}
@@ -784,9 +786,15 @@ void netpoll_cleanup(struct netpoll *np)
 			if (atomic_dec_and_test(&npinfo->refcnt)) {
 				skb_queue_purge(&npinfo->arp_tx);
 				skb_queue_purge(&npinfo->txq);
-				cancel_rearming_delayed_work(&npinfo->tx_work);
+				cancel_delayed_work(&npinfo->tx_work);
 				flush_scheduled_work();
 
+				/* clean after last, unfinished work */
+				if (!skb_queue_empty(&npinfo->txq)) {
+					struct sk_buff *skb;
+					skb = __skb_dequeue(&npinfo->txq);
+					kfree_skb(skb);
+				}
 				kfree(npinfo);
 			}
 		}
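
The interesting hunk is the middle one. Before the patch, netpoll_send_skb() took the tx lock once and spun inside it; if the trylock failed, the function gave up without ever polling the device. After the patch the trylock is attempted on every retry, and the netpoll_poll()/udelay() back-off runs whether or not the lock was acquired, so a contended lock no longer starves the device of its cleanup tickle. Below is a minimal userspace sketch of that retry pattern, assuming nothing beyond POSIX threads; every name in it (tx_lock, queue_stopped, try_send, poll_device, MAX_TRIES) is an illustrative stand-in, not a kernel API.

/*
 * Userspace sketch of the retry pattern introduced in netpoll_send_skb().
 * Not kernel code: all identifiers are stand-ins. Build: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_TRIES     4    /* stand-in for jiffies_to_usecs(1)/USEC_PER_POLL */
#define USEC_PER_POLL 50

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static bool queue_stopped;   /* stand-in for netif_queue_stopped() */

/* Pretend hardware transmit; reports whether the packet was accepted. */
static bool try_send(const char *pkt)
{
	printf("sent: %s\n", pkt);
	return true;
}

/* Stand-in for netpoll_poll(): let the driver reap completed tx work. */
static void poll_device(void)
{
}

static bool send_with_retries(const char *pkt)
{
	for (int tries = MAX_TRIES; tries > 0; --tries) {
		/* Attempt the lock on EVERY retry (the patched behaviour),
		 * instead of taking it once around the whole loop. */
		if (pthread_mutex_trylock(&tx_lock) == 0) {
			bool sent = false;

			if (!queue_stopped)
				sent = try_send(pkt);
			pthread_mutex_unlock(&tx_lock);
			if (sent)
				return true;
		}
		/* Runs whether or not the lock was taken: tickle the
		 * device, then back off before the next attempt. */
		poll_device();
		usleep(USEC_PER_POLL);
	}
	return false;
}

int main(void)
{
	return send_with_retries("hello") ? 0 : 1;
}

The other two hunks are related teardown bookkeeping: cancel_delayed_work() only removes work that has not started, and flush_scheduled_work() waits for an already-running queue_process() to finish, which can leave one last requeued skb on txq after the purge. The added block in netpoll_cleanup() frees that leftover before npinfo itself is freed, and the new refcnt check in queue_process() stops the work from rescheduling itself during teardown.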