| author    | Olaf Kirch <olaf.kirch@oracle.com>               | 2007-07-11 19:32:02 -0700 |
| committer | David S. Miller <davem@davemloft.net>            | 2007-07-11 19:32:02 -0700 |
| commit    | 29578624e354f56143d92510fff33a8b2aaa2c03 (patch) |                           |
| tree      | 67d0f28264dbeacedde4882f1e76157894ba33ba         |                           |
| parent    | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 (diff)  |                           |
[NET]: Fix races in net_rx_action vs netpoll.
Keep netpoll/poll_napi from messing with the poll_list.
Only net_rx_action is allowed to manipulate the list.
Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/linux/netdevice.h | 10 |
-rw-r--r-- | net/core/netpoll.c        |  8 |
2 files changed, 18 insertions, 0 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8590d685d935..79cc3dab4be7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -261,6 +261,8 @@ enum netdev_state_t
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_QDISC_RUNNING,
+	/* Set by the netpoll NAPI code */
+	__LINK_STATE_POLL_LIST_FROZEN,
 };
@@ -1014,6 +1016,14 @@ static inline void netif_rx_complete(struct net_device *dev)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_NETPOLL
+	/* Prevent race with netpoll - yes, this is a kludge.
+	 * But at least it doesn't penalize the non-netpoll
+	 * code path. */
+	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
+		return;
+#endif
+
 	local_irq_save(flags);
 	__netif_rx_complete(dev);
 	local_irq_restore(flags);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index de1b26aa5720..d1264e9a50a8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -124,6 +124,13 @@ static void poll_napi(struct netpoll *np)
 	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
 	    npinfo->poll_owner != smp_processor_id() &&
 	    spin_trylock(&npinfo->poll_lock)) {
+		/* When calling dev->poll from poll_napi, we may end up in
+		 * netif_rx_complete. However, only the CPU to which the
+		 * device was queued is allowed to remove it from poll_list.
+		 * Setting POLL_LIST_FROZEN tells netif_rx_complete
+		 * to leave the NAPI state alone.
+		 */
+		set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		npinfo->rx_flags |= NETPOLL_RX_DROP;
 		atomic_inc(&trapped);
 
@@ -131,6 +138,7 @@ static void poll_napi(struct netpoll *np)
 		atomic_dec(&trapped);
 		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
+		clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		spin_unlock(&npinfo->poll_lock);
 	}
 }
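
The freeze-bit idea the patch uses can be modelled outside the kernel. Below is a minimal user-space sketch, not kernel code: every name in it (fake_dev, rx_complete, poll_napi_like, demo_poll, and POLL_LIST_FROZEN as a plain bit index) is invented for illustration, and the real NAPI locking and per-CPU poll_list handling are far richer. It only shows the shape of the fix: the netpoll-style caller sets a "frozen" bit before invoking the poll handler, so the completion path leaves the queueing state alone, while the normal path still dequeues the device.

/* User-space sketch of the freeze-bit pattern; illustrative names only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define POLL_LIST_FROZEN 0	/* bit index, stands in for __LINK_STATE_POLL_LIST_FROZEN */

struct fake_dev {
	atomic_ulong state;	/* bit flags, like dev->state */
	bool on_poll_list;	/* stands in for the per-CPU poll_list linkage */
};

/* Analogue of netif_rx_complete(): only touch the list when not frozen. */
static void rx_complete(struct fake_dev *dev)
{
	if (atomic_load(&dev->state) & (1UL << POLL_LIST_FROZEN))
		return;		/* a netpoll-style caller is driving us; leave the list alone */
	dev->on_poll_list = false;
}

/* Analogue of poll_napi(): freeze, run the poll handler, unfreeze. */
static void poll_napi_like(struct fake_dev *dev, void (*poll)(struct fake_dev *))
{
	atomic_fetch_or(&dev->state, 1UL << POLL_LIST_FROZEN);
	poll(dev);		/* may call rx_complete() internally */
	atomic_fetch_and(&dev->state, ~(1UL << POLL_LIST_FROZEN));
}

/* Toy poll handler that completes immediately. */
static void demo_poll(struct fake_dev *dev)
{
	rx_complete(dev);
}

int main(void)
{
	struct fake_dev dev = { .on_poll_list = true };
	atomic_init(&dev.state, 0);

	poll_napi_like(&dev, demo_poll);
	printf("on poll_list after frozen poll:       %s\n",
	       dev.on_poll_list ? "yes" : "no");	/* stays "yes" */

	demo_poll(&dev);	/* normal path: completion removes it */
	printf("on poll_list after normal completion: %s\n",
	       dev.on_poll_list ? "yes" : "no");	/* now "no" */
	return 0;
}

Compiled with a C11 compiler (e.g. cc -std=c11 sketch.c), the sketch prints that the device stays queued after the frozen poll and is only dequeued by the normal completion path, which mirrors the invariant in the commit message: only net_rx_action-style processing may remove entries from the poll list.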