From a0d3bea3cf6c7c1b53a46432bd490b5dc784ca42 Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Thu, 11 Aug 2005 16:05:50 -0700
Subject: [NET]: Make skb->protocol __be16

There are many instances of
	skb->protocol = htons(ETH_P_*);
	skb->protocol = __constant_htons(ETH_P_*);
and
	skb->protocol = *_type_trans(...);

Most of the *_type_trans() helpers are already endian-annotated, so let's
shift attention to the other warnings.

Signed-off-by: Alexey Dobriyan
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0061c9470482..948527e42a60 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,7 +255,7 @@ struct sk_buff {
                                 nohdr:1;
                                 /* 3 bits spare */
         __u8                    pkt_type;
-        __u16                   protocol;
+        __be16                  protocol;
         void                    (*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
--
cgit v1.2.1


From 11513128bb66b0b09d5d0df069b58afdb01752a2 Mon Sep 17 00:00:00 2001
From: Jeff Moyer
Date: Thu, 11 Aug 2005 19:23:04 -0700
Subject: [NETPOLL]: rx_flags bugfix

Initialize npinfo->rx_flags.  As it stands, the field holds random garbage,
and so incurs a locking penalty even when an rx_hook isn't registered and we
are not active in the netpoll polling code.

Signed-off-by: Jeff Moyer
Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 net/core/netpoll.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c327c9edadc5..895f3efc65aa 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -639,6 +639,7 @@ int netpoll_setup(struct netpoll *np)
                 if (!npinfo)
                         goto release;
 
+                npinfo->rx_flags = 0;
                 npinfo->rx_np = NULL;
                 npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
                 npinfo->poll_owner = -1;
--
cgit v1.2.1
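For context, the "locking penalty" above comes from the receive fast path consulting rx_flags before deciding whether to take rx_lock. The sketch below is illustrative only (the real check is the inline netpoll_rx() helper in include/linux/netpoll.h; the exact shape of that era's code is assumed here, and the _sketch name is hypothetical), but it shows why garbage in rx_flags defeats the cheap early-out:

```c
#include <linux/skbuff.h>
#include <linux/netpoll.h>
#include <linux/spinlock.h>

/* Illustrative sketch, not the verbatim kernel helper.  Field names
 * (rx_flags, rx_lock, rx_np) match struct netpoll_info from the patches. */
static inline int netpoll_rx_sketch(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	unsigned long flags;
	int ret = 0;

	/* With rx_flags zeroed, ordinary traffic skips the spinlock
	 * entirely when no rx_hook is registered. */
	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return 0;

	/* Uninitialized garbage in rx_flags forces every packet here. */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}
```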
From a636e1357911afdea7c8344ee65f78d36caf3c16 Mon Sep 17 00:00:00 2001
From: Jeff Moyer
Date: Thu, 11 Aug 2005 19:23:50 -0700
Subject: [NETPOLL]: deadlock bugfix

This fixes an obvious deadlock in the netpoll code: netpoll_rx() takes
npinfo->rx_lock, and netpoll_rx() is the only caller of arp_reply()
(through __netpoll_rx()), so it is not necessary for arp_reply() to take
the lock again.

Signed-off-by: Jeff Moyer
Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 net/core/netpoll.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 895f3efc65aa..b9d9da082af2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -353,11 +353,8 @@ static void arp_reply(struct sk_buff *skb)
         struct sk_buff *send_skb;
         struct netpoll *np = NULL;
 
-        spin_lock_irqsave(&npinfo->rx_lock, flags);
         if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                 np = npinfo->rx_np;
-        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
         if (!np)
                 return;
--
cgit v1.2.1


From 6b0b31572985c2e64f7216c798766302fb782281 Mon Sep 17 00:00:00 2001
From: Matt Mackall
Date: Thu, 11 Aug 2005 19:24:33 -0700
Subject: [NETPOLL]: e1000 netpoll tweak

Suggested by Steven Rostedt; matches his patch included in e100.

Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 drivers/net/e1000/e1000_main.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e5d2c3c7ce4..b82fd15d0891 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
         struct e1000_adapter *adapter = netdev_priv(netdev);
         disable_irq(adapter->pdev->irq);
         e1000_intr(adapter->pdev->irq, netdev, NULL);
+        e1000_clean_tx_irq(adapter);
         enable_irq(adapter->pdev->irq);
 }
 #endif
--
cgit v1.2.1


From f0d3459d0722782c7d9d0e35a1ed0815e75fcde5 Mon Sep 17 00:00:00 2001
From: Matt Mackall
Date: Thu, 11 Aug 2005 19:25:11 -0700
Subject: [NETPOLL]: netpoll_send_skb simplify

Minor netpoll_send_skb() restructuring: avoid the confusing goto and move
some bits out of the retry loop.

Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 net/core/netpoll.c | 42 ++++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b9d9da082af2..59ed186e4f46 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -248,14 +248,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
         int status;
         struct netpoll_info *npinfo;
 
-repeat:
-        if(!np || !np->dev || !netif_running(np->dev)) {
+        if (!np || !np->dev || !netif_running(np->dev)) {
                 __kfree_skb(skb);
                 return;
         }
 
-        /* avoid recursion */
         npinfo = np->dev->npinfo;
+
+        /* avoid recursion */
         if (npinfo->poll_owner == smp_processor_id() ||
             np->dev->xmit_lock_owner == smp_processor_id()) {
                 if (np->drop)
@@ -265,29 +265,31 @@ repeat:
                 return;
         }
 
-        spin_lock(&np->dev->xmit_lock);
-        np->dev->xmit_lock_owner = smp_processor_id();
+        while (1) {
+                spin_lock(&np->dev->xmit_lock);
+                np->dev->xmit_lock_owner = smp_processor_id();
 
-        /*
-         * network drivers do not expect to be called if the queue is
-         * stopped.
-         */
-        if (netif_queue_stopped(np->dev)) {
+                /*
+                 * network drivers do not expect to be called if the queue is
+                 * stopped.
+                 */
+                if (netif_queue_stopped(np->dev)) {
+                        np->dev->xmit_lock_owner = -1;
+                        spin_unlock(&np->dev->xmit_lock);
+                        netpoll_poll(np);
+                        continue;
+                }
+
+                status = np->dev->hard_start_xmit(skb, np->dev);
                 np->dev->xmit_lock_owner = -1;
                 spin_unlock(&np->dev->xmit_lock);
-                netpoll_poll(np);
-                goto repeat;
-        }
-
-        status = np->dev->hard_start_xmit(skb, np->dev);
-        np->dev->xmit_lock_owner = -1;
-        spin_unlock(&np->dev->xmit_lock);
+                /* success */
+                if(!status)
+                        return;
 
-        /* transmit busy */
-        if(status) {
+                /* transmit busy */
                 netpoll_poll(np);
-                goto repeat;
         }
 }
--
cgit v1.2.1


From 0db1d6fc1ea051af49ebe03c503d23996a7c5bbb Mon Sep 17 00:00:00 2001
From: Matt Mackall
Date: Thu, 11 Aug 2005 19:25:54 -0700
Subject: [NETPOLL]: add retry timeout

Add limited retry logic to netpoll_send_skb().  Each time we attempt to
send, decrement the per-device retry counter; on every successful send,
reset it.  We delay 50us between attempts, with up to 20000 retries, for a
total of 1 second.  After we've exhausted the retries, subsequent failed
attempts will try only once until reset by a success.

Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 include/linux/netpoll.h |  1 +
 net/core/netpoll.c      | 13 ++++++++++---
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bcd0ac33f592..be68d94b03d5 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -26,6 +26,7 @@ struct netpoll {
 struct netpoll_info {
         spinlock_t poll_lock;
         int poll_owner;
+        int tries;
         int rx_flags;
         spinlock_t rx_lock;
         struct netpoll *rx_np; /* netpoll that registered an rx_hook */

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 59ed186e4f46..d09affdbad3c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -265,7 +266,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                 return;
         }
 
-        while (1) {
+        do {
+                npinfo->tries--;
                 spin_lock(&np->dev->xmit_lock);
                 np->dev->xmit_lock_owner = smp_processor_id();
 
@@ -277,6 +279,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                         np->dev->xmit_lock_owner = -1;
                         spin_unlock(&np->dev->xmit_lock);
                         netpoll_poll(np);
+                        udelay(50);
                         continue;
                 }
 
@@ -285,12 +288,15 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                 spin_unlock(&np->dev->xmit_lock);
 
                 /* success */
-                if(!status)
+                if(!status) {
+                        npinfo->tries = MAX_RETRIES; /* reset */
                         return;
+                }
 
                 /* transmit busy */
                 netpoll_poll(np);
-        }
+                udelay(50);
+        } while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -642,6 +648,7 @@ int netpoll_setup(struct netpoll *np)
                 npinfo->rx_np = NULL;
                 npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
                 npinfo->poll_owner = -1;
+                npinfo->tries = MAX_RETRIES;
                 npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
         } else
                 npinfo = ndev->npinfo;
--
cgit v1.2.1
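Taken together, the restructure and the retry patch leave the transmit path looking roughly as below. This is a sketch reconstructed from the two diffs above (indentation and surrounding context are approximate, not a verbatim copy of the resulting file); the retry budget works out to 20000 attempts x 50us = 1 second.

```c
/* Sketch of netpoll_send_skb() after the two patches above. */
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status;
	struct netpoll_info *npinfo;

	if (!np || !np->dev || !netif_running(np->dev)) {
		__kfree_skb(skb);
		return;
	}

	npinfo = np->dev->npinfo;

	/* avoid recursion */
	if (npinfo->poll_owner == smp_processor_id() ||
	    np->dev->xmit_lock_owner == smp_processor_id()) {
		if (np->drop)
			np->drop(skb);
		else
			__kfree_skb(skb);
		return;
	}

	do {
		npinfo->tries--;
		spin_lock(&np->dev->xmit_lock);
		np->dev->xmit_lock_owner = smp_processor_id();

		/* drivers do not expect to be called with the queue stopped */
		if (netif_queue_stopped(np->dev)) {
			np->dev->xmit_lock_owner = -1;
			spin_unlock(&np->dev->xmit_lock);
			netpoll_poll(np);
			udelay(50);
			continue;
		}

		status = np->dev->hard_start_xmit(skb, np->dev);
		np->dev->xmit_lock_owner = -1;
		spin_unlock(&np->dev->xmit_lock);

		/* success */
		if (!status) {
			npinfo->tries = MAX_RETRIES;	/* reset */
			return;
		}

		/* transmit busy */
		netpoll_poll(np);
		udelay(50);
	} while (npinfo->tries > 0);	/* 20000 * 50us == 1s of retries */
}
```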
From 2652076507b662fc88ba16c27b59c7bdd9ccd956 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 11 Aug 2005 19:26:42 -0700
Subject: [NETPOLL]: pre-fill skb pool

We could do one thing (see the patch below): I think it would be useful to
fill up the netlogging skb queue straight at initialization time.
Especially if netpoll is used for dumping alone, the system might not be in
a position to fill up the queue at the point of crash, so it is better to
be a bit more prepared and keep the pipeline filled.

[ I've modified this to be called earlier - mpm ]

Signed-off-by: Ingo Molnar
Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 net/core/netpoll.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d09affdbad3c..c02a08da6d42 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -725,6 +725,10 @@ int netpoll_setup(struct netpoll *np)
                 npinfo->rx_np = np;
                 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
         }
+
+        /* fill up the skb queue */
+        refill_skbs();
+
         /* last thing to do is link it to the net device structure */
         ndev->npinfo = npinfo;
--
cgit v1.2.1


From 53fb95d3c14290fd6ee808b221e35493f096246f Mon Sep 17 00:00:00 2001
From: Matt Mackall
Date: Thu, 11 Aug 2005 19:27:43 -0700
Subject: [NETPOLL]: fix initialization/NAPI race

This fixes a race between initialization and NAPI softirq processing by
using an RCU approach.  The race was discovered when refill_skbs() was
added to the setup code.

Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 include/linux/netpoll.h | 19 +++++++++++++------
 net/core/dev.c          |  9 +++++----
 net/core/netpoll.c      |  3 +++
 3 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index be68d94b03d5..5ade54a78dbb 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -9,6 +9,7 @@
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>
 #include <linux/list.h>
 
 struct netpoll;
@@ -61,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
         return ret;
 }
 
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
+        rcu_read_lock(); /* deal with race on ->npinfo */
         if (dev->npinfo) {
                 spin_lock(&dev->npinfo->poll_lock);
                 dev->npinfo->poll_owner = smp_processor_id();
+                return dev->npinfo;
         }
+        return NULL;
 }
 
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-        if (dev->npinfo) {
-                dev->npinfo->poll_owner = -1;
-                spin_unlock(&dev->npinfo->poll_lock);
+        struct netpoll_info *npi = have;
+
+        if (npi) {
+                npi->poll_owner = -1;
+                spin_unlock(&npi->poll_lock);
         }
+        rcu_read_unlock();
 }
 
 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif

diff --git a/net/core/dev.c b/net/core/dev.c
index 52a3bf7ae177..faf59b02c4bf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1696,7 +1696,8 @@ static void net_rx_action(struct softirq_action *h)
         struct softnet_data *queue = &__get_cpu_var(softnet_data);
         unsigned long start_time = jiffies;
         int budget = netdev_budget;
-
+        void *have;
+
         local_irq_disable();
 
         while (!list_empty(&queue->poll_list)) {
@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)
                 dev = list_entry(queue->poll_list.next,
                                  struct net_device, poll_list);
 
-                netpoll_poll_lock(dev);
+                have = netpoll_poll_lock(dev);
 
                 if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-                        netpoll_poll_unlock(dev);
+                        netpoll_poll_unlock(have);
                         local_irq_disable();
                         list_del(&dev->poll_list);
                         list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
                         else
                                 dev->quota = dev->weight;
                 } else {
-                        netpoll_poll_unlock(dev);
+                        netpoll_poll_unlock(have);
                         dev_put(dev);
                         local_irq_disable();
                 }

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c02a08da6d42..996787bca17f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -732,6 +732,9 @@ int netpoll_setup(struct netpoll *np)
         /* last thing to do is link it to the net device structure */
         ndev->npinfo = npinfo;
 
+        /* avoid racing with NAPI reading npinfo */
+        synchronize_rcu();
+
         return 0;
 
  release:
--
cgit v1.2.1
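The fix follows the standard RCU publish pattern: the writer fully initializes npinfo, publishes the pointer, then calls synchronize_rcu() so every NAPI poller that raced with setup has either seen NULL or a fully initialized structure before netpoll_setup() returns. A generic sketch of the pattern under those assumptions (illustrative only; the struct and function names here are made up, not the kernel code above):

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct thing {
	int ready;
};

static struct thing *shared;	/* read only under rcu_read_lock() */

/* Reader side: mirrors netpoll_poll_lock()/netpoll_poll_unlock(). */
static int reader(void)
{
	struct thing *t;
	int ret = 0;

	rcu_read_lock();
	t = rcu_dereference(shared);
	if (t)
		ret = t->ready;
	rcu_read_unlock();
	return ret;
}

/* Writer side: mirrors netpoll_setup() -- init, publish, then wait. */
static int publish(void)
{
	struct thing *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;
	t->ready = 1;			/* initialize everything first */
	rcu_assign_pointer(shared, t);	/* publish the pointer */
	synchronize_rcu();		/* wait out in-flight readers */
	return 0;
}
```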
From d7b9dfc8ea43936e6e8eec3040dcf4f110563868 Mon Sep 17 00:00:00 2001
From: Matt Mackall
Date: Thu, 11 Aug 2005 19:28:05 -0700
Subject: [NETPOLL]: remove unused variable

Remove unused variable

Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 net/core/netpoll.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 996787bca17f..a1a9a7abff50 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -357,7 +357,6 @@ static void arp_reply(struct sk_buff *skb)
         unsigned char *arp_ptr;
         int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
         u32 sip, tip;
-        unsigned long flags;
         struct sk_buff *send_skb;
         struct netpoll *np = NULL;
 
--
cgit v1.2.1