From 9bbc768aa911a3ef336272eaa6d220abfba8ce50 Mon Sep 17 00:00:00 2001 From: Jan Engelhardt Date: Tue, 23 Mar 2010 04:07:29 +0100 Subject: netfilter: ipv4: use NFPROTO values for NF_HOOK invocation The semantic patch that was used: // @@ @@ (NF_HOOK |NF_HOOK_COND |nf_hook )( -PF_INET, +NFPROTO_IPV4, ...) // Signed-off-by: Jan Engelhardt --- net/ipv4/ipmr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/ipv4/ipmr.c') diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 8582e12e4a62..1d42f6103c8d 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1319,7 +1319,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) * not mrouter) cannot join to more than one interface - it will * result in receiving multiple packets. */ - NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev, + NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, ipmr_forward_finish); return; -- cgit v1.2.3 From e258beb22f4d3ea3dc88586ffc9c990d0eb03380 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 13 Apr 2010 05:03:19 +0000 Subject: ipv4: ipmr: move unres_queue and timer to per-namespace data The unres_queue is currently shared between all namespaces. Following patches will additionally allow to create multiple multicast routing tables in each namespace. Having a single shared queue for all these users seems to excessive, move the queue and the cleanup timer to the per-namespace data to unshare it. As a side-effect, this fixes a bug in the seq file iteration functions: the first entry returned is always from the current namespace, entries returned after that may belong to any namespace. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/netns/ipv4.h | 2 ++ net/ipv4/ipmr.c | 70 ++++++++++++++++++++---------------------------- 2 files changed, 31 insertions(+), 41 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2764994c9136..b15e518f952a 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -60,6 +60,8 @@ struct netns_ipv4 { #ifdef CONFIG_IP_MROUTE struct sock *mroute_sk; + struct timer_list ipmr_expire_timer; + struct mfc_cache *mfc_unres_queue; struct mfc_cache **mfc_cache_array; struct vif_device *vif_table; int maxvif; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d4f6d1340a4..d6aa65e2b08f 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -80,8 +80,6 @@ static DEFINE_RWLOCK(mrt_lock); #define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) -static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */ - /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); @@ -100,8 +98,6 @@ static int ipmr_cache_report(struct net *net, struct sk_buff *pkt, vifi_t vifi, int assert); static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); -static struct timer_list ipmr_expire_timer; - /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) @@ -364,25 +360,26 @@ static void ipmr_destroy_unres(struct mfc_cache *c) } -/* Single timer process for all the unresolved queue. */ +/* Timer process for the unresolved queue. 
*/ -static void ipmr_expire_process(unsigned long dummy) +static void ipmr_expire_process(unsigned long arg) { + struct net *net = (struct net *)arg; unsigned long now; unsigned long expires; struct mfc_cache *c, **cp; if (!spin_trylock(&mfc_unres_lock)) { - mod_timer(&ipmr_expire_timer, jiffies+HZ/10); + mod_timer(&net->ipv4.ipmr_expire_timer, jiffies+HZ/10); return; } - if (mfc_unres_queue == NULL) + if (net->ipv4.mfc_unres_queue == NULL) goto out; now = jiffies; expires = 10*HZ; - cp = &mfc_unres_queue; + cp = &net->ipv4.mfc_unres_queue; while ((c=*cp) != NULL) { if (time_after(c->mfc_un.unres.expires, now)) { @@ -398,8 +395,8 @@ static void ipmr_expire_process(unsigned long dummy) ipmr_destroy_unres(c); } - if (mfc_unres_queue != NULL) - mod_timer(&ipmr_expire_timer, jiffies + expires); + if (net->ipv4.mfc_unres_queue != NULL) + mod_timer(&net->ipv4.ipmr_expire_timer, jiffies + expires); out: spin_unlock(&mfc_unres_lock); @@ -708,9 +705,8 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) const struct iphdr *iph = ip_hdr(skb); spin_lock_bh(&mfc_unres_lock); - for (c=mfc_unres_queue; c; c=c->next) { - if (net_eq(mfc_net(c), net) && - c->mfc_mcastgrp == iph->daddr && + for (c=net->ipv4.mfc_unres_queue; c; c=c->next) { + if (c->mfc_mcastgrp == iph->daddr && c->mfc_origin == iph->saddr) break; } @@ -751,10 +747,10 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) } atomic_inc(&net->ipv4.cache_resolve_queue_len); - c->next = mfc_unres_queue; - mfc_unres_queue = c; + c->next = net->ipv4.mfc_unres_queue; + net->ipv4.mfc_unres_queue = c; - mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires); + mod_timer(&net->ipv4.ipmr_expire_timer, c->mfc_un.unres.expires); } /* @@ -849,18 +845,17 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) * need to send on the frames and tidy up. 
*/ spin_lock_bh(&mfc_unres_lock); - for (cp = &mfc_unres_queue; (uc=*cp) != NULL; + for (cp = &net->ipv4.mfc_unres_queue; (uc=*cp) != NULL; cp = &uc->next) { - if (net_eq(mfc_net(uc), net) && - uc->mfc_origin == c->mfc_origin && + if (uc->mfc_origin == c->mfc_origin && uc->mfc_mcastgrp == c->mfc_mcastgrp) { *cp = uc->next; atomic_dec(&net->ipv4.cache_resolve_queue_len); break; } } - if (mfc_unres_queue == NULL) - del_timer(&ipmr_expire_timer); + if (net->ipv4.mfc_unres_queue == NULL) + del_timer(&net->ipv4.ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); if (uc) { @@ -912,14 +907,9 @@ static void mroute_clean_tables(struct net *net) struct mfc_cache *c, **cp; spin_lock_bh(&mfc_unres_lock); - cp = &mfc_unres_queue; + cp = &net->ipv4.mfc_unres_queue; while ((c = *cp) != NULL) { - if (!net_eq(mfc_net(c), net)) { - cp = &c->next; - continue; - } *cp = c->next; - ipmr_destroy_unres(c); } spin_unlock_bh(&mfc_unres_lock); @@ -1819,11 +1809,10 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, return mfc; read_unlock(&mrt_lock); - it->cache = &mfc_unres_queue; + it->cache = &net->ipv4.mfc_unres_queue; spin_lock_bh(&mfc_unres_lock); - for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) - if (net_eq(mfc_net(mfc), net) && - pos-- == 0) + for (mfc = net->ipv4.mfc_unres_queue; mfc; mfc = mfc->next) + if (pos-- == 0) return mfc; spin_unlock_bh(&mfc_unres_lock); @@ -1857,7 +1846,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (mfc->next) return mfc->next; - if (it->cache == &mfc_unres_queue) + if (it->cache == &net->ipv4.mfc_unres_queue) goto end_of_list; BUG_ON(it->cache != net->ipv4.mfc_cache_array); @@ -1870,13 +1859,11 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) /* exhausted cache_array, show unresolved */ read_unlock(&mrt_lock); - it->cache = &mfc_unres_queue; + it->cache = &net->ipv4.mfc_unres_queue; it->ct = 0; spin_lock_bh(&mfc_unres_lock); - mfc = mfc_unres_queue; - while (mfc && !net_eq(mfc_net(mfc), net)) - mfc = mfc->next; + mfc = net->ipv4.mfc_unres_queue; if (mfc) return mfc; @@ -1892,7 +1879,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); - if (it->cache == &mfc_unres_queue) + if (it->cache == &net->ipv4.mfc_unres_queue) spin_unlock_bh(&mfc_unres_lock); else if (it->cache == net->ipv4.mfc_cache_array) read_unlock(&mrt_lock); @@ -1915,7 +1902,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) (unsigned long) mfc->mfc_origin, mfc->mfc_parent); - if (it->cache != &mfc_unres_queue) { + if (it->cache != &net->ipv4.mfc_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", mfc->mfc_un.res.pkt, mfc->mfc_un.res.bytes, @@ -1992,6 +1979,9 @@ static int __net_init ipmr_net_init(struct net *net) goto fail_mfc_cache; } + setup_timer(&net->ipv4.ipmr_expire_timer, ipmr_expire_process, + (unsigned long)net); + #ifdef CONFIG_IP_PIMSM net->ipv4.mroute_reg_vif_num = -1; #endif @@ -2047,7 +2037,6 @@ int __init ip_mr_init(void) if (err) goto reg_pernet_fail; - setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); err = register_netdevice_notifier(&ip_mr_notifier); if (err) goto reg_notif_fail; @@ -2065,7 +2054,6 @@ add_proto_fail: unregister_netdevice_notifier(&ip_mr_notifier); #endif reg_notif_fail: - del_timer(&ipmr_expire_timer); unregister_pernet_subsys(&ipmr_net_ops); reg_pernet_fail: kmem_cache_destroy(mrt_cachep); -- cgit v1.2.3 From d658f8a0e63b6476148162aa7a3ffffc58dcad52 Mon Sep 17 00:00:00 2001 From: Patrick 
McHardy Date: Tue, 13 Apr 2010 05:03:20 +0000 Subject: ipv4: ipmr: remove net pointer from struct mfc_cache Now that cache entries in unres_queue don't need to be distinguished by their network namespace pointer anymore, we can remove it from struct mfc_cache add pass the namespace as function argument to the functions that need it. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/mroute.h | 15 ------------ net/ipv4/ipmr.c | 65 +++++++++++++++++++++++++------------------------- 2 files changed, 32 insertions(+), 48 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/include/linux/mroute.h b/include/linux/mroute.h index c5f3d53548e2..de7780a6dd32 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -192,9 +192,6 @@ struct vif_device { struct mfc_cache { struct mfc_cache *next; /* Next entry on cache line */ -#ifdef CONFIG_NET_NS - struct net *mfc_net; -#endif __be32 mfc_mcastgrp; /* Group the entry belongs to */ __be32 mfc_origin; /* Source of packet */ vifi_t mfc_parent; /* Source interface */ @@ -217,18 +214,6 @@ struct mfc_cache { } mfc_un; }; -static inline -struct net *mfc_net(const struct mfc_cache *mfc) -{ - return read_pnet(&mfc->mfc_net); -} - -static inline -void mfc_net_set(struct mfc_cache *mfc, struct net *net) -{ - write_pnet(&mfc->mfc_net, hold_net(net)); -} - #define MFC_STATIC 1 #define MFC_NOTIFY 2 diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index d6aa65e2b08f..f8e25c8ba070 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -93,10 +93,12 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; -static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); +static int ip_mr_forward(struct net *net, struct sk_buff *skb, + struct mfc_cache *cache, int local); static int ipmr_cache_report(struct net *net, struct sk_buff *pkt, vifi_t vifi, int assert); -static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); +static int ipmr_fill_mroute(struct net *net, struct sk_buff *skb, + struct mfc_cache *c, struct rtmsg *rtm); /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ @@ -325,7 +327,6 @@ static int vif_delete(struct net *net, int vifi, int notify, static inline void ipmr_cache_free(struct mfc_cache *c) { - release_net(mfc_net(c)); kmem_cache_free(mrt_cachep, c); } @@ -333,11 +334,10 @@ static inline void ipmr_cache_free(struct mfc_cache *c) and reporting error to netlink readers. */ -static void ipmr_destroy_unres(struct mfc_cache *c) +static void ipmr_destroy_unres(struct net *net, struct mfc_cache *c) { struct sk_buff *skb; struct nlmsgerr *e; - struct net *net = mfc_net(c); atomic_dec(&net->ipv4.cache_resolve_queue_len); @@ -392,7 +392,7 @@ static void ipmr_expire_process(unsigned long arg) *cp = c->next; - ipmr_destroy_unres(c); + ipmr_destroy_unres(net, c); } if (net->ipv4.mfc_unres_queue != NULL) @@ -404,10 +404,10 @@ out: /* Fill oifs list. It is called under write locked mrt_lock. 
*/ -static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) +static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache, + unsigned char *ttls) { int vifi; - struct net *net = mfc_net(cache); cache->mfc_un.res.minvif = MAXVIFS; cache->mfc_un.res.maxvif = 0; @@ -547,24 +547,22 @@ static struct mfc_cache *ipmr_cache_find(struct net *net, /* * Allocate a multicast cache entry */ -static struct mfc_cache *ipmr_cache_alloc(struct net *net) +static struct mfc_cache *ipmr_cache_alloc(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); if (c == NULL) return NULL; c->mfc_un.res.minvif = MAXVIFS; - mfc_net_set(c, net); return c; } -static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) +static struct mfc_cache *ipmr_cache_alloc_unres(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); if (c == NULL) return NULL; skb_queue_head_init(&c->mfc_un.unres.unresolved); c->mfc_un.unres.expires = jiffies + 10*HZ; - mfc_net_set(c, net); return c; } @@ -572,7 +570,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) * A cache entry has gone into a resolved state from queued */ -static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) +static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc, + struct mfc_cache *c) { struct sk_buff *skb; struct nlmsgerr *e; @@ -585,7 +584,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); - if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { + if (ipmr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = (skb_tail_pointer(skb) - (u8 *)nlh); } else { @@ -597,9 +596,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) memset(&e->msg, 0, sizeof(e->msg)); } - rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid); + rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else - ip_mr_forward(skb, c, 0); + ip_mr_forward(net, skb, c, 0); } } @@ -717,7 +716,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) */ if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 || - (c = ipmr_cache_alloc_unres(net)) == NULL) { + (c = ipmr_cache_alloc_unres()) == NULL) { spin_unlock_bh(&mfc_unres_lock); kfree_skb(skb); @@ -814,7 +813,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) if (c != NULL) { write_lock_bh(&mrt_lock); c->mfc_parent = mfc->mfcc_parent; - ipmr_update_thresholds(c, mfc->mfcc_ttls); + ipmr_update_thresholds(net, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_unlock_bh(&mrt_lock); @@ -824,14 +823,14 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) return -EINVAL; - c = ipmr_cache_alloc(net); + c = ipmr_cache_alloc(); if (c == NULL) return -ENOMEM; c->mfc_origin = mfc->mfcc_origin.s_addr; c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; c->mfc_parent = mfc->mfcc_parent; - ipmr_update_thresholds(c, mfc->mfcc_ttls); + ipmr_update_thresholds(net, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; @@ -859,7 +858,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) spin_unlock_bh(&mfc_unres_lock); if (uc) { - ipmr_cache_resolve(uc, c); + ipmr_cache_resolve(net, uc, c); ipmr_cache_free(uc); } return 0; @@ -910,7 +909,7 @@ static void mroute_clean_tables(struct net *net) cp = 
&net->ipv4.mfc_unres_queue; while ((c = *cp) != NULL) { *cp = c->next; - ipmr_destroy_unres(c); + ipmr_destroy_unres(net, c); } spin_unlock_bh(&mfc_unres_lock); } @@ -1221,9 +1220,9 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) * Processing handlers for ipmr_forward */ -static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) +static void ipmr_queue_xmit(struct net *net, struct sk_buff *skb, + struct mfc_cache *c, int vifi) { - struct net *net = mfc_net(c); const struct iphdr *iph = ip_hdr(skb); struct vif_device *vif = &net->ipv4.vif_table[vifi]; struct net_device *dev; @@ -1335,11 +1334,11 @@ static int ipmr_find_vif(struct net_device *dev) /* "local" means that we should preserve one skb (for local delivery) */ -static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local) +static int ip_mr_forward(struct net *net, struct sk_buff *skb, + struct mfc_cache *cache, int local) { int psend = -1; int vif, ct; - struct net *net = mfc_net(cache); vif = cache->mfc_parent; cache->mfc_un.res.pkt++; @@ -1396,7 +1395,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ipmr_queue_xmit(skb2, cache, psend); + ipmr_queue_xmit(net, skb2, cache, psend); } psend = ct; } @@ -1405,9 +1404,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local if (local) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ipmr_queue_xmit(skb2, cache, psend); + ipmr_queue_xmit(net, skb2, cache, psend); } else { - ipmr_queue_xmit(skb, cache, psend); + ipmr_queue_xmit(net, skb, cache, psend); return 0; } } @@ -1488,7 +1487,7 @@ int ip_mr_input(struct sk_buff *skb) return -ENODEV; } - ip_mr_forward(skb, cache, local); + ip_mr_forward(net, skb, cache, local); read_unlock(&mrt_lock); @@ -1602,11 +1601,11 @@ drop: #endif static int -ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) +ipmr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc_cache *c, + struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; - struct net *net = mfc_net(c); u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; @@ -1686,7 +1685,7 @@ int ipmr_get_route(struct net *net, if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; - err = ipmr_fill_mroute(skb, cache, rtm); + err = ipmr_fill_mroute(net, skb, cache, rtm); read_unlock(&mrt_lock); return err; } -- cgit v1.2.3 From 862465f2e7e90975e7bf0ecfbb171dd3adedd950 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 13 Apr 2010 05:03:21 +0000 Subject: ipv4: ipmr: convert struct mfc_cache to struct list_head Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/mroute.h | 2 +- include/net/netns/ipv4.h | 4 +- net/ipv4/ipmr.c | 125 +++++++++++++++++++++++------------------------ 3 files changed, 64 insertions(+), 67 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/include/linux/mroute.h b/include/linux/mroute.h index de7780a6dd32..7ff6c77d6008 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -191,7 +191,7 @@ struct vif_device { #define VIFF_STATIC 0x8000 struct mfc_cache { - struct mfc_cache *next; /* Next entry on cache line */ + struct list_head list; __be32 mfc_mcastgrp; /* Group the entry belongs to */ __be32 mfc_origin; /* Source of packet */ vifi_t mfc_parent; /* Source interface */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index b15e518f952a..5d06429968d5 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -61,8 +61,8 @@ struct netns_ipv4 { #ifdef CONFIG_IP_MROUTE struct sock *mroute_sk; struct timer_list ipmr_expire_timer; - struct mfc_cache *mfc_unres_queue; - struct mfc_cache **mfc_cache_array; + struct list_head mfc_unres_queue; + struct list_head *mfc_cache_array; struct vif_device *vif_table; int maxvif; atomic_t cache_resolve_queue_len; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index f8e25c8ba070..21b5edc2f343 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -367,35 +367,32 @@ static void ipmr_expire_process(unsigned long arg) struct net *net = (struct net *)arg; unsigned long now; unsigned long expires; - struct mfc_cache *c, **cp; + struct mfc_cache *c, *next; if (!spin_trylock(&mfc_unres_lock)) { mod_timer(&net->ipv4.ipmr_expire_timer, jiffies+HZ/10); return; } - if (net->ipv4.mfc_unres_queue == NULL) + if (list_empty(&net->ipv4.mfc_unres_queue)) goto out; now = jiffies; expires = 10*HZ; - cp = &net->ipv4.mfc_unres_queue; - while ((c=*cp) != NULL) { + list_for_each_entry_safe(c, next, &net->ipv4.mfc_unres_queue, list) { if (time_after(c->mfc_un.unres.expires, now)) { unsigned long interval = c->mfc_un.unres.expires - now; if (interval < expires) expires = interval; - cp = &c->next; continue; } - *cp = c->next; - + list_del(&c->list); ipmr_destroy_unres(net, c); } - if (net->ipv4.mfc_unres_queue != NULL) + if (!list_empty(&net->ipv4.mfc_unres_queue)) mod_timer(&net->ipv4.ipmr_expire_timer, jiffies + expires); out: @@ -537,11 +534,11 @@ static struct mfc_cache *ipmr_cache_find(struct net *net, int line = MFC_HASH(mcastgrp, origin); struct mfc_cache *c; - for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) { - if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) - break; + list_for_each_entry(c, &net->ipv4.mfc_cache_array[line], list) { + if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) + return c; } - return c; + return NULL; } /* @@ -699,18 +696,21 @@ static int ipmr_cache_report(struct net *net, static int ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) { + bool found = false; int err; struct mfc_cache *c; const struct iphdr *iph = ip_hdr(skb); spin_lock_bh(&mfc_unres_lock); - for (c=net->ipv4.mfc_unres_queue; c; c=c->next) { + list_for_each_entry(c, &net->ipv4.mfc_unres_queue, list) { if (c->mfc_mcastgrp == iph->daddr && - c->mfc_origin == iph->saddr) + c->mfc_origin == iph->saddr) { + found = true; break; + } } - if (c == NULL) { + if (!found) { /* * Create a new entry if allowable */ @@ -746,8 +746,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) } atomic_inc(&net->ipv4.cache_resolve_queue_len); - c->next = net->ipv4.mfc_unres_queue; - 
net->ipv4.mfc_unres_queue = c; + list_add(&c->list, &net->ipv4.mfc_unres_queue); mod_timer(&net->ipv4.ipmr_expire_timer, c->mfc_un.unres.expires); } @@ -774,16 +773,15 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) { int line; - struct mfc_cache *c, **cp; + struct mfc_cache *c, *next; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - for (cp = &net->ipv4.mfc_cache_array[line]; - (c = *cp) != NULL; cp = &c->next) { + list_for_each_entry_safe(c, next, &net->ipv4.mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { write_lock_bh(&mrt_lock); - *cp = c->next; + list_del(&c->list); write_unlock_bh(&mrt_lock); ipmr_cache_free(c); @@ -795,22 +793,24 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) { + bool found = false; int line; - struct mfc_cache *uc, *c, **cp; + struct mfc_cache *uc, *c; if (mfc->mfcc_parent >= MAXVIFS) return -ENFILE; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - for (cp = &net->ipv4.mfc_cache_array[line]; - (c = *cp) != NULL; cp = &c->next) { + list_for_each_entry(c, &net->ipv4.mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && - c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) + c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { + found = true; break; + } } - if (c != NULL) { + if (found) { write_lock_bh(&mrt_lock); c->mfc_parent = mfc->mfcc_parent; ipmr_update_thresholds(net, c, mfc->mfcc_ttls); @@ -835,8 +835,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) c->mfc_flags |= MFC_STATIC; write_lock_bh(&mrt_lock); - c->next = net->ipv4.mfc_cache_array[line]; - net->ipv4.mfc_cache_array[line] = c; + list_add(&c->list, &net->ipv4.mfc_cache_array[line]); write_unlock_bh(&mrt_lock); /* @@ -844,16 +843,15 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) * need to send on the frames and tidy up. 
*/ spin_lock_bh(&mfc_unres_lock); - for (cp = &net->ipv4.mfc_unres_queue; (uc=*cp) != NULL; - cp = &uc->next) { + list_for_each_entry(uc, &net->ipv4.mfc_unres_queue, list) { if (uc->mfc_origin == c->mfc_origin && uc->mfc_mcastgrp == c->mfc_mcastgrp) { - *cp = uc->next; + list_del(&uc->list); atomic_dec(&net->ipv4.cache_resolve_queue_len); break; } } - if (net->ipv4.mfc_unres_queue == NULL) + if (list_empty(&net->ipv4.mfc_unres_queue)) del_timer(&net->ipv4.ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); @@ -872,6 +870,7 @@ static void mroute_clean_tables(struct net *net) { int i; LIST_HEAD(list); + struct mfc_cache *c, *next; /* * Shut down all active vif entries @@ -885,17 +884,12 @@ static void mroute_clean_tables(struct net *net) /* * Wipe the cache */ - for (i=0; iipv4.mfc_cache_array[i]; - while ((c = *cp) != NULL) { - if (c->mfc_flags&MFC_STATIC) { - cp = &c->next; + for (i = 0; i < MFC_LINES; i++) { + list_for_each_entry_safe(c, next, &net->ipv4.mfc_cache_array[i], list) { + if (c->mfc_flags&MFC_STATIC) continue; - } write_lock_bh(&mrt_lock); - *cp = c->next; + list_del(&c->list); write_unlock_bh(&mrt_lock); ipmr_cache_free(c); @@ -903,12 +897,9 @@ static void mroute_clean_tables(struct net *net) } if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { - struct mfc_cache *c, **cp; - spin_lock_bh(&mfc_unres_lock); - cp = &net->ipv4.mfc_unres_queue; - while ((c = *cp) != NULL) { - *cp = c->next; + list_for_each_entry_safe(c, next, &net->ipv4.mfc_unres_queue, list) { + list_del(&c->list); ipmr_destroy_unres(net, c); } spin_unlock_bh(&mfc_unres_lock); @@ -1789,7 +1780,7 @@ static const struct file_operations ipmr_vif_fops = { struct ipmr_mfc_iter { struct seq_net_private p; - struct mfc_cache **cache; + struct list_head *cache; int ct; }; @@ -1799,18 +1790,18 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, { struct mfc_cache *mfc; - it->cache = net->ipv4.mfc_cache_array; read_lock(&mrt_lock); - for (it->ct = 0; it->ct < MFC_LINES; it->ct++) - for (mfc = net->ipv4.mfc_cache_array[it->ct]; - mfc; mfc = mfc->next) + for (it->ct = 0; it->ct < MFC_LINES; it->ct++) { + it->cache = &net->ipv4.mfc_cache_array[it->ct]; + list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; + } read_unlock(&mrt_lock); - it->cache = &net->ipv4.mfc_unres_queue; spin_lock_bh(&mfc_unres_lock); - for (mfc = net->ipv4.mfc_unres_queue; mfc; mfc = mfc->next) + it->cache = &net->ipv4.mfc_unres_queue; + list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; spin_unlock_bh(&mfc_unres_lock); @@ -1842,18 +1833,19 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (v == SEQ_START_TOKEN) return ipmr_mfc_seq_idx(net, seq->private, 0); - if (mfc->next) - return mfc->next; + if (mfc->list.next != it->cache) + return list_entry(mfc->list.next, struct mfc_cache, list); if (it->cache == &net->ipv4.mfc_unres_queue) goto end_of_list; - BUG_ON(it->cache != net->ipv4.mfc_cache_array); + BUG_ON(it->cache != &net->ipv4.mfc_cache_array[it->ct]); while (++it->ct < MFC_LINES) { - mfc = net->ipv4.mfc_cache_array[it->ct]; - if (mfc) - return mfc; + it->cache = &net->ipv4.mfc_cache_array[it->ct]; + if (list_empty(it->cache)) + continue; + return list_first_entry(it->cache, struct mfc_cache, list); } /* exhausted cache_array, show unresolved */ @@ -1862,9 +1854,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) it->ct = 0; spin_lock_bh(&mfc_unres_lock); - mfc = net->ipv4.mfc_unres_queue; - if (mfc) - return mfc; + if 
(!list_empty(it->cache)) + return list_first_entry(it->cache, struct mfc_cache, list); end_of_list: spin_unlock_bh(&mfc_unres_lock); @@ -1880,7 +1871,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) if (it->cache == &net->ipv4.mfc_unres_queue) spin_unlock_bh(&mfc_unres_lock); - else if (it->cache == net->ipv4.mfc_cache_array) + else if (it->cache == &net->ipv4.mfc_cache_array[it->ct]) read_unlock(&mrt_lock); } @@ -1960,6 +1951,7 @@ static const struct net_protocol pim_protocol = { */ static int __net_init ipmr_net_init(struct net *net) { + unsigned int i; int err = 0; net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), @@ -1971,13 +1963,18 @@ static int __net_init ipmr_net_init(struct net *net) /* Forwarding cache */ net->ipv4.mfc_cache_array = kcalloc(MFC_LINES, - sizeof(struct mfc_cache *), + sizeof(struct list_head), GFP_KERNEL); if (!net->ipv4.mfc_cache_array) { err = -ENOMEM; goto fail_mfc_cache; } + for (i = 0; i < MFC_LINES; i++) + INIT_LIST_HEAD(&net->ipv4.mfc_cache_array[i]); + + INIT_LIST_HEAD(&net->ipv4.mfc_unres_queue); + setup_timer(&net->ipv4.ipmr_expire_timer, ipmr_expire_process, (unsigned long)net); -- cgit v1.2.3 From 0c12295a741d3186987f96f518cfbdaf01abb087 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 13 Apr 2010 05:03:22 +0000 Subject: ipv4: ipmr: move mroute data into seperate structure Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/netns/ipv4.h | 13 +- net/ipv4/ipmr.c | 369 +++++++++++++++++++++++++---------------------- 2 files changed, 200 insertions(+), 182 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 5d06429968d5..72e762ab3e5d 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -59,18 +59,7 @@ struct netns_ipv4 { atomic_t rt_genid; #ifdef CONFIG_IP_MROUTE - struct sock *mroute_sk; - struct timer_list ipmr_expire_timer; - struct list_head mfc_unres_queue; - struct list_head *mfc_cache_array; - struct vif_device *vif_table; - int maxvif; - atomic_t cache_resolve_queue_len; - int mroute_do_assert; - int mroute_do_pim; -#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) - int mroute_reg_vif_num; -#endif + struct mr_table *mrt; #endif }; #endif diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 21b5edc2f343..498f4e907d52 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -68,6 +68,21 @@ #define CONFIG_IP_PIMSM 1 #endif +struct mr_table { + struct sock *mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct list_head mfc_cache_array[MFC_LINES]; + struct vif_device vif_table[MAXVIFS]; + int maxvif; + atomic_t cache_resolve_queue_len; + int mroute_do_assert; + int mroute_do_pim; +#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) + int mroute_reg_vif_num; +#endif +}; + /* Big lock, protecting vif table, mrt cache and mroute socket state. Note that the changes are semaphored via rtnl_lock. 
*/ @@ -78,7 +93,7 @@ static DEFINE_RWLOCK(mrt_lock); * Multicast router control variables */ -#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) +#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); @@ -93,11 +108,12 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; -static int ip_mr_forward(struct net *net, struct sk_buff *skb, - struct mfc_cache *cache, int local); -static int ipmr_cache_report(struct net *net, +static int ip_mr_forward(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, struct mfc_cache *cache, + int local); +static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); -static int ipmr_fill_mroute(struct net *net, struct sk_buff *skb, +static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ @@ -199,12 +215,12 @@ failure: static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); + struct mr_table *mrt = net->ipv4.mrt; read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; - ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num, - IGMPMSG_WHOLEPKT); + ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT); read_unlock(&mrt_lock); kfree_skb(skb); return NETDEV_TX_OK; @@ -274,17 +290,17 @@ failure: * @notify: Set to 1, if the caller is a notifier_call */ -static int vif_delete(struct net *net, int vifi, int notify, +static int vif_delete(struct mr_table *mrt, int vifi, int notify, struct list_head *head) { struct vif_device *v; struct net_device *dev; struct in_device *in_dev; - if (vifi < 0 || vifi >= net->ipv4.maxvif) + if (vifi < 0 || vifi >= mrt->maxvif) return -EADDRNOTAVAIL; - v = &net->ipv4.vif_table[vifi]; + v = &mrt->vif_table[vifi]; write_lock_bh(&mrt_lock); dev = v->dev; @@ -296,17 +312,17 @@ static int vif_delete(struct net *net, int vifi, int notify, } #ifdef CONFIG_IP_PIMSM - if (vifi == net->ipv4.mroute_reg_vif_num) - net->ipv4.mroute_reg_vif_num = -1; + if (vifi == mrt->mroute_reg_vif_num) + mrt->mroute_reg_vif_num = -1; #endif - if (vifi+1 == net->ipv4.maxvif) { + if (vifi+1 == mrt->maxvif) { int tmp; for (tmp=vifi-1; tmp>=0; tmp--) { - if (VIF_EXISTS(net, tmp)) + if (VIF_EXISTS(mrt, tmp)) break; } - net->ipv4.maxvif = tmp+1; + mrt->maxvif = tmp+1; } write_unlock_bh(&mrt_lock); @@ -334,12 +350,13 @@ static inline void ipmr_cache_free(struct mfc_cache *c) and reporting error to netlink readers. 
*/ -static void ipmr_destroy_unres(struct net *net, struct mfc_cache *c) +static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) { + struct net *net = NULL; //mrt->net; struct sk_buff *skb; struct nlmsgerr *e; - atomic_dec(&net->ipv4.cache_resolve_queue_len); + atomic_dec(&mrt->cache_resolve_queue_len); while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { @@ -364,23 +381,23 @@ static void ipmr_destroy_unres(struct net *net, struct mfc_cache *c) static void ipmr_expire_process(unsigned long arg) { - struct net *net = (struct net *)arg; + struct mr_table *mrt = (struct mr_table *)arg; unsigned long now; unsigned long expires; struct mfc_cache *c, *next; if (!spin_trylock(&mfc_unres_lock)) { - mod_timer(&net->ipv4.ipmr_expire_timer, jiffies+HZ/10); + mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); return; } - if (list_empty(&net->ipv4.mfc_unres_queue)) + if (list_empty(&mrt->mfc_unres_queue)) goto out; now = jiffies; expires = 10*HZ; - list_for_each_entry_safe(c, next, &net->ipv4.mfc_unres_queue, list) { + list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { if (time_after(c->mfc_un.unres.expires, now)) { unsigned long interval = c->mfc_un.unres.expires - now; if (interval < expires) @@ -389,11 +406,11 @@ static void ipmr_expire_process(unsigned long arg) } list_del(&c->list); - ipmr_destroy_unres(net, c); + ipmr_destroy_unres(mrt, c); } - if (!list_empty(&net->ipv4.mfc_unres_queue)) - mod_timer(&net->ipv4.ipmr_expire_timer, jiffies + expires); + if (!list_empty(&mrt->mfc_unres_queue)) + mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); out: spin_unlock(&mfc_unres_lock); @@ -401,7 +418,7 @@ out: /* Fill oifs list. It is called under write locked mrt_lock. */ -static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache, +static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, unsigned char *ttls) { int vifi; @@ -410,8 +427,8 @@ static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache, cache->mfc_un.res.maxvif = 0; memset(cache->mfc_un.res.ttls, 255, MAXVIFS); - for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) { - if (VIF_EXISTS(net, vifi) && + for (vifi = 0; vifi < mrt->maxvif; vifi++) { + if (VIF_EXISTS(mrt, vifi) && ttls[vifi] && ttls[vifi] < 255) { cache->mfc_un.res.ttls[vifi] = ttls[vifi]; if (cache->mfc_un.res.minvif > vifi) @@ -422,16 +439,17 @@ static void ipmr_update_thresholds(struct net *net, struct mfc_cache *cache, } } -static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) +static int vif_add(struct net *net, struct mr_table *mrt, + struct vifctl *vifc, int mrtsock) { int vifi = vifc->vifc_vifi; - struct vif_device *v = &net->ipv4.vif_table[vifi]; + struct vif_device *v = &mrt->vif_table[vifi]; struct net_device *dev; struct in_device *in_dev; int err; /* Is vif busy ? 
*/ - if (VIF_EXISTS(net, vifi)) + if (VIF_EXISTS(mrt, vifi)) return -EADDRINUSE; switch (vifc->vifc_flags) { @@ -441,7 +459,7 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) * Special Purpose VIF in PIM * All the packets will be sent to the daemon */ - if (net->ipv4.mroute_reg_vif_num >= 0) + if (mrt->mroute_reg_vif_num >= 0) return -EADDRINUSE; dev = ipmr_reg_vif(net); if (!dev) @@ -519,22 +537,22 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) v->dev = dev; #ifdef CONFIG_IP_PIMSM if (v->flags&VIFF_REGISTER) - net->ipv4.mroute_reg_vif_num = vifi; + mrt->mroute_reg_vif_num = vifi; #endif - if (vifi+1 > net->ipv4.maxvif) - net->ipv4.maxvif = vifi+1; + if (vifi+1 > mrt->maxvif) + mrt->maxvif = vifi+1; write_unlock_bh(&mrt_lock); return 0; } -static struct mfc_cache *ipmr_cache_find(struct net *net, +static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin, __be32 mcastgrp) { int line = MFC_HASH(mcastgrp, origin); struct mfc_cache *c; - list_for_each_entry(c, &net->ipv4.mfc_cache_array[line], list) { + list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) return c; } @@ -567,8 +585,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void) * A cache entry has gone into a resolved state from queued */ -static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc, - struct mfc_cache *c) +static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, + struct mfc_cache *uc, struct mfc_cache *c) { struct sk_buff *skb; struct nlmsgerr *e; @@ -581,7 +599,7 @@ static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc, if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); - if (ipmr_fill_mroute(net, skb, c, NLMSG_DATA(nlh)) > 0) { + if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = (skb_tail_pointer(skb) - (u8 *)nlh); } else { @@ -595,7 +613,7 @@ static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc, rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else - ip_mr_forward(net, skb, c, 0); + ip_mr_forward(net, mrt, skb, c, 0); } } @@ -606,7 +624,7 @@ static void ipmr_cache_resolve(struct net *net, struct mfc_cache *uc, * Called under mrt_lock. 
*/ -static int ipmr_cache_report(struct net *net, +static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert) { struct sk_buff *skb; @@ -639,7 +657,7 @@ static int ipmr_cache_report(struct net *net, memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); msg->im_msgtype = IGMPMSG_WHOLEPKT; msg->im_mbz = 0; - msg->im_vif = net->ipv4.mroute_reg_vif_num; + msg->im_vif = mrt->mroute_reg_vif_num; ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + sizeof(struct iphdr)); @@ -671,7 +689,7 @@ static int ipmr_cache_report(struct net *net, skb->transport_header = skb->network_header; } - if (net->ipv4.mroute_sk == NULL) { + if (mrt->mroute_sk == NULL) { kfree_skb(skb); return -EINVAL; } @@ -679,7 +697,7 @@ static int ipmr_cache_report(struct net *net, /* * Deliver to mrouted */ - ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb); + ret = sock_queue_rcv_skb(mrt->mroute_sk, skb); if (ret < 0) { if (net_ratelimit()) printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); @@ -694,7 +712,7 @@ static int ipmr_cache_report(struct net *net, */ static int -ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) +ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) { bool found = false; int err; @@ -702,7 +720,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) const struct iphdr *iph = ip_hdr(skb); spin_lock_bh(&mfc_unres_lock); - list_for_each_entry(c, &net->ipv4.mfc_unres_queue, list) { + list_for_each_entry(c, &mrt->mfc_unres_queue, list) { if (c->mfc_mcastgrp == iph->daddr && c->mfc_origin == iph->saddr) { found = true; @@ -715,7 +733,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) * Create a new entry if allowable */ - if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 || + if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || (c = ipmr_cache_alloc_unres()) == NULL) { spin_unlock_bh(&mfc_unres_lock); @@ -733,7 +751,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) /* * Reflect first query at mrouted. 
*/ - err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE); + err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); if (err < 0) { /* If the report failed throw the cache entry out - Brad Parker @@ -745,10 +763,10 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) return err; } - atomic_inc(&net->ipv4.cache_resolve_queue_len); - list_add(&c->list, &net->ipv4.mfc_unres_queue); + atomic_inc(&mrt->cache_resolve_queue_len); + list_add(&c->list, &mrt->mfc_unres_queue); - mod_timer(&net->ipv4.ipmr_expire_timer, c->mfc_un.unres.expires); + mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); } /* @@ -770,14 +788,14 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) * MFC cache manipulation by user space mroute daemon */ -static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) +static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc) { int line; struct mfc_cache *c, *next; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - list_for_each_entry_safe(c, next, &net->ipv4.mfc_cache_array[line], list) { + list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { write_lock_bh(&mrt_lock); @@ -791,7 +809,8 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) return -ENOENT; } -static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) +static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, + struct mfcctl *mfc, int mrtsock) { bool found = false; int line; @@ -802,7 +821,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); - list_for_each_entry(c, &net->ipv4.mfc_cache_array[line], list) { + list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { found = true; @@ -813,7 +832,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) if (found) { write_lock_bh(&mrt_lock); c->mfc_parent = mfc->mfcc_parent; - ipmr_update_thresholds(net, c, mfc->mfcc_ttls); + ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_unlock_bh(&mrt_lock); @@ -830,12 +849,12 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) c->mfc_origin = mfc->mfcc_origin.s_addr; c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; c->mfc_parent = mfc->mfcc_parent; - ipmr_update_thresholds(net, c, mfc->mfcc_ttls); + ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_lock_bh(&mrt_lock); - list_add(&c->list, &net->ipv4.mfc_cache_array[line]); + list_add(&c->list, &mrt->mfc_cache_array[line]); write_unlock_bh(&mrt_lock); /* @@ -843,20 +862,20 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) * need to send on the frames and tidy up. 
*/ spin_lock_bh(&mfc_unres_lock); - list_for_each_entry(uc, &net->ipv4.mfc_unres_queue, list) { + list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { if (uc->mfc_origin == c->mfc_origin && uc->mfc_mcastgrp == c->mfc_mcastgrp) { list_del(&uc->list); - atomic_dec(&net->ipv4.cache_resolve_queue_len); + atomic_dec(&mrt->cache_resolve_queue_len); break; } } - if (list_empty(&net->ipv4.mfc_unres_queue)) - del_timer(&net->ipv4.ipmr_expire_timer); + if (list_empty(&mrt->mfc_unres_queue)) + del_timer(&mrt->ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); if (uc) { - ipmr_cache_resolve(net, uc, c); + ipmr_cache_resolve(net, mrt, uc, c); ipmr_cache_free(uc); } return 0; @@ -866,7 +885,7 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) * Close the multicast socket, and clear the vif tables etc */ -static void mroute_clean_tables(struct net *net) +static void mroute_clean_tables(struct mr_table *mrt) { int i; LIST_HEAD(list); @@ -875,9 +894,9 @@ static void mroute_clean_tables(struct net *net) /* * Shut down all active vif entries */ - for (i = 0; i < net->ipv4.maxvif; i++) { - if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) - vif_delete(net, i, 0, &list); + for (i = 0; i < mrt->maxvif; i++) { + if (!(mrt->vif_table[i].flags&VIFF_STATIC)) + vif_delete(mrt, i, 0, &list); } unregister_netdevice_many(&list); @@ -885,7 +904,7 @@ static void mroute_clean_tables(struct net *net) * Wipe the cache */ for (i = 0; i < MFC_LINES; i++) { - list_for_each_entry_safe(c, next, &net->ipv4.mfc_cache_array[i], list) { + list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { if (c->mfc_flags&MFC_STATIC) continue; write_lock_bh(&mrt_lock); @@ -896,11 +915,11 @@ static void mroute_clean_tables(struct net *net) } } - if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { + if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { spin_lock_bh(&mfc_unres_lock); - list_for_each_entry_safe(c, next, &net->ipv4.mfc_unres_queue, list) { + list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { list_del(&c->list); - ipmr_destroy_unres(net, c); + ipmr_destroy_unres(mrt, c); } spin_unlock_bh(&mfc_unres_lock); } @@ -909,16 +928,17 @@ static void mroute_clean_tables(struct net *net) static void mrtsock_destruct(struct sock *sk) { struct net *net = sock_net(sk); + struct mr_table *mrt = net->ipv4.mrt; rtnl_lock(); - if (sk == net->ipv4.mroute_sk) { + if (sk == mrt->mroute_sk) { IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; write_lock_bh(&mrt_lock); - net->ipv4.mroute_sk = NULL; + mrt->mroute_sk = NULL; write_unlock_bh(&mrt_lock); - mroute_clean_tables(net); + mroute_clean_tables(mrt); } rtnl_unlock(); } @@ -936,9 +956,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi struct vifctl vif; struct mfcctl mfc; struct net *net = sock_net(sk); + struct mr_table *mrt = net->ipv4.mrt; if (optname != MRT_INIT) { - if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN)) + if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN)) return -EACCES; } @@ -951,7 +972,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi return -ENOPROTOOPT; rtnl_lock(); - if (net->ipv4.mroute_sk) { + if (mrt->mroute_sk) { rtnl_unlock(); return -EADDRINUSE; } @@ -959,7 +980,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi ret = ip_ra_control(sk, 1, mrtsock_destruct); if (ret == 0) { write_lock_bh(&mrt_lock); - net->ipv4.mroute_sk = sk; + mrt->mroute_sk = sk; write_unlock_bh(&mrt_lock); IPV4_DEVCONF_ALL(net, 
MC_FORWARDING)++; @@ -967,7 +988,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi rtnl_unlock(); return ret; case MRT_DONE: - if (sk != net->ipv4.mroute_sk) + if (sk != mrt->mroute_sk) return -EACCES; return ip_ra_control(sk, 0, NULL); case MRT_ADD_VIF: @@ -980,9 +1001,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi return -ENFILE; rtnl_lock(); if (optname == MRT_ADD_VIF) { - ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); + ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk); } else { - ret = vif_delete(net, vif.vifc_vifi, 0, NULL); + ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); } rtnl_unlock(); return ret; @@ -999,9 +1020,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi return -EFAULT; rtnl_lock(); if (optname == MRT_DEL_MFC) - ret = ipmr_mfc_delete(net, &mfc); + ret = ipmr_mfc_delete(mrt, &mfc); else - ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk); + ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk); rtnl_unlock(); return ret; /* @@ -1012,7 +1033,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi int v; if (get_user(v,(int __user *)optval)) return -EFAULT; - net->ipv4.mroute_do_assert = (v) ? 1 : 0; + mrt->mroute_do_assert = (v) ? 1 : 0; return 0; } #ifdef CONFIG_IP_PIMSM @@ -1026,9 +1047,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi rtnl_lock(); ret = 0; - if (v != net->ipv4.mroute_do_pim) { - net->ipv4.mroute_do_pim = v; - net->ipv4.mroute_do_assert = v; + if (v != mrt->mroute_do_pim) { + mrt->mroute_do_pim = v; + mrt->mroute_do_assert = v; } rtnl_unlock(); return ret; @@ -1052,6 +1073,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int int olr; int val; struct net *net = sock_net(sk); + struct mr_table *mrt = net->ipv4.mrt; if (optname != MRT_VERSION && #ifdef CONFIG_IP_PIMSM @@ -1073,10 +1095,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int val = 0x0305; #ifdef CONFIG_IP_PIMSM else if (optname == MRT_PIM) - val = net->ipv4.mroute_do_pim; + val = mrt->mroute_do_pim; #endif else - val = net->ipv4.mroute_do_assert; + val = mrt->mroute_do_assert; if (copy_to_user(optval, &val, olr)) return -EFAULT; return 0; @@ -1093,16 +1115,17 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) struct vif_device *vif; struct mfc_cache *c; struct net *net = sock_net(sk); + struct mr_table *mrt = net->ipv4.mrt; switch (cmd) { case SIOCGETVIFCNT: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; - if (vr.vifi >= net->ipv4.maxvif) + if (vr.vifi >= mrt->maxvif) return -EINVAL; read_lock(&mrt_lock); - vif = &net->ipv4.vif_table[vr.vifi]; - if (VIF_EXISTS(net, vr.vifi)) { + vif = &mrt->vif_table[vr.vifi]; + if (VIF_EXISTS(mrt, vr.vifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; @@ -1120,7 +1143,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) return -EFAULT; read_lock(&mrt_lock); - c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr); + c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; @@ -1143,16 +1166,17 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v { struct net_device *dev = ptr; struct net *net = dev_net(dev); + struct mr_table *mrt = net->ipv4.mrt; struct vif_device *v; int ct; LIST_HEAD(list); if (event != NETDEV_UNREGISTER) return 
NOTIFY_DONE; - v = &net->ipv4.vif_table[0]; - for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { + v = &mrt->vif_table[0]; + for (ct = 0; ct < mrt->maxvif; ct++, v++) { if (v->dev == dev) - vif_delete(net, ct, 1, &list); + vif_delete(mrt, ct, 1, &list); } unregister_netdevice_many(&list); return NOTIFY_DONE; @@ -1211,11 +1235,11 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) * Processing handlers for ipmr_forward */ -static void ipmr_queue_xmit(struct net *net, struct sk_buff *skb, - struct mfc_cache *c, int vifi) +static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, struct mfc_cache *c, int vifi) { const struct iphdr *iph = ip_hdr(skb); - struct vif_device *vif = &net->ipv4.vif_table[vifi]; + struct vif_device *vif = &mrt->vif_table[vifi]; struct net_device *dev; struct rtable *rt; int encap = 0; @@ -1229,7 +1253,7 @@ static void ipmr_queue_xmit(struct net *net, struct sk_buff *skb, vif->bytes_out += skb->len; vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; - ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT); + ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); goto out_free; } #endif @@ -1312,12 +1336,12 @@ out_free: return; } -static int ipmr_find_vif(struct net_device *dev) +static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) { - struct net *net = dev_net(dev); int ct; - for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) { - if (net->ipv4.vif_table[ct].dev == dev) + + for (ct = mrt->maxvif-1; ct >= 0; ct--) { + if (mrt->vif_table[ct].dev == dev) break; } return ct; @@ -1325,8 +1349,9 @@ static int ipmr_find_vif(struct net_device *dev) /* "local" means that we should preserve one skb (for local delivery) */ -static int ip_mr_forward(struct net *net, struct sk_buff *skb, - struct mfc_cache *cache, int local) +static int ip_mr_forward(struct net *net, struct mr_table *mrt, + struct sk_buff *skb, struct mfc_cache *cache, + int local) { int psend = -1; int vif, ct; @@ -1338,7 +1363,7 @@ static int ip_mr_forward(struct net *net, struct sk_buff *skb, /* * Wrong interface: drop packet and (maybe) send PIM assert. */ - if (net->ipv4.vif_table[vif].dev != skb->dev) { + if (mrt->vif_table[vif].dev != skb->dev) { int true_vifi; if (skb_rtable(skb)->fl.iif == 0) { @@ -1357,26 +1382,26 @@ static int ip_mr_forward(struct net *net, struct sk_buff *skb, } cache->mfc_un.res.wrong_if++; - true_vifi = ipmr_find_vif(skb->dev); + true_vifi = ipmr_find_vif(mrt, skb->dev); - if (true_vifi >= 0 && net->ipv4.mroute_do_assert && + if (true_vifi >= 0 && mrt->mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, so that we cannot check that packet arrived on an oif. It is bad, but otherwise we would need to move pretty large chunk of pimd to kernel. Ough... 
--ANK */ - (net->ipv4.mroute_do_pim || + (mrt->mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && time_after(jiffies, cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { cache->mfc_un.res.last_assert = jiffies; - ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF); + ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); } goto dont_forward; } - net->ipv4.vif_table[vif].pkt_in++; - net->ipv4.vif_table[vif].bytes_in += skb->len; + mrt->vif_table[vif].pkt_in++; + mrt->vif_table[vif].bytes_in += skb->len; /* * Forward the frame @@ -1386,7 +1411,8 @@ static int ip_mr_forward(struct net *net, struct sk_buff *skb, if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ipmr_queue_xmit(net, skb2, cache, psend); + ipmr_queue_xmit(net, mrt, skb2, cache, + psend); } psend = ct; } @@ -1395,9 +1421,9 @@ static int ip_mr_forward(struct net *net, struct sk_buff *skb, if (local) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) - ipmr_queue_xmit(net, skb2, cache, psend); + ipmr_queue_xmit(net, mrt, skb2, cache, psend); } else { - ipmr_queue_xmit(net, skb, cache, psend); + ipmr_queue_xmit(net, mrt, skb, cache, psend); return 0; } } @@ -1417,6 +1443,7 @@ int ip_mr_input(struct sk_buff *skb) { struct mfc_cache *cache; struct net *net = dev_net(skb->dev); + struct mr_table *mrt = net->ipv4.mrt; int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; /* Packet is looped back after forward, it should not be @@ -1437,9 +1464,9 @@ int ip_mr_input(struct sk_buff *skb) that we can forward NO IGMP messages. */ read_lock(&mrt_lock); - if (net->ipv4.mroute_sk) { + if (mrt->mroute_sk) { nf_reset(skb); - raw_rcv(net->ipv4.mroute_sk, skb); + raw_rcv(mrt->mroute_sk, skb); read_unlock(&mrt_lock); return 0; } @@ -1448,7 +1475,7 @@ int ip_mr_input(struct sk_buff *skb) } read_lock(&mrt_lock); - cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); + cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); /* * No usable cache entry @@ -1466,9 +1493,9 @@ int ip_mr_input(struct sk_buff *skb) skb = skb2; } - vif = ipmr_find_vif(skb->dev); + vif = ipmr_find_vif(mrt, skb->dev); if (vif >= 0) { - int err = ipmr_cache_unresolved(net, vif, skb); + int err = ipmr_cache_unresolved(mrt, vif, skb); read_unlock(&mrt_lock); return err; @@ -1478,7 +1505,7 @@ int ip_mr_input(struct sk_buff *skb) return -ENODEV; } - ip_mr_forward(net, skb, cache, local); + ip_mr_forward(net, mrt, skb, cache, local); read_unlock(&mrt_lock); @@ -1500,6 +1527,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) struct net_device *reg_dev = NULL; struct iphdr *encap; struct net *net = dev_net(skb->dev); + struct mr_table *mrt = net->ipv4.mrt; encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); /* @@ -1514,8 +1542,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) return 1; read_lock(&mrt_lock); - if (net->ipv4.mroute_reg_vif_num >= 0) - reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev; + if (mrt->mroute_reg_vif_num >= 0) + reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; if (reg_dev) dev_hold(reg_dev); read_unlock(&mrt_lock); @@ -1550,13 +1578,14 @@ int pim_rcv_v1(struct sk_buff * skb) { struct igmphdr *pim; struct net *net = dev_net(skb->dev); + struct mr_table *mrt = net->ipv4.mrt; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) goto drop; pim = igmp_hdr(skb); - if (!net->ipv4.mroute_do_pim || + if (!mrt->mroute_do_pim || pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) goto drop; @@ 
-1592,7 +1621,7 @@ drop: #endif static int -ipmr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc_cache *c, +ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) { int ct; @@ -1604,19 +1633,19 @@ ipmr_fill_mroute(struct net *net, struct sk_buff *skb, struct mfc_cache *c, if (c->mfc_parent > MAXVIFS) return -ENOENT; - if (VIF_EXISTS(net, c->mfc_parent)) - RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex); + if (VIF_EXISTS(mrt, c->mfc_parent)) + RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex); mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { - if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { + if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) goto rtattr_failure; nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); nhp->rtnh_flags = 0; nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; - nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex; + nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; nhp->rtnh_len = sizeof(*nhp); } } @@ -1634,11 +1663,12 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, int nowait) { int err; + struct mr_table *mrt = net->ipv4.mrt; struct mfc_cache *cache; struct rtable *rt = skb_rtable(skb); read_lock(&mrt_lock); - cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); + cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst); if (cache == NULL) { struct sk_buff *skb2; @@ -1652,7 +1682,7 @@ int ipmr_get_route(struct net *net, } dev = skb->dev; - if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) { + if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) { read_unlock(&mrt_lock); return -ENODEV; } @@ -1669,14 +1699,14 @@ int ipmr_get_route(struct net *net, iph->saddr = rt->rt_src; iph->daddr = rt->rt_dst; iph->version = 0; - err = ipmr_cache_unresolved(net, vif, skb2); + err = ipmr_cache_unresolved(mrt, vif, skb2); read_unlock(&mrt_lock); return err; } if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; - err = ipmr_fill_mroute(net, skb, cache, rtm); + err = ipmr_fill_mroute(mrt, skb, cache, rtm); read_unlock(&mrt_lock); return err; } @@ -1694,11 +1724,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) { - for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) { - if (!VIF_EXISTS(net, iter->ct)) + struct mr_table *mrt = net->ipv4.mrt; + + for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { + if (!VIF_EXISTS(mrt, iter->ct)) continue; if (pos-- == 0) - return &net->ipv4.vif_table[iter->ct]; + return &mrt->vif_table[iter->ct]; } return NULL; } @@ -1717,15 +1749,16 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); + struct mr_table *mrt = net->ipv4.mrt; ++*pos; if (v == SEQ_START_TOKEN) return ipmr_vif_seq_idx(net, iter, 0); - while (++iter->ct < net->ipv4.maxvif) { - if (!VIF_EXISTS(net, iter->ct)) + while (++iter->ct < mrt->maxvif) { + if (!VIF_EXISTS(mrt, iter->ct)) continue; - return &net->ipv4.vif_table[iter->ct]; + return &mrt->vif_table[iter->ct]; } return NULL; } @@ -1739,6 +1772,7 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) static int ipmr_vif_seq_show(struct seq_file *seq, void *v) { struct net *net = seq_file_net(seq); + struct mr_table *mrt = 
net->ipv4.mrt; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -1749,7 +1783,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", - vif - net->ipv4.vif_table, + vif - mrt->vif_table, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out, vif->flags, vif->local, vif->remote); @@ -1788,11 +1822,12 @@ struct ipmr_mfc_iter { static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_mfc_iter *it, loff_t pos) { + struct mr_table *mrt = net->ipv4.mrt; struct mfc_cache *mfc; read_lock(&mrt_lock); for (it->ct = 0; it->ct < MFC_LINES; it->ct++) { - it->cache = &net->ipv4.mfc_cache_array[it->ct]; + it->cache = &mrt->mfc_cache_array[it->ct]; list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; @@ -1800,7 +1835,7 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, read_unlock(&mrt_lock); spin_lock_bh(&mfc_unres_lock); - it->cache = &net->ipv4.mfc_unres_queue; + it->cache = &mrt->mfc_unres_queue; list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; @@ -1827,6 +1862,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct mfc_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); + struct mr_table *mrt = net->ipv4.mrt; ++*pos; @@ -1836,13 +1872,13 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (mfc->list.next != it->cache) return list_entry(mfc->list.next, struct mfc_cache, list); - if (it->cache == &net->ipv4.mfc_unres_queue) + if (it->cache == &mrt->mfc_unres_queue) goto end_of_list; - BUG_ON(it->cache != &net->ipv4.mfc_cache_array[it->ct]); + BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]); while (++it->ct < MFC_LINES) { - it->cache = &net->ipv4.mfc_cache_array[it->ct]; + it->cache = &mrt->mfc_cache_array[it->ct]; if (list_empty(it->cache)) continue; return list_first_entry(it->cache, struct mfc_cache, list); @@ -1850,7 +1886,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) /* exhausted cache_array, show unresolved */ read_unlock(&mrt_lock); - it->cache = &net->ipv4.mfc_unres_queue; + it->cache = &mrt->mfc_unres_queue; it->ct = 0; spin_lock_bh(&mfc_unres_lock); @@ -1868,10 +1904,11 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) { struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); + struct mr_table *mrt = net->ipv4.mrt; - if (it->cache == &net->ipv4.mfc_unres_queue) + if (it->cache == &mrt->mfc_unres_queue) spin_unlock_bh(&mfc_unres_lock); - else if (it->cache == &net->ipv4.mfc_cache_array[it->ct]) + else if (it->cache == &mrt->mfc_cache_array[it->ct]) read_unlock(&mrt_lock); } @@ -1879,6 +1916,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; struct net *net = seq_file_net(seq); + struct mr_table *mrt = net->ipv4.mrt; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -1892,14 +1930,14 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) (unsigned long) mfc->mfc_origin, mfc->mfc_parent); - if (it->cache != &net->ipv4.mfc_unres_queue) { + if (it->cache != &mrt->mfc_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", mfc->mfc_un.res.pkt, mfc->mfc_un.res.bytes, mfc->mfc_un.res.wrong_if); for (n = mfc->mfc_un.res.minvif; n < mfc->mfc_un.res.maxvif; n++ ) { - if (VIF_EXISTS(net, n) && + if (VIF_EXISTS(mrt, n) && mfc->mfc_un.res.ttls[n] < 255) seq_printf(seq, " %2d:%-3d", @@ -1951,35 +1989,27 @@ static const struct net_protocol pim_protocol = { */ static int 
__net_init ipmr_net_init(struct net *net) { + struct mr_table *mrt; unsigned int i; int err = 0; - net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), - GFP_KERNEL); - if (!net->ipv4.vif_table) { + mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); + if (mrt == NULL) { err = -ENOMEM; goto fail; } /* Forwarding cache */ - net->ipv4.mfc_cache_array = kcalloc(MFC_LINES, - sizeof(struct list_head), - GFP_KERNEL); - if (!net->ipv4.mfc_cache_array) { - err = -ENOMEM; - goto fail_mfc_cache; - } - for (i = 0; i < MFC_LINES; i++) - INIT_LIST_HEAD(&net->ipv4.mfc_cache_array[i]); + INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); - INIT_LIST_HEAD(&net->ipv4.mfc_unres_queue); + INIT_LIST_HEAD(&mrt->mfc_unres_queue); - setup_timer(&net->ipv4.ipmr_expire_timer, ipmr_expire_process, + setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, (unsigned long)net); #ifdef CONFIG_IP_PIMSM - net->ipv4.mroute_reg_vif_num = -1; + mrt->mroute_reg_vif_num = -1; #endif #ifdef CONFIG_PROC_FS @@ -1989,16 +2019,16 @@ static int __net_init ipmr_net_init(struct net *net) if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops)) goto proc_cache_fail; #endif + + net->ipv4.mrt = mrt; return 0; #ifdef CONFIG_PROC_FS proc_cache_fail: proc_net_remove(net, "ip_mr_vif"); proc_vif_fail: - kfree(net->ipv4.mfc_cache_array); + kfree(mrt); #endif -fail_mfc_cache: - kfree(net->ipv4.vif_table); fail: return err; } @@ -2009,8 +2039,7 @@ static void __net_exit ipmr_net_exit(struct net *net) { #ifdef CONFIG_PROC_FS proc_net_remove(net, "ip_mr_cache"); proc_net_remove(net, "ip_mr_vif"); #endif - kfree(net->ipv4.mfc_cache_array); - kfree(net->ipv4.vif_table); + kfree(net->ipv4.mrt); } static struct pernet_operations ipmr_net_ops = { -- cgit v1.2.3 From f0ad0860d01e47a3ffd220564c5c653b3afbe962 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 13 Apr 2010 05:03:23 +0000 Subject: ipv4: ipmr: support multiple tables This patch adds support for multiple independent multicast routing instances, named "tables". Userspace multicast routing daemons can bind to a specific table instance by issuing a setsockopt call using a new option MRT_TABLE. The table number is stored in the raw socket data and affects all following ipmr setsockopt(), getsockopt() and ioctl() calls. By default, a single table (RT_TABLE_DEFAULT) is created with a default routing rule pointing to it. Newly created pimreg devices have the table number appended ("pimregX"), with the exception of devices created in the default table, which are named just "pimreg" for compatibility reasons. Packets are directed to a specific table instance using routing rules, similar to how regular routing rules work. Currently iif, oif and mark are supported as keys, source and destination addresses could be supported additionally. Example usage: - bind pimd/xorp/... to a specific table: uint32_t table = 123; setsockopt(fd, IPPROTO_IP, MRT_TABLE, &table, sizeof(table)); - create routing rules directing packets to the new table: # ip mrule add iif eth0 lookup 123 # ip mrule add oif eth0 lookup 123 Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/fib_rules.h | 1 + include/linux/mroute.h | 3 +- include/net/netns/ipv4.h | 5 + include/net/raw.h | 1 + net/ipv4/Kconfig | 14 ++ net/ipv4/ipmr.c | 399 +++++++++++++++++++++++++++++++++++++++------- 6 files changed, 361 insertions(+), 62 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h index 405e41139a4d..04a397619ebe 100644 --- a/include/linux/fib_rules.h +++ b/include/linux/fib_rules.h @@ -21,6 +21,7 @@ #define FIB_RULES_IPV4 AF_INET #define FIB_RULES_IPV6 AF_INET6 #define FIB_RULES_DECNET AF_DECnet +#define FIB_RULES_IPMR 128 struct fib_rule_hdr { __u8 family; diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 7ff6c77d6008..fa04b246c9ae 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -27,7 +27,8 @@ #define MRT_DEL_MFC (MRT_BASE+5) /* Delete a multicast forwarding entry */ #define MRT_VERSION (MRT_BASE+6) /* Get the kernel multicast version */ #define MRT_ASSERT (MRT_BASE+7) /* Activate PIM assert mode */ -#define MRT_PIM (MRT_BASE+8) /* enable PIM code */ +#define MRT_PIM (MRT_BASE+8) /* enable PIM code */ +#define MRT_TABLE (MRT_BASE+9) /* Specify mroute table ID */ #define SIOCGETVIFCNT SIOCPROTOPRIVATE /* IP protocol privates */ #define SIOCGETSGCNT (SIOCPROTOPRIVATE+1) diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 72e762ab3e5d..ae07feec6446 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -59,7 +59,12 @@ struct netns_ipv4 { atomic_t rt_genid; #ifdef CONFIG_IP_MROUTE +#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES struct mr_table *mrt; +#else + struct list_head mr_tables; + struct fib_rules_ops *mr_rules_ops; +#endif #endif }; #endif diff --git a/include/net/raw.h b/include/net/raw.h index 67cc64369432..43c57502659b 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -61,6 +61,7 @@ struct raw_sock { /* inet_sock has to be the first member */ struct inet_sock inet; struct icmp_filter filter; + u32 ipmr_table; }; static inline struct raw_sock *raw_sk(const struct sock *sk) diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index c9a1c68767ff..be597749c385 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -250,6 +250,20 @@ config IP_MROUTE . If you haven't heard about it, you don't need it. +config IP_MROUTE_MULTIPLE_TABLES + bool "IP: multicast policy routing" + depends on IP_ADVANCED_ROUTER + select FIB_RULES + help + Normally, a multicast router runs a userspace daemon and decides + what to do with a multicast packet based on the source and + destination addresses. If you say Y here, the multicast router + will also be able to take interfaces and packet marks into + account and run multiple instances of userspace daemons + simultaneously, each one handling a single table. + + If unsure, say N. + config IP_PIMSM_V1 bool "IP: PIM-SM version 1 support" depends on IP_MROUTE diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 498f4e907d52..5df5fd74c6d1 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -63,12 +63,15 @@ #include #include #include +#include #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) #define CONFIG_IP_PIMSM 1 #endif struct mr_table { + struct list_head list; + u32 id; struct sock *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; @@ -83,6 +86,14 @@ struct mr_table { #endif }; +struct ipmr_rule { + struct fib_rule common; +}; + +struct ipmr_result { + struct mr_table *mrt; +}; + /* Big lock, protecting vif table, mrt cache and mroute socket state. 
Note that the changes are semaphored via rtnl_lock. */ @@ -108,6 +119,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; +static struct mr_table *ipmr_new_table(struct net *net, u32 id); static int ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local); @@ -115,6 +127,206 @@ static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); +static void ipmr_expire_process(unsigned long arg); + +#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES +#define ipmr_for_each_table(mrt, net) \ + list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list) + +static struct mr_table *ipmr_get_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + + ipmr_for_each_table(mrt, net) { + if (mrt->id == id) + return mrt; + } + return NULL; +} + +static int ipmr_fib_lookup(struct net *net, struct flowi *flp, + struct mr_table **mrt) +{ + struct ipmr_result res; + struct fib_lookup_arg arg = { .result = &res, }; + int err; + + err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg); + if (err < 0) + return err; + *mrt = res.mrt; + return 0; +} + +static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, + int flags, struct fib_lookup_arg *arg) +{ + struct ipmr_result *res = arg->result; + struct mr_table *mrt; + + switch (rule->action) { + case FR_ACT_TO_TBL: + break; + case FR_ACT_UNREACHABLE: + return -ENETUNREACH; + case FR_ACT_PROHIBIT: + return -EACCES; + case FR_ACT_BLACKHOLE: + default: + return -EINVAL; + } + + mrt = ipmr_get_table(rule->fr_net, rule->table); + if (mrt == NULL) + return -EAGAIN; + res->mrt = mrt; + return 0; +} + +static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) +{ + return 1; +} + +static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { + FRA_GENERIC_POLICY, +}; + +static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, + struct fib_rule_hdr *frh, struct nlattr **tb) +{ + return 0; +} + +static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, + struct nlattr **tb) +{ + return 1; +} + +static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, + struct fib_rule_hdr *frh) +{ + frh->dst_len = 0; + frh->src_len = 0; + frh->tos = 0; + return 0; +} + +static struct fib_rules_ops ipmr_rules_ops_template = { + .family = FIB_RULES_IPMR, + .rule_size = sizeof(struct ipmr_rule), + .addr_size = sizeof(u32), + .action = ipmr_rule_action, + .match = ipmr_rule_match, + .configure = ipmr_rule_configure, + .compare = ipmr_rule_compare, + .default_pref = fib_default_rule_pref, + .fill = ipmr_rule_fill, + .nlgroup = RTNLGRP_IPV4_RULE, + .policy = ipmr_rule_policy, + .owner = THIS_MODULE, +}; + +static int __net_init ipmr_rules_init(struct net *net) +{ + struct fib_rules_ops *ops; + struct mr_table *mrt; + int err; + + ops = fib_rules_register(&ipmr_rules_ops_template, net); + if (IS_ERR(ops)) + return PTR_ERR(ops); + + INIT_LIST_HEAD(&net->ipv4.mr_tables); + + mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); + if (mrt == NULL) { + err = -ENOMEM; + goto err1; + } + + err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); + if (err < 0) + goto err2; + + net->ipv4.mr_rules_ops = ops; + return 0; + +err2: + kfree(mrt); +err1: + fib_rules_unregister(ops); + return err; +} + +static void __net_exit ipmr_rules_exit(struct net *net) +{ + struct mr_table *mrt, *next; + + 
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) + kfree(mrt); + fib_rules_unregister(net->ipv4.mr_rules_ops); +} +#else +#define ipmr_for_each_table(mrt, net) \ + for (mrt = net->ipv4.mrt; mrt; mrt = NULL) + +static struct mr_table *ipmr_get_table(struct net *net, u32 id) +{ + return net->ipv4.mrt; +} + +static int ipmr_fib_lookup(struct net *net, struct flowi *flp, + struct mr_table **mrt) +{ + *mrt = net->ipv4.mrt; + return 0; +} + +static int __net_init ipmr_rules_init(struct net *net) +{ + net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); + return net->ipv4.mrt ? 0 : -ENOMEM; +} + +static void __net_exit ipmr_rules_exit(struct net *net) +{ + kfree(net->ipv4.mrt); +} +#endif + +static struct mr_table *ipmr_new_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + unsigned int i; + + mrt = ipmr_get_table(net, id); + if (mrt != NULL) + return mrt; + + mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); + if (mrt == NULL) + return NULL; + mrt->id = id; + + /* Forwarding cache */ + for (i = 0; i < MFC_LINES; i++) + INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); + + INIT_LIST_HEAD(&mrt->mfc_unres_queue); + + setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, + (unsigned long)mrt); + +#ifdef CONFIG_IP_PIMSM + mrt->mroute_reg_vif_num = -1; +#endif +#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES + list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); +#endif + return mrt; +} /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ @@ -215,7 +427,17 @@ failure: static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt; + struct flowi fl = { + .oif = dev->ifindex, + .iif = skb->skb_iif, + .mark = skb->mark, + }; + int err; + + err = ipmr_fib_lookup(net, &fl, &mrt); + if (err < 0) + return err; read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; @@ -240,12 +462,18 @@ static void reg_vif_setup(struct net_device *dev) dev->features |= NETIF_F_NETNS_LOCAL; } -static struct net_device *ipmr_reg_vif(struct net *net) +static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) { struct net_device *dev; struct in_device *in_dev; + char name[IFNAMSIZ]; + + if (mrt->id == RT_TABLE_DEFAULT) + sprintf(name, "pimreg"); + else + sprintf(name, "pimreg%u", mrt->id); - dev = alloc_netdev(0, "pimreg", reg_vif_setup); + dev = alloc_netdev(0, name, reg_vif_setup); if (dev == NULL) return NULL; @@ -461,7 +689,7 @@ static int vif_add(struct net *net, struct mr_table *mrt, */ if (mrt->mroute_reg_vif_num >= 0) return -EADDRINUSE; - dev = ipmr_reg_vif(net); + dev = ipmr_reg_vif(net, mrt); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); @@ -928,17 +1156,19 @@ static void mroute_clean_tables(struct mr_table *mrt) static void mrtsock_destruct(struct sock *sk) { struct net *net = sock_net(sk); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt; rtnl_lock(); - if (sk == mrt->mroute_sk) { - IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; + ipmr_for_each_table(mrt, net) { + if (sk == mrt->mroute_sk) { + IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; - write_lock_bh(&mrt_lock); - mrt->mroute_sk = NULL; - write_unlock_bh(&mrt_lock); + write_lock_bh(&mrt_lock); + mrt->mroute_sk = NULL; + write_unlock_bh(&mrt_lock); - mroute_clean_tables(mrt); + mroute_clean_tables(mrt); + } } rtnl_unlock(); } @@ -956,7 +1186,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi struct vifctl vif; struct mfcctl mfc; struct net *net = 
sock_net(sk); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt; + + mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); + if (mrt == NULL) + return -ENOENT; if (optname != MRT_INIT) { if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN)) @@ -1054,6 +1288,27 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi rtnl_unlock(); return ret; } +#endif +#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES + case MRT_TABLE: + { + u32 v; + + if (optlen != sizeof(u32)) + return -EINVAL; + if (get_user(v, (u32 __user *)optval)) + return -EFAULT; + if (sk == mrt->mroute_sk) + return -EBUSY; + + rtnl_lock(); + ret = 0; + if (!ipmr_new_table(net, v)) + ret = -ENOMEM; + raw_sk(sk)->ipmr_table = v; + rtnl_unlock(); + return ret; + } #endif /* * Spurious command, or MRT_VERSION which you cannot @@ -1073,7 +1328,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int int olr; int val; struct net *net = sock_net(sk); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt; + + mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); + if (mrt == NULL) + return -ENOENT; if (optname != MRT_VERSION && #ifdef CONFIG_IP_PIMSM @@ -1115,7 +1374,11 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) struct vif_device *vif; struct mfc_cache *c; struct net *net = sock_net(sk); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt; + + mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); + if (mrt == NULL) + return -ENOENT; switch (cmd) { case SIOCGETVIFCNT: @@ -1166,17 +1429,20 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v { struct net_device *dev = ptr; struct net *net = dev_net(dev); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt; struct vif_device *v; int ct; LIST_HEAD(list); if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; - v = &mrt->vif_table[0]; - for (ct = 0; ct < mrt->maxvif; ct++, v++) { - if (v->dev == dev) - vif_delete(mrt, ct, 1, &list); + + ipmr_for_each_table(mrt, net) { + v = &mrt->vif_table[0]; + for (ct = 0; ct < mrt->maxvif; ct++, v++) { + if (v->dev == dev) + vif_delete(mrt, ct, 1, &list); + } } unregister_netdevice_many(&list); return NOTIFY_DONE; @@ -1443,8 +1709,9 @@ int ip_mr_input(struct sk_buff *skb) { struct mfc_cache *cache; struct net *net = dev_net(skb->dev); - struct mr_table *mrt = net->ipv4.mrt; int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; + struct mr_table *mrt; + int err; /* Packet is looped back after forward, it should not be forwarded second time, but still can be delivered locally. 
@@ -1452,6 +1719,10 @@ int ip_mr_input(struct sk_buff *skb) if (IPCB(skb)->flags&IPSKB_FORWARDED) goto dont_forward; + err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt); + if (err < 0) + return err; + if (!local) { if (IPCB(skb)->opt.router_alert) { if (ip_call_ra_chain(skb)) @@ -1522,12 +1793,11 @@ dont_forward: } #ifdef CONFIG_IP_PIMSM -static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) +static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, + unsigned int pimlen) { struct net_device *reg_dev = NULL; struct iphdr *encap; - struct net *net = dev_net(skb->dev); - struct mr_table *mrt = net->ipv4.mrt; encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); /* @@ -1578,18 +1848,21 @@ int pim_rcv_v1(struct sk_buff * skb) { struct igmphdr *pim; struct net *net = dev_net(skb->dev); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) goto drop; pim = igmp_hdr(skb); + if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) + goto drop; + if (!mrt->mroute_do_pim || pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) goto drop; - if (__pim_rcv(skb, sizeof(*pim))) { + if (__pim_rcv(mrt, skb, sizeof(*pim))) { drop: kfree_skb(skb); } @@ -1601,6 +1874,8 @@ drop: static int pim_rcv(struct sk_buff * skb) { struct pimreghdr *pim; + struct net *net = dev_net(skb->dev); + struct mr_table *mrt; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) goto drop; @@ -1612,7 +1887,10 @@ static int pim_rcv(struct sk_buff * skb) csum_fold(skb_checksum(skb, 0, skb->len, 0)))) goto drop; - if (__pim_rcv(skb, sizeof(*pim))) { + if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) + goto drop; + + if (__pim_rcv(mrt, skb, sizeof(*pim))) { drop: kfree_skb(skb); } @@ -1663,10 +1941,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, int nowait) { int err; - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt; struct mfc_cache *cache; struct rtable *rt = skb_rtable(skb); + mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); + if (mrt == NULL) + return -ENOENT; + read_lock(&mrt_lock); cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst); @@ -1717,6 +1999,7 @@ int ipmr_get_route(struct net *net, */ struct ipmr_vif_iter { struct seq_net_private p; + struct mr_table *mrt; int ct; }; @@ -1724,7 +2007,7 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) { - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt = iter->mrt; for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { if (!VIF_EXISTS(mrt, iter->ct)) @@ -1738,7 +2021,15 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net, static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) __acquires(mrt_lock) { + struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); + struct mr_table *mrt; + + mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); + if (mrt == NULL) + return ERR_PTR(-ENOENT); + + iter->mrt = mrt; read_lock(&mrt_lock); return *pos ? 
ipmr_vif_seq_idx(net, seq->private, *pos - 1) @@ -1749,7 +2040,7 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt = iter->mrt; ++*pos; if (v == SEQ_START_TOKEN) @@ -1771,8 +2062,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) static int ipmr_vif_seq_show(struct seq_file *seq, void *v) { - struct net *net = seq_file_net(seq); - struct mr_table *mrt = net->ipv4.mrt; + struct ipmr_vif_iter *iter = seq->private; + struct mr_table *mrt = iter->mrt; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -1814,6 +2105,7 @@ static const struct file_operations ipmr_vif_fops = { struct ipmr_mfc_iter { struct seq_net_private p; + struct mr_table *mrt; struct list_head *cache; int ct; }; @@ -1822,7 +2114,7 @@ struct ipmr_mfc_iter { static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_mfc_iter *it, loff_t pos) { - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt = it->mrt; struct mfc_cache *mfc; read_lock(&mrt_lock); @@ -1850,7 +2142,13 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) { struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); + struct mr_table *mrt; + + mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); + if (mrt == NULL) + return ERR_PTR(-ENOENT); + it->mrt = mrt; it->cache = NULL; it->ct = 0; return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) @@ -1862,7 +2160,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct mfc_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt = it->mrt; ++*pos; @@ -1903,8 +2201,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) { struct ipmr_mfc_iter *it = seq->private; - struct net *net = seq_file_net(seq); - struct mr_table *mrt = net->ipv4.mrt; + struct mr_table *mrt = it->mrt; if (it->cache == &mrt->mfc_unres_queue) spin_unlock_bh(&mfc_unres_lock); @@ -1915,8 +2212,6 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; - struct net *net = seq_file_net(seq); - struct mr_table *mrt = net->ipv4.mrt; if (v == SEQ_START_TOKEN) { seq_puts(seq, @@ -1924,6 +2219,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) } else { const struct mfc_cache *mfc = v; const struct ipmr_mfc_iter *it = seq->private; + const struct mr_table *mrt = it->mrt; seq_printf(seq, "%08lX %08lX %-3hd", (unsigned long) mfc->mfc_mcastgrp, @@ -1989,28 +2285,11 @@ static const struct net_protocol pim_protocol = { */ static int __net_init ipmr_net_init(struct net *net) { - struct mr_table *mrt; - unsigned int i; - int err = 0; + int err; - mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); - if (mrt == NULL) { - err = -ENOMEM; + err = ipmr_rules_init(net); + if (err < 0) goto fail; - } - - /* Forwarding cache */ - for (i = 0; i < MFC_LINES; i++) - INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); - - INIT_LIST_HEAD(&mrt->mfc_unres_queue); - - setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, - (unsigned long)net); - -#ifdef CONFIG_IP_PIMSM - mrt->mroute_reg_vif_num = -1; -#endif #ifdef CONFIG_PROC_FS err = -ENOMEM; @@ -2019,15 +2298,13 @@ static int __net_init ipmr_net_init(struct net *net) if (!proc_net_fops_create(net, "ip_mr_cache", 
0, &ipmr_mfc_fops)) goto proc_cache_fail; #endif - - net->ipv4.mrt = mrt; return 0; #ifdef CONFIG_PROC_FS proc_cache_fail: proc_net_remove(net, "ip_mr_vif"); proc_vif_fail: - kfree(mrt); + ipmr_rules_exit(net); #endif fail: return err; } @@ -2039,7 +2316,7 @@ static void __net_exit ipmr_net_exit(struct net *net) { #ifdef CONFIG_PROC_FS proc_net_remove(net, "ip_mr_cache"); proc_net_remove(net, "ip_mr_vif"); #endif - kfree(net->ipv4.mrt); + ipmr_rules_exit(net); } static struct pernet_operations ipmr_net_ops = { -- cgit v1.2.3 From b0ebb739a8f68039f03e80b3476b204fe5adf0d7 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Thu, 15 Apr 2010 13:29:28 +0200 Subject: ipv4: ipmr: fix invalid cache resolving when adding a non-matching entry The patch to convert struct mfc_cache to list_heads (ipv4: ipmr: convert struct mfc_cache to struct list_head) introduced a bug when adding new cache entries that don't match any unresolved entries. The unres queue is searched for a matching entry, which is then resolved. When no matching entry is present, the iterator points to the head of the list, but is treated as a matching entry. Use a separate variable to indicate that a matching entry was found. Signed-off-by: Patrick McHardy --- net/ipv4/ipmr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net/ipv4/ipmr.c') diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 5df5fd74c6d1..0643fb6d47c4 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1089,12 +1089,14 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, * Check to see if we resolved a queued list. If so we * need to send on the frames and tidy up. */ + found = false; spin_lock_bh(&mfc_unres_lock); list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { if (uc->mfc_origin == c->mfc_origin && uc->mfc_mcastgrp == c->mfc_mcastgrp) { list_del(&uc->list); atomic_dec(&mrt->cache_resolve_queue_len); + found = true; break; } } @@ -1102,7 +1104,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, del_timer(&mrt->ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); - if (uc) { + if (found) { ipmr_cache_resolve(net, mrt, uc, c); ipmr_cache_free(uc); } -- cgit v1.2.3 From 8de53dfbf9a0a0f7538c005137059c5c021476e1 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Thu, 15 Apr 2010 13:29:28 +0200 Subject: ipv4: ipmr: fix NULL pointer deref during unres queue destruction Fix an oversight in ipmr_destroy_unres() - the net pointer is unconditionally initialized to NULL, resulting in a NULL pointer dereference later on. Fix by adding a net pointer to struct mr_table and using it in ipmr_destroy_unres(). 
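For reference, the pnet accessors this fix relies on behave roughly as follows; this is a simplified sketch of the helpers from <net/net_namespace.h>, not their verbatim definitions. With CONFIG_NET_NS disabled the per-table pointer is never stored and read_pnet() falls back to &init_net, which is why the new struct mr_table field can stay inside an #ifdef while ipmr_destroy_unres() still always obtains a valid namespace.

#ifdef CONFIG_NET_NS
static inline void write_pnet(struct net **pnet, struct net *net)
{
	*pnet = net;		/* record the namespace that owns the table */
}

static inline struct net *read_pnet(struct net * const *pnet)
{
	return *pnet;		/* namespace recorded at table creation */
}
#else
#define write_pnet(pnet, net)	do { (void)(net); } while (0)
#define read_pnet(pnet)		(&init_net)	/* single-namespace build */
#endif
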
Signed-off-by: Patrick McHardy --- net/ipv4/ipmr.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'net/ipv4/ipmr.c') diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 0643fb6d47c4..7d8a2bcecb76 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -71,6 +71,9 @@ struct mr_table { struct list_head list; +#ifdef CONFIG_NET_NS + struct net *net; +#endif u32 id; struct sock *mroute_sk; struct timer_list ipmr_expire_timer; @@ -308,6 +311,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); if (mrt == NULL) return NULL; + write_pnet(&mrt->net, net); mrt->id = id; /* Forwarding cache */ @@ -580,7 +584,7 @@ static inline void ipmr_cache_free(struct mfc_cache *c) static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) { - struct net *net = NULL; //mrt->net; + struct net *net = read_pnet(&mrt->net); struct sk_buff *skb; struct nlmsgerr *e; -- cgit v1.2.3 From 0eae88f31ca2b88911ce843452054139e028771f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 20 Apr 2010 19:06:52 -0700 Subject: net: Fix various endianness glitches Sparse can help us find endianness bugs, but we need to make some cleanups to be able to more easily spot real bugs. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/bridge/br_multicast.c | 2 +- net/bridge/br_private.h | 15 ++++++++------- net/ethernet/eth.c | 2 +- net/ipv4/af_inet.c | 8 ++++---- net/ipv4/ipmr.c | 10 +++++----- net/ipv4/route.c | 29 ++++++++++++++--------------- net/ipv4/tcp.c | 15 ++++++++------- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv4/tcp_output.c | 4 ++-- net/ipv4/udp.c | 8 ++++---- net/ipv6/addrconf.c | 3 ++- net/ipv6/ip6_fib.c | 3 ++- net/ipv6/tcp_ipv6.c | 4 ++-- net/ipv6/udp.c | 4 ++-- net/sched/sch_sfq.c | 10 +++++----- net/sunrpc/xprt.c | 2 +- net/xfrm/xfrm_hash.h | 3 ++- 17 files changed, 65 insertions(+), 61 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 3fe86ffc069c..61e1d1094b85 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -29,7 +29,7 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) { - return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1); + return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); } static struct net_bridge_mdb_entry *__br_mdb_ip_get( diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 791d4ab0fd4d..63181e4a2a67 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -130,19 +130,20 @@ struct net_bridge_port #endif }; +struct br_cpu_netstats { + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long tx_packets; + unsigned long tx_bytes; +}; + struct net_bridge { spinlock_t lock; struct list_head port_list; struct net_device *dev; - struct br_cpu_netstats __percpu { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long tx_packets; - unsigned long tx_bytes; - } *stats; - + struct br_cpu_netstats __percpu *stats; spinlock_t hash_lock; struct hlist_head hash[BR_HASH_SIZE]; unsigned long feature_mask; diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 205a1c12f3c0..35846964082c 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -136,7 +136,7 @@ int eth_rebuild_header(struct sk_buff *skb) default: printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n", - dev->name, (int)eth->h_proto); + dev->name, (__force int)eth->h_proto); memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); break; diff --git 
a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 5ca7290c2e61..9f52880fae10 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1323,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) goto out_unlock; - id = ntohl(*(u32 *)&iph->id); - flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); + id = ntohl(*(__be32 *)&iph->id); + flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); id >>= 16; for (p = *head; p; p = p->next) { @@ -1337,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, if ((iph->protocol ^ iph2->protocol) | (iph->tos ^ iph2->tos) | - (iph->saddr ^ iph2->saddr) | - (iph->daddr ^ iph2->daddr)) { + ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) | + ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 7d8a2bcecb76..a2df5012a1d0 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1772,10 +1772,10 @@ int ip_mr_input(struct sk_buff *skb) vif = ipmr_find_vif(mrt, skb->dev); if (vif >= 0) { - int err = ipmr_cache_unresolved(mrt, vif, skb); + int err2 = ipmr_cache_unresolved(mrt, vif, skb); read_unlock(&mrt_lock); - return err; + return err2; } read_unlock(&mrt_lock); kfree_skb(skb); @@ -2227,9 +2227,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) const struct ipmr_mfc_iter *it = seq->private; const struct mr_table *mrt = it->mrt; - seq_printf(seq, "%08lX %08lX %-3hd", - (unsigned long) mfc->mfc_mcastgrp, - (unsigned long) mfc->mfc_origin, + seq_printf(seq, "%08X %08X %-3hd", + (__force u32) mfc->mfc_mcastgrp, + (__force u32) mfc->mfc_origin, mfc->mfc_parent); if (it->cache != &mrt->mfc_unres_queue) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index cb562fdd9b9a..a947428ef0ae 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -258,10 +258,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); (__raw_get_cpu_var(rt_cache_stat).field++) static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, - int genid) + int genid) { - return jhash_3words((__force u32)(__be32)(daddr), - (__force u32)(__be32)(saddr), + return jhash_3words((__force u32)daddr, (__force u32)saddr, idx, genid) & rt_hash_mask; } @@ -378,12 +377,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) struct rtable *r = v; int len; - seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t" - "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", + seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" + "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", r->u.dst.dev ? r->u.dst.dev->name : "*", - (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway, + (__force u32)r->rt_dst, + (__force u32)r->rt_gateway, r->rt_flags, atomic_read(&r->u.dst.__refcnt), - r->u.dst.__use, 0, (unsigned long)r->rt_src, + r->u.dst.__use, 0, (__force u32)r->rt_src, (dst_metric(&r->u.dst, RTAX_ADVMSS) ? 
(int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), dst_metric(&r->u.dst, RTAX_WINDOW), @@ -685,18 +685,17 @@ static inline bool rt_caching(const struct net *net) static inline bool compare_hash_inputs(const struct flowi *fl1, const struct flowi *fl2) { - return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | - (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | + return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | + ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | (fl1->iif ^ fl2->iif)) == 0); } static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) { - return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | - (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) | + return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | + ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | (fl1->mark ^ fl2->mark) | - (*(u16 *)&fl1->nl_u.ip4_u.tos ^ - *(u16 *)&fl2->nl_u.ip4_u.tos) | + (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) | (fl1->oif ^ fl2->oif) | (fl1->iif ^ fl2->iif)) == 0; } @@ -2319,8 +2318,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, rcu_read_lock(); for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; rth = rcu_dereference(rth->u.dst.rt_next)) { - if (((rth->fl.fl4_dst ^ daddr) | - (rth->fl.fl4_src ^ saddr) | + if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | + ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | (rth->fl.iif ^ iif) | rth->fl.oif | (rth->fl.fl4_tos ^ tos)) == 0 && diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 77208334a613..6689c61cab47 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2721,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) struct tcphdr *th2; unsigned int len; unsigned int thlen; - unsigned int flags; + __be32 flags; unsigned int mss = 1; unsigned int hlen; unsigned int off; @@ -2771,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) found: flush = NAPI_GRO_CB(p)->flush; - flush |= flags & TCP_FLAG_CWR; - flush |= (flags ^ tcp_flag_word(th2)) & - ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); - flush |= th->ack_seq ^ th2->ack_seq; + flush |= (__force int)(flags & TCP_FLAG_CWR); + flush |= (__force int)((flags ^ tcp_flag_word(th2)) & + ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); + flush |= (__force int)(th->ack_seq ^ th2->ack_seq); for (i = sizeof(*th); i < thlen; i += 4) flush |= *(u32 *)((u8 *)th + i) ^ *(u32 *)((u8 *)th2 + i); @@ -2795,8 +2795,9 @@ found: out_check_final: flush = len < mss; - flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | - TCP_FLAG_SYN | TCP_FLAG_FIN); + flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | + TCP_FLAG_RST | TCP_FLAG_SYN | + TCP_FLAG_FIN)); if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) pp = head; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ad08392a738c..4d6717d1e61c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1286,8 +1286,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_release; /* Secret recipe starts with IP addresses */ - *mess++ ^= daddr; - *mess++ ^= saddr; + *mess++ ^= (__force u32)daddr; + *mess++ ^= (__force u32)saddr; /* plus variable length Initiator Cookie */ c = (u8 *)mess; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2b7d71fb8439..429ad9286efc 100644 --- a/net/ipv4/tcp_output.c +++ 
b/net/ipv4/tcp_output.c @@ -861,7 +861,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, th->urg_ptr = htons(tp->snd_up - tcb->seq); th->urg = 1; } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { - th->urg_ptr = 0xFFFF; + th->urg_ptr = htons(0xFFFF); th->urg = 1; } } @@ -2485,7 +2485,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, *tail-- ^= TCP_SKB_CB(skb)->seq + 1; /* recommended */ - *tail-- ^= ((th->dest << 16) | th->source); + *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source); *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ sha_transform((__u32 *)&xvp->cookie_bakery[0], diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 666b963496ff..1e18f9cc9247 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -307,13 +307,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, unsigned int port) { - return jhash_1word(saddr, net_hash_mix(net)) ^ port; + return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; } int udp_v4_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = - udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum); + udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); unsigned int hash2_partial = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); @@ -466,14 +466,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, daddr, hnum, dif, hslot2, slot2); if (!result) { - hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum); + hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, - INADDR_ANY, hnum, dif, + htonl(INADDR_ANY), hnum, dif, hslot2, slot2); } rcu_read_unlock(); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 7cba8845242f..34d2d649e396 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -588,7 +588,8 @@ static u32 ipv6_addr_hash(const struct in6_addr *addr) * We perform the hash function over the last 64 bits of the address * This will include the IEEE address token on links that support it. */ - return jhash_2words(addr->s6_addr32[2], addr->s6_addr32[3], 0) + return jhash_2words((__force u32)addr->s6_addr32[2], + (__force u32)addr->s6_addr32[3], 0) & (IN6_ADDR_HSIZE - 1); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index dc6e0b8f260d..92a122b7795d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -144,7 +144,8 @@ static __inline__ __be32 addr_bit_set(void *token, int fn_bit) * htonl(1 << ((~fn_bit)&0x1F)) * See include/asm-generic/bitops/le.h. 
*/ - return (1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & addr[fn_bit >> 5]; + return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & + addr[fn_bit >> 5]; } static __inline__ struct fib6_node * node_alloc(void) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index bd5ef7b6e48e..a92b4a5cd8bf 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1234,12 +1234,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; /* Secret recipe starts with IP addresses */ - d = &ipv6_hdr(skb)->daddr.s6_addr32[0]; + d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0]; *mess++ ^= *d++; *mess++ ^= *d++; *mess++ ^= *d++; *mess++ ^= *d++; - d = &ipv6_hdr(skb)->saddr.s6_addr32[0]; + d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0]; *mess++ ^= *d++; *mess++ ^= *d++; *mess++ ^= *d++; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 90824852f598..92bf9033e245 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -91,9 +91,9 @@ static unsigned int udp6_portaddr_hash(struct net *net, if (ipv6_addr_any(addr6)) hash = jhash_1word(0, mix); else if (ipv6_addr_v4mapped(addr6)) - hash = jhash_1word(addr6->s6_addr32[3], mix); + hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); else - hash = jhash2(addr6->s6_addr32, 4, mix); + hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); return hash ^ port; } diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index c5a9ac566007..c65762823f5e 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -123,8 +123,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) case htons(ETH_P_IP): { const struct iphdr *iph = ip_hdr(skb); - h = iph->daddr; - h2 = iph->saddr ^ iph->protocol; + h = (__force u32)iph->daddr; + h2 = (__force u32)iph->saddr ^ iph->protocol; if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP || @@ -138,8 +138,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) case htons(ETH_P_IPV6): { struct ipv6hdr *iph = ipv6_hdr(skb); - h = iph->daddr.s6_addr32[3]; - h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr; + h = (__force u32)iph->daddr.s6_addr32[3]; + h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; if (iph->nexthdr == IPPROTO_TCP || iph->nexthdr == IPPROTO_UDP || iph->nexthdr == IPPROTO_UDPLITE || @@ -150,7 +150,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) break; } default: - h = (unsigned long)skb_dst(skb) ^ skb->protocol; + h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol; h2 = (unsigned long)skb->sk; } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 42f09ade0044..699ade68aac1 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -974,7 +974,7 @@ void xprt_reserve(struct rpc_task *task) static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) { - return xprt->xid++; + return (__force __be32)xprt->xid++; } static inline void xprt_init_xid(struct rpc_xprt *xprt) diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h index e5195c99f71e..1396572d2ade 100644 --- a/net/xfrm/xfrm_hash.h +++ b/net/xfrm/xfrm_hash.h @@ -16,7 +16,8 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr) static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) { - return ntohl(daddr->a4 + saddr->a4); + u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4; + return ntohl((__force __be32)sum); } static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, 
xfrm_address_t *saddr) -- cgit v1.2.3 From 3d0c9c4eb2dbdcc461be4084abd87a9a9e70f713 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Mon, 26 Apr 2010 16:02:04 +0200 Subject: net: fib_rules: mark arguments to fib_rules_register const and __net_initdata fib_rules_register() duplicates the template passed to it without modification, mark the argument as const. Additionally the templates are only needed when instantiating a new namespace, so mark them as __net_initdata, which means they can be discarded when CONFIG_NET_NS=n. Signed-off-by: Patrick McHardy --- include/net/fib_rules.h | 2 +- net/core/fib_rules.c | 2 +- net/decnet/dn_rules.c | 2 +- net/ipv4/fib_rules.c | 2 +- net/ipv4/ipmr.c | 2 +- net/ipv6/fib6_rules.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 52bd9e6c9141..e8923bc20f9f 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -104,7 +104,7 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla) return frh->table; } -extern struct fib_rules_ops *fib_rules_register(struct fib_rules_ops *, struct net *); +extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *); extern void fib_rules_unregister(struct fib_rules_ops *); extern void fib_rules_cleanup_ops(struct fib_rules_ops *); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 1bc66592453c..42e84e08a1be 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -122,7 +122,7 @@ errout: } struct fib_rules_ops * -fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) +fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net) { struct fib_rules_ops *ops; int err; diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index af28dcc21844..1226bcad776b 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -216,7 +216,7 @@ static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) dn_rt_cache_flush(-1); } -static struct fib_rules_ops dn_fib_rules_ops_template = { +static const struct fib_rules_ops __net_initdata dn_fib_rules_ops_template = { .family = FIB_RULES_DECNET, .rule_size = sizeof(struct dn_fib_rule), .addr_size = sizeof(u16), diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 3ec84fea5b71..8ab62a56701c 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -245,7 +245,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops) rt_cache_flush(ops->fro_net, -1); } -static struct fib_rules_ops fib4_rules_ops_template = { +static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = { .family = FIB_RULES_IPV4, .rule_size = sizeof(struct fib4_rule), .addr_size = sizeof(u32), diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index a2df5012a1d0..7d3e382aed64 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -216,7 +216,7 @@ static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, return 0; } -static struct fib_rules_ops ipmr_rules_ops_template = { +static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = { .family = FIB_RULES_IPMR, .rule_size = sizeof(struct ipmr_rule), .addr_size = sizeof(u32), diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 8124f16f2ac2..35f6949446f0 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -237,7 +237,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) + nla_total_size(16); /* src */ } -static struct fib_rules_ops fib6_rules_ops_template = { 
+static const struct fib_rules_ops __net_initdata fib6_rules_ops_template = { .family = FIB_RULES_IPV6, .rule_size = sizeof(struct fib6_rule), .addr_size = sizeof(struct in6_addr), -- cgit v1.2.3 From 25239cee7e8732dbdc9f5d324f1c22a3bdec1d1f Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Mon, 26 Apr 2010 16:02:05 +0200 Subject: net: rtnetlink: decouple rtnetlink address families from real address families Decouple rtnetlink address families from real address families in socket.h to be able to add rtnetlink interfaces to code that is not a real address family without increasing AF_MAX/NPROTO. This will be used to add support for multicast route dumping from all tables as the proc interface can't be extended to support anything but the main table without breaking compatibility. This partially undoes the patch to introduce independent families for routing rules and converts ipmr routing rules to a new rtnetlink family. Similar to that patch, values up to 127 are reserved for real address families, values above that may be used arbitrarily. Signed-off-by: Patrick McHardy --- include/linux/fib_rules.h | 8 -------- include/linux/rtnetlink.h | 6 ++++++ net/core/rtnetlink.c | 14 +++++++------- net/decnet/dn_rules.c | 2 +- net/ipv4/fib_rules.c | 2 +- net/ipv4/ipmr.c | 2 +- net/ipv6/fib6_rules.c | 2 +- 7 files changed, 17 insertions(+), 19 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h index 04a397619ebe..51da65b68b85 100644 --- a/include/linux/fib_rules.h +++ b/include/linux/fib_rules.h @@ -15,14 +15,6 @@ /* try to find source address in routing lookups */ #define FIB_RULE_FIND_SADDR 0x00010000 -/* fib_rules families. values up to 127 are reserved for real address - * families, values above 128 may be used arbitrarily. - */ -#define FIB_RULES_IPV4 AF_INET -#define FIB_RULES_IPV6 AF_INET6 -#define FIB_RULES_DECNET AF_DECnet -#define FIB_RULES_IPMR 128 - struct fib_rule_hdr { __u8 family; __u8 dst_len; diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index d1c7c90e9cd4..5a42c36cb6aa 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -7,6 +7,12 @@ #include #include +/* rtnetlink families. Values up to 127 are reserved for real address + * families, values above 128 may be used arbitrarily. + */ +#define RTNL_FAMILY_IPMR 128 +#define RTNL_FAMILY_MAX 128 + /**** * Routing/neighbour discovery messages. 
****/ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 78c85985cb30..fd781b62fa7f 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -98,7 +98,7 @@ int lockdep_rtnl_is_held(void) EXPORT_SYMBOL(lockdep_rtnl_is_held); #endif /* #ifdef CONFIG_PROVE_LOCKING */ -static struct rtnl_link *rtnl_msg_handlers[NPROTO]; +static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; static inline int rtm_msgindex(int msgtype) { @@ -118,7 +118,7 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex) { struct rtnl_link *tab; - if (protocol < NPROTO) + if (protocol <= RTNL_FAMILY_MAX) tab = rtnl_msg_handlers[protocol]; else tab = NULL; @@ -133,7 +133,7 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) { struct rtnl_link *tab; - if (protocol < NPROTO) + if (protocol <= RTNL_FAMILY_MAX) tab = rtnl_msg_handlers[protocol]; else tab = NULL; @@ -167,7 +167,7 @@ int __rtnl_register(int protocol, int msgtype, struct rtnl_link *tab; int msgindex; - BUG_ON(protocol < 0 || protocol >= NPROTO); + BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); msgindex = rtm_msgindex(msgtype); tab = rtnl_msg_handlers[protocol]; @@ -219,7 +219,7 @@ int rtnl_unregister(int protocol, int msgtype) { int msgindex; - BUG_ON(protocol < 0 || protocol >= NPROTO); + BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); msgindex = rtm_msgindex(msgtype); if (rtnl_msg_handlers[protocol] == NULL) @@ -241,7 +241,7 @@ EXPORT_SYMBOL_GPL(rtnl_unregister); */ void rtnl_unregister_all(int protocol) { - BUG_ON(protocol < 0 || protocol >= NPROTO); + BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); kfree(rtnl_msg_handlers[protocol]); rtnl_msg_handlers[protocol] = NULL; @@ -1384,7 +1384,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) if (s_idx == 0) s_idx = 1; - for (idx = 1; idx < NPROTO; idx++) { + for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { int type = cb->nlh->nlmsg_type-RTM_BASE; if (idx < s_idx || idx == PF_PACKET) continue; diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 1226bcad776b..48fdf10be7a1 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -217,7 +217,7 @@ static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) } static const struct fib_rules_ops __net_initdata dn_fib_rules_ops_template = { - .family = FIB_RULES_DECNET, + .family = AF_DECnet, .rule_size = sizeof(struct dn_fib_rule), .addr_size = sizeof(u16), .action = dn_fib_rule_action, diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 8ab62a56701c..76daeb5ff564 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -246,7 +246,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops) } static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = { - .family = FIB_RULES_IPV4, + .family = AF_INET, .rule_size = sizeof(struct fib4_rule), .addr_size = sizeof(u32), .action = fib4_rule_action, diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 7d3e382aed64..41e8fc0ce8b3 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -217,7 +217,7 @@ static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, } static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = { - .family = FIB_RULES_IPMR, + .family = RTNL_FAMILY_IPMR, .rule_size = sizeof(struct ipmr_rule), .addr_size = sizeof(u32), .action = ipmr_rule_action, diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 35f6949446f0..8e44f8f9c188 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -238,7 +238,7 @@ static 
size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) } static const struct fib_rules_ops __net_initdata fib6_rules_ops_template = { - .family = FIB_RULES_IPV6, + .family = AF_INET6, .rule_size = sizeof(struct fib6_rule), .addr_size = sizeof(struct in6_addr), .action = fib6_rule_action, -- cgit v1.2.3 From cb6a4e461fb427689920472bd7335f926d521747 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Mon, 26 Apr 2010 16:02:08 +0200 Subject: net: ipmr: add support for dumping routing tables over netlink The ipmr /proc interface (ip_mr_cache) can't be extended to dump routes from any tables but the main table in a backwards compatible fashion since the output format ends in a variable amount of output interfaces. Introduce a new netlink interface to dump multicast routes from all tables, similar to the netlink interface for regular routes. Signed-off-by: Patrick McHardy --- net/ipv4/ipmr.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 89 insertions(+), 7 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 41e8fc0ce8b3..eddfd12f55b8 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -128,8 +128,8 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt, int local); static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); -static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, - struct mfc_cache *c, struct rtmsg *rtm); +static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + struct mfc_cache *c, struct rtmsg *rtm); static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES @@ -831,7 +831,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); - if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { + if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = (skb_tail_pointer(skb) - (u8 *)nlh); } else { @@ -1904,9 +1904,8 @@ drop: } #endif -static int -ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, - struct rtmsg *rtm) +static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + struct mfc_cache *c, struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; @@ -1994,11 +1993,93 @@ int ipmr_get_route(struct net *net, if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; - err = ipmr_fill_mroute(mrt, skb, cache, rtm); + err = __ipmr_fill_mroute(mrt, skb, cache, rtm); read_unlock(&mrt_lock); return err; } +static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + u32 pid, u32 seq, struct mfc_cache *c) +{ + struct nlmsghdr *nlh; + struct rtmsg *rtm; + + nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); + if (nlh == NULL) + return -EMSGSIZE; + + rtm = nlmsg_data(nlh); + rtm->rtm_family = RTNL_FAMILY_IPMR; + rtm->rtm_dst_len = 32; + rtm->rtm_src_len = 32; + rtm->rtm_tos = 0; + rtm->rtm_table = mrt->id; + NLA_PUT_U32(skb, RTA_TABLE, mrt->id); + rtm->rtm_type = RTN_MULTICAST; + rtm->rtm_scope = RT_SCOPE_UNIVERSE; + rtm->rtm_protocol = RTPROT_UNSPEC; + rtm->rtm_flags = 0; + + NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); + NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); + + if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) + goto nla_put_failure; + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int 
ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct mr_table *mrt; + struct mfc_cache *mfc; + unsigned int t = 0, s_t; + unsigned int h = 0, s_h; + unsigned int e = 0, s_e; + + s_t = cb->args[0]; + s_h = cb->args[1]; + s_e = cb->args[2]; + + read_lock(&mrt_lock); + ipmr_for_each_table(mrt, net) { + if (t < s_t) + goto next_table; + if (t > s_t) + s_h = 0; + for (h = s_h; h < MFC_LINES; h++) { + list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) { + if (e < s_e) + goto next_entry; + if (ipmr_fill_mroute(mrt, skb, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + mfc) < 0) + goto done; +next_entry: + e++; + } + e = s_e = 0; + } + s_h = 0; +next_table: + t++; + } +done: + read_unlock(&mrt_lock); + + cb->args[2] = e; + cb->args[1] = h; + cb->args[0] = t; + + return skb->len; +} + #ifdef CONFIG_PROC_FS /* * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif @@ -2355,6 +2436,7 @@ int __init ip_mr_init(void) goto add_proto_fail; } #endif + rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute); return 0; #ifdef CONFIG_IP_PIMSM_V2 -- cgit v1.2.3 From d19d56ddc88e7895429ef118db9c83c7bbe3ce6a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 17 May 2010 22:36:55 -0700 Subject: net: Introduce skb_tunnel_rx() helper skb rxhash should be cleared when a skb is handled by a tunnel before being delivered again, so that correct packet steering can take place. There are other cleanups and accounting that we can factorize in a new helper, skb_tunnel_rx() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/dst.h | 20 ++++++++++++++++++++ net/ipv4/ip_gre.c | 9 +-------- net/ipv4/ipip.c | 7 ++----- net/ipv4/ipmr.c | 8 +++----- net/ipv6/ip6_tunnel.c | 8 ++------ net/ipv6/ip6mr.c | 8 +++----- net/ipv6/sit.c | 8 +++----- 7 files changed, 34 insertions(+), 34 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/include/net/dst.h b/include/net/dst.h index 27207a13f2a6..612069beda73 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -226,6 +226,26 @@ static inline void skb_dst_force(struct sk_buff *skb) } } + +/** + * skb_tunnel_rx - prepare skb for rx reinsert + * @skb: buffer + * @dev: tunnel device + * + * After decapsulation, packet is going to re-enter (netif_rx()) our stack, + * so make some cleanups, and perform accounting. + */ +static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) +{ + skb->dev = dev; + /* TODO : stats should be SMP safe */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + skb->rxhash = 0; + skb_dst_drop(skb); + nf_reset(skb); +} + /* Children define the path of the packet through the * Linux networking. Thus, destinations are stackable. */ diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index fe381d12ecdd..498cf69c7977 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -538,7 +538,6 @@ static int ipgre_rcv(struct sk_buff *skb) struct ip_tunnel *tunnel; int offset = 4; __be16 gre_proto; - unsigned int len; if (!pskb_may_pull(skb, 16)) goto drop_nolock; @@ -629,8 +628,6 @@ static int ipgre_rcv(struct sk_buff *skb) tunnel->i_seqno = seqno + 1; } - len = skb->len; - /* Warning: All skb pointers will be invalidated! 
*/ if (tunnel->dev->type == ARPHRD_ETHER) { if (!pskb_may_pull(skb, ETH_HLEN)) { @@ -644,11 +641,7 @@ static int ipgre_rcv(struct sk_buff *skb) skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); } - stats->rx_packets++; - stats->rx_bytes += len; - skb->dev = tunnel->dev; - skb_dst_drop(skb); - nf_reset(skb); + skb_tunnel_rx(skb, tunnel->dev); skb_reset_network_header(skb); ipgre_ecn_decapsulate(iph, skb); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 0b27b14dcc9d..7fd636711037 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -374,11 +374,8 @@ static int ipip_rcv(struct sk_buff *skb) skb->protocol = htons(ETH_P_IP); skb->pkt_type = PACKET_HOST; - tunnel->dev->stats.rx_packets++; - tunnel->dev->stats.rx_bytes += skb->len; - skb->dev = tunnel->dev; - skb_dst_drop(skb); - nf_reset(skb); + skb_tunnel_rx(skb, tunnel->dev); + ipip_ecn_decapsulate(iph, skb); netif_rx(skb); rcu_read_unlock(); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 7a7ee1cc3b5a..217ebe035b34 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1831,14 +1831,12 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, skb->mac_header = skb->network_header; skb_pull(skb, (u8*)encap - skb->data); skb_reset_network_header(skb); - skb->dev = reg_dev; skb->protocol = htons(ETH_P_IP); skb->ip_summed = 0; skb->pkt_type = PACKET_HOST; - skb_dst_drop(skb); - reg_dev->stats.rx_bytes += skb->len; - reg_dev->stats.rx_packets++; - nf_reset(skb); + + skb_tunnel_rx(skb, reg_dev); + netif_rx(skb); dev_put(reg_dev); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 2599870747ec..8f39893d8081 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -723,14 +723,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, skb->protocol = htons(protocol); skb->pkt_type = PACKET_HOST; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); - skb->dev = t->dev; - skb_dst_drop(skb); - nf_reset(skb); - dscp_ecn_decapsulate(t, ipv6h, skb); + skb_tunnel_rx(skb, t->dev); - t->dev->stats.rx_packets++; - t->dev->stats.rx_bytes += skb->len; + dscp_ecn_decapsulate(t, ipv6h, skb); netif_rx(skb); rcu_read_unlock(); return 0; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 163850e22b11..bd9e7d3e9c8e 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -658,14 +658,12 @@ static int pim6_rcv(struct sk_buff *skb) skb->mac_header = skb->network_header; skb_pull(skb, (u8 *)encap - skb->data); skb_reset_network_header(skb); - skb->dev = reg_dev; skb->protocol = htons(ETH_P_IPV6); skb->ip_summed = 0; skb->pkt_type = PACKET_HOST; - skb_dst_drop(skb); - reg_dev->stats.rx_bytes += skb->len; - reg_dev->stats.rx_packets++; - nf_reset(skb); + + skb_tunnel_rx(skb, reg_dev); + netif_rx(skb); dev_put(reg_dev); return 0; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 5abae10cd884..e51e650ea80b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -566,11 +566,9 @@ static int ipip6_rcv(struct sk_buff *skb) kfree_skb(skb); return 0; } - tunnel->dev->stats.rx_packets++; - tunnel->dev->stats.rx_bytes += skb->len; - skb->dev = tunnel->dev; - skb_dst_drop(skb); - nf_reset(skb); + + skb_tunnel_rx(skb, tunnel->dev); + ipip6_ecn_decapsulate(iph, skb); netif_rx(skb); rcu_read_unlock(); -- cgit v1.2.3 From 3fa21e07e6acefa31f974d57fba2b6920a7ebd1a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 17 May 2010 23:08:21 -0700 Subject: net: Remove unnecessary returns from void function()s This patch removes from net/ (but not any netfilter files) all the unnecessary return; statements that precede the last closing brace of 
void functions. It does not remove the returns that are immediately preceded by a label as gcc doesn't like that. Done via: $ grep -rP --include=*.[ch] -l "return;\n}" net/ | \ xargs perl -i -e 'local $/ ; while (<>) { s/\n[ \t\n]+return;\n}/\n}/g; print; }' Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- net/9p/trans_rdma.c | 1 - net/atm/br2684.c | 1 - net/atm/lec.c | 6 ------ net/atm/mpc.c | 32 -------------------------------- net/atm/mpoa_caches.c | 20 -------------------- net/bluetooth/hci_core.c | 2 -- net/bluetooth/l2cap.c | 3 --- net/bluetooth/rfcomm/tty.c | 2 -- net/bluetooth/sco.c | 1 - net/caif/caif_dev.c | 1 - net/can/bcm.c | 2 -- net/decnet/dn_dev.c | 3 --- net/decnet/dn_route.c | 1 - net/ipv4/cipso_ipv4.c | 2 -- net/ipv4/fib_trie.c | 2 -- net/ipv4/ip_gre.c | 1 - net/ipv4/ip_options.c | 1 - net/ipv4/ipmr.c | 1 - net/ipv6/ndisc.c | 2 -- net/ipv6/proc.c | 1 - net/ipv6/route.c | 2 -- net/irda/iriap.c | 2 -- net/irda/irnet/irnet_irda.c | 3 --- net/iucv/af_iucv.c | 1 - net/mac80211/debugfs.h | 1 - net/mac80211/mesh.c | 2 -- net/mac80211/mesh_hwmp.c | 1 - net/netlabel/netlabel_addrlist.h | 2 -- net/netlabel/netlabel_unlabeled.c | 1 - net/sched/cls_flow.c | 1 - net/sched/sch_hfsc.c | 1 - net/sched/sch_ingress.c | 1 - net/sched/sch_mq.c | 1 - net/sched/sch_multiq.c | 1 - net/sched/sch_prio.c | 1 - net/sched/sch_red.c | 1 - net/sctp/associola.c | 2 -- net/sctp/outqueue.c | 2 -- net/sctp/proc.c | 3 --- net/sctp/sm_sideeffect.c | 4 ---- net/sctp/ulpqueue.c | 2 -- net/sunrpc/clnt.c | 1 - net/sunrpc/svcsock.c | 1 - net/sunrpc/xprt.c | 1 - net/sunrpc/xprtsock.c | 4 ---- net/sysctl_net.c | 1 - net/wimax/stack.c | 2 -- net/xfrm/xfrm_policy.c | 1 - 48 files changed, 131 deletions(-) (limited to 'net/ipv4/ipmr.c') diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 041101ab4aa5..0ea20c30466c 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -308,7 +308,6 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma, req, err, status); rdma->state = P9_RDMA_FLUSHING; client->status = Disconnected; - return; } static void diff --git a/net/atm/br2684.c b/net/atm/br2684.c index d6c7ceaf13e9..6719af6a59fa 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -446,7 +446,6 @@ error: net_dev->stats.rx_errors++; free_skb: dev_kfree_skb(skb); - return; } /* diff --git a/net/atm/lec.c b/net/atm/lec.c index feeaf5718472..d98bde1a0ac8 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -161,8 +161,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) skb_queue_tail(&sk->sk_receive_queue, skb2); sk->sk_data_ready(sk, skb2->len); } - - return; } #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ @@ -640,7 +638,6 @@ static void lec_set_multicast_list(struct net_device *dev) * by default, all multicast frames arrive over the bus. 
* eventually support selective multicast service */ - return; } static const struct net_device_ops lec_netdev_ops = { @@ -1199,8 +1196,6 @@ static void __exit lane_module_cleanup(void) dev_lec[i] = NULL; } } - - return; } module_init(lane_module_init); @@ -1334,7 +1329,6 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr, priv->lane2_ops->associate_indicator(dev, mac_addr, tlvs, sizeoftlvs); } - return; } /* diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 436f2e177657..622b471e14e0 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -455,7 +455,6 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr, if (end_of_tlvs - tlvs != 0) pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n", dev->name, end_of_tlvs - tlvs); - return; } /* @@ -684,8 +683,6 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev) if (in_entry == NULL && eg_entry == NULL) dprintk("(%s) unused vcc closed\n", dev->name); - - return; } static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) @@ -783,8 +780,6 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(new_skb); - - return; } static struct atmdev_ops mpc_ops = { /* only send is required */ @@ -873,8 +868,6 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc) mesg.type = SET_MPS_CTRL_ADDR; memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); msg_to_mpoad(&mesg, mpc); - - return; } static void mpoad_close(struct atm_vcc *vcc) @@ -911,8 +904,6 @@ static void mpoad_close(struct atm_vcc *vcc) pr_info("(%s) going down\n", (mpc->dev) ? mpc->dev->name : ""); module_put(THIS_MODULE); - - return; } /* @@ -1122,7 +1113,6 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc) pr_info("(%s) entry already in resolving state\n", (mpc->dev) ? 
mpc->dev->name : ""); mpc->in_ops->put(entry); - return; } /* @@ -1166,7 +1156,6 @@ static void check_qos_and_open_shortcut(struct k_message *msg, } else memset(&msg->qos, 0, sizeof(struct atm_qos)); msg_to_mpoad(msg, client); - return; } static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc) @@ -1240,8 +1229,6 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) mpc->in_ops->put(entry); entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); } while (entry != NULL); - - return; } static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) @@ -1260,8 +1247,6 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc) write_unlock_irq(&mpc->egress_lock); mpc->eg_ops->put(entry); - - return; } static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) @@ -1295,8 +1280,6 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); dprintk("exiting\n"); - - return; } /* @@ -1325,8 +1308,6 @@ static void mps_death(struct k_message *msg, struct mpoa_client *mpc) mpc->in_ops->destroy_cache(mpc); mpc->eg_ops->destroy_cache(mpc); - - return; } static void MPOA_cache_impos_rcvd(struct k_message *msg, @@ -1353,8 +1334,6 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg, write_unlock_irq(&mpc->egress_lock); mpc->eg_ops->put(entry); - - return; } static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, @@ -1392,8 +1371,6 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, pr_info("(%s) targetless LE_ARP request failed\n", mpc->dev->name); } - - return; } static void set_mps_mac_addr_rcvd(struct k_message *msg, @@ -1409,8 +1386,6 @@ static void set_mps_mac_addr_rcvd(struct k_message *msg, return; } client->number_of_mps_macs = 1; - - return; } /* @@ -1436,7 +1411,6 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action) msg->type = action; msg_to_mpoad(msg, mpc); - return; } static void mpc_timer_refresh(void) @@ -1445,8 +1419,6 @@ static void mpc_timer_refresh(void) mpc_timer.data = mpc_timer.expires; mpc_timer.function = mpc_cache_check; add_timer(&mpc_timer); - - return; } static void mpc_cache_check(unsigned long checking_time) @@ -1471,8 +1443,6 @@ static void mpc_cache_check(unsigned long checking_time) mpc = mpc->next; } mpc_timer_refresh(); - - return; } static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, @@ -1561,8 +1531,6 @@ static void __exit atm_mpoa_cleanup(void) kfree(qos); qos = nextqos; } - - return; } module_init(atm_mpoa_init); diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c index e773d8336918..d1b2d9a03144 100644 --- a/net/atm/mpoa_caches.c +++ b/net/atm/mpoa_caches.c @@ -182,8 +182,6 @@ static void in_cache_put(in_cache_entry *entry) memset(entry, 0, sizeof(in_cache_entry)); kfree(entry); } - - return; } /* @@ -221,8 +219,6 @@ static void in_cache_remove_entry(in_cache_entry *entry, } vcc_release_async(vcc, -EPIPE); } - - return; } /* Call this every MPC-p2 seconds... Not exactly correct solution, @@ -248,8 +244,6 @@ static void clear_count_and_expired(struct mpoa_client *client) entry = next_entry; } write_unlock_bh(&client->ingress_lock); - - return; } /* Call this every MPC-p4 seconds. 
*/ @@ -334,8 +328,6 @@ static void in_destroy_cache(struct mpoa_client *mpc) while (mpc->in_cache != NULL) mpc->in_ops->remove_entry(mpc->in_cache, mpc); write_unlock_irq(&mpc->ingress_lock); - - return; } static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, @@ -427,8 +419,6 @@ static void eg_cache_put(eg_cache_entry *entry) memset(entry, 0, sizeof(eg_cache_entry)); kfree(entry); } - - return; } /* @@ -463,8 +453,6 @@ static void eg_cache_remove_entry(eg_cache_entry *entry, } vcc_release_async(vcc, -EPIPE); } - - return; } static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, @@ -509,8 +497,6 @@ static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time) do_gettimeofday(&(entry->tv)); entry->entry_state = EGRESS_RESOLVED; entry->ctrl_info.holding_time = holding_time; - - return; } static void clear_expired(struct mpoa_client *client) @@ -537,8 +523,6 @@ static void clear_expired(struct mpoa_client *client) entry = next_entry; } write_unlock_irq(&client->egress_lock); - - return; } static void eg_destroy_cache(struct mpoa_client *mpc) @@ -547,8 +531,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc) while (mpc->eg_cache != NULL) mpc->eg_ops->remove_entry(mpc->eg_cache, mpc); write_unlock_irq(&mpc->egress_lock); - - return; } @@ -584,6 +566,4 @@ void atm_mpoa_init_cache(struct mpoa_client *mpc) { mpc->in_ops = &ingress_ops; mpc->eg_ops = &egress_ops; - - return; } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 5e83f8e0877a..2f768de87011 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1316,8 +1316,6 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) } tasklet_schedule(&hdev->tx_task); - - return; } EXPORT_SYMBOL(hci_send_acl); diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 673a36886716..1b682a5aa061 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -1322,8 +1322,6 @@ static void l2cap_drop_acked_frames(struct sock *sk) if (!l2cap_pi(sk)->unacked_frames) del_timer(&l2cap_pi(sk)->retrans_timer); - - return; } static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) @@ -4667,7 +4665,6 @@ void l2cap_load(void) /* Dummy function to trigger automatic L2CAP module loading by * other modules that use L2CAP sockets but don't use any other * symbols from it. 
*/ - return; } EXPORT_SYMBOL(l2cap_load); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index cab71ea2796d..309b6c261b25 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -1014,8 +1014,6 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, data_bits, stop_bits, parity, RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); - - return; } static void rfcomm_tty_throttle(struct tty_struct *tty) diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 4767928a93d3..d0927d1fdada 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -273,7 +273,6 @@ static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) drop: kfree_skb(skb); - return; } /* -------- Socket interface ---------- */ diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 024fd5bb2d39..e2b86f1f5a47 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -112,7 +112,6 @@ static void caif_device_destroy(struct net_device *dev) spin_unlock_bh(&caifdevs->lock); kfree(caifd); - return; } static int transmit(struct cflayer *layer, struct cfpkt *pkt) diff --git a/net/can/bcm.c b/net/can/bcm.c index 907dc871fac8..9c65e9deb9c3 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -713,8 +713,6 @@ static void bcm_remove_op(struct bcm_op *op) kfree(op->last_frames); kfree(op); - - return; } static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 615dbe3b43f9..4c409b46aa35 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -1220,17 +1220,14 @@ void dn_dev_down(struct net_device *dev) void dn_dev_init_pkt(struct sk_buff *skb) { - return; } void dn_dev_veri_pkt(struct sk_buff *skb) { - return; } void dn_dev_hello(struct sk_buff *skb) { - return; } void dn_dev_devices_off(void) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index a8432e399545..812e6dff6067 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -264,7 +264,6 @@ static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst) static void dn_dst_link_failure(struct sk_buff *skb) { - return; } static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index c97cd9ff697e..3a92a76ae41d 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -290,8 +290,6 @@ void cipso_v4_cache_invalidate(void) cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } - - return; } /** diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index c98f115fb0fd..79d057a939ba 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1022,8 +1022,6 @@ static void trie_rebalance(struct trie *t, struct tnode *tn) rcu_assign_pointer(t->trie, (struct node *)tn); tnode_free_flush(); - - return; } /* only used from updater-side */ diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 498cf69c7977..32618e11076d 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -502,7 +502,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info) t->err_time = jiffies; out: rcu_read_unlock(); - return; } static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 3244133c24f6..ba9836c488ed 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -238,7 +238,6 @@ void ip_options_fragment(struct sk_buff * skb) opt->rr_needaddr = 0; opt->ts_needaddr = 
0; opt->ts_needtime = 0; - return; } /* diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 217ebe035b34..7bcacf627b46 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1606,7 +1606,6 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, out_free: kfree_skb(skb); - return; } static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3f7c12b70a26..0abdc242ddb7 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -890,8 +890,6 @@ out: in6_ifa_put(ifp); else in6_dev_put(idev); - - return; } static void ndisc_recv_na(struct sk_buff *skb) diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 458eabfbe130..566798d69f37 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -168,7 +168,6 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib) i & 0x100 ? "Out" : "In", i & 0xff); seq_printf(seq, "%-32s\t%lu\n", name, val); } - return; } static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 05ebd7833043..294cbe8b0725 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -316,7 +316,6 @@ static void rt6_probe(struct rt6_info *rt) #else static inline void rt6_probe(struct rt6_info *rt) { - return; } #endif @@ -1553,7 +1552,6 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, out: dst_release(&rt->u.dst); - return; } /* diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 79a1e5a23e10..fce364c6c71a 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -685,8 +685,6 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, /* We have a match; send the value. */ iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS, attrib->value); - - return; } /* diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c index df18ab4b6c5e..e98e40d76f4f 100644 --- a/net/irda/irnet/irnet_irda.c +++ b/net/irda/irnet/irnet_irda.c @@ -678,7 +678,6 @@ irda_irnet_destroy(irnet_socket * self) self->stsap_sel = 0; DEXIT(IRDA_SOCK_TRACE, "\n"); - return; } @@ -928,7 +927,6 @@ irnet_disconnect_server(irnet_socket * self, irttp_listen(self->tsap); DEXIT(IRDA_SERV_TRACE, "\n"); - return; } /*------------------------------------------------------------------*/ @@ -1013,7 +1011,6 @@ irnet_destroy_server(void) irda_irnet_destroy(&irnet_server.s); DEXIT(IRDA_SERV_TRACE, "\n"); - return; } diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 8be324fe08b9..c8b4599a752e 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -136,7 +136,6 @@ static void afiucv_pm_complete(struct device *dev) #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_complete\n"); #endif - return; } /** diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h index 68e6a2050f9a..09cc9be34796 100644 --- a/net/mac80211/debugfs.h +++ b/net/mac80211/debugfs.h @@ -7,7 +7,6 @@ extern int mac80211_open_file_generic(struct inode *inode, struct file *file); #else static inline void debugfs_hw_add(struct ieee80211_local *local) { - return; } #endif diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 7e93524459fc..bde81031727a 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -287,8 +287,6 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) *pos++ |= sdata->u.mesh.accepting_plinks ? 
MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; *pos++ = 0x00; - - return; } u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index d89ed7f2592b..0705018d8d1e 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -624,7 +624,6 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, fail: rcu_read_unlock(); sdata->u.mesh.mshstats.dropped_frames_no_route++; - return; } static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h index 07ae7fd82be1..1c1c093cf279 100644 --- a/net/netlabel/netlabel_addrlist.h +++ b/net/netlabel/netlabel_addrlist.h @@ -130,7 +130,6 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, __be32 addr, __be32 mask) { - return; } #endif @@ -203,7 +202,6 @@ static inline void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, const struct in6_addr *addr, const struct in6_addr *mask) { - return; } #endif #endif /* IPV6 */ diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index a3d64aabe2f7..e2b0a680dd56 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -670,7 +670,6 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface) unlhsh_condremove_failure: spin_unlock(&netlbl_unlhsh_lock); - return; } /** diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 6ed61b10e002..f73542d2cdd0 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -602,7 +602,6 @@ static unsigned long flow_get(struct tcf_proto *tp, u32 handle) static void flow_put(struct tcf_proto *tp, unsigned long f) { - return; } static int flow_dump(struct tcf_proto *tp, unsigned long fh, diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index a435cf13cc27..abd904be4287 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -617,7 +617,6 @@ rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y) rtsc->y = y; rtsc->dx = dx; rtsc->dy = dy; - return; } static void diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index a9e646bdb605..f10e34a68445 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -44,7 +44,6 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl) static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) { - return; } static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl) diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index b2aba3f5e6fa..fe91e50f9d98 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -174,7 +174,6 @@ static unsigned long mq_get(struct Qdisc *sch, u32 classid) static void mq_put(struct Qdisc *sch, unsigned long cl) { - return; } static int mq_dump_class(struct Qdisc *sch, unsigned long cl, diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index c50876cd8704..6ae251279fc2 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -340,7 +340,6 @@ static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent, static void multiq_put(struct Qdisc *q, unsigned long cl) { - return; } static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 81672e0c1b25..0748fb1e3a49 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -303,7 +303,6 @@ static unsigned long 
prio_bind(struct Qdisc *sch, unsigned long parent, u32 clas static void prio_put(struct Qdisc *q, unsigned long cl) { - return; } static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 072cdf442f8e..8d42bb3ba540 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -303,7 +303,6 @@ static unsigned long red_get(struct Qdisc *sch, u32 classid) static void red_put(struct Qdisc *sch, unsigned long arg) { - return; } static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 3912420cedcc..e41feff19e43 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -816,8 +816,6 @@ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc, if (t != primary) sctp_assoc_rm_peer(asoc, t); } - - return; } /* Engage in transport control operations. diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 5d057178ce0c..c04b2eb59186 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -80,7 +80,6 @@ static inline void sctp_outq_head_data(struct sctp_outq *q, { list_add(&ch->list, &q->out_chunk_list); q->out_qlen += ch->skb->len; - return; } /* Take data from the front of the queue. */ @@ -103,7 +102,6 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q, { list_add_tail(&ch->list, &q->out_chunk_list); q->out_qlen += ch->skb->len; - return; } /* diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 784bcc9a979d..61aacfbbaa92 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -181,7 +181,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) static void sctp_eps_seq_stop(struct seq_file *seq, void *v) { - return; } @@ -286,7 +285,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) { - return; } @@ -409,7 +407,6 @@ static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v) { - return; } static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 22e670200449..f5e5e27cac5e 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -892,8 +892,6 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, sctp_walk_fwdtsn(skip, chunk) { sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); } - - return; } /* Helper function to remove the association non-primary peer @@ -912,8 +910,6 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc) sctp_assoc_del_peer(asoc, &t->ipaddr); } } - - return; } /* Helper function to set sk_err on a 1-1 style socket. */ diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 3a448536f0b6..c7f7e49609cb 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -955,7 +955,6 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) * ordering and deliver them if needed. 
*/ sctp_ulpq_reap_ordered(ulpq, sid); - return; } static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, @@ -1064,7 +1063,6 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, } sk_mem_reclaim(asoc->base.sk); - return; } diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 19c9983d5360..462462eaf296 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1518,7 +1518,6 @@ call_refreshresult(struct rpc_task *task) task->tk_action = call_refresh; if (status != -ETIMEDOUT) rpc_delay(task, 3*HZ); - return; } static __be32 * diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index ce0d5b35c2ac..76e504bf74d0 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -150,7 +150,6 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) } break; } - return; } /* diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 699ade68aac1..2e3d502ffe87 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -716,7 +716,6 @@ void xprt_connect(struct rpc_task *task) xprt->stat.connect_start = jiffies; xprt->ops->connect(task); } - return; } static void xprt_connect_status(struct rpc_task *task) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9847c30b5001..6e0df664b255 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1050,8 +1050,6 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt, if (transport->tcp_flags & TCP_RCV_LAST_FRAG) transport->tcp_flags &= ~TCP_RCV_COPY_DATA; } - - return; } /* @@ -2210,7 +2208,6 @@ static int bc_send_request(struct rpc_task *task) static void bc_close(struct rpc_xprt *xprt) { - return; } /* @@ -2220,7 +2217,6 @@ static void bc_close(struct rpc_xprt *xprt) static void bc_destroy(struct rpc_xprt *xprt) { - return; } static struct rpc_xprt_ops xs_udp_ops = { diff --git a/net/sysctl_net.c b/net/sysctl_net.c index 53196009160a..ca84212cfbfe 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -82,7 +82,6 @@ static int __net_init sysctl_net_init(struct net *net) static void __net_exit sysctl_net_exit(struct net *net) { WARN_ON(!list_empty(&net->sysctls.list)); - return; } static struct pernet_operations sysctl_pernet_ops = { diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 62b1a6662209..ee99e7dfcdba 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -320,7 +320,6 @@ void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) out: d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n", wimax_dev, new_state, old_state); - return; } @@ -362,7 +361,6 @@ void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) if (wimax_dev->state > __WIMAX_ST_NULL) __wimax_state_change(wimax_dev, new_state); mutex_unlock(&wimax_dev->mutex); - return; } EXPORT_SYMBOL_GPL(wimax_state_change); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f4ea3a08e5a1..d965a2bad8d3 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2209,7 +2209,6 @@ EXPORT_SYMBOL(xfrm_dst_ifdown); static void xfrm_link_failure(struct sk_buff *skb) { /* Impossible. Such dst must be popped before reaches point of failure. */ - return; } static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) -- cgit v1.2.3
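
For reference, a minimal userspace sketch (not part of the patch series above) of how the RTM_GETROUTE dump interface registered for RTNL_FAMILY_IPMR might be exercised. The function name dump_mroutes() and the buffer size are illustrative assumptions; error handling and full attribute parsing are omitted.

/*
 * Hypothetical example only: ask the kernel to dump IPv4 multicast routes
 * from all tables via RTM_GETROUTE with rtm_family = RTNL_FAMILY_IPMR,
 * as added by the ipmr_rtm_dumproute() patch above.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int dump_mroutes(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	char buf[8192];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = RTNL_FAMILY_IPMR;	/* multicast routing family */

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE ||
			    nlh->nlmsg_type == NLMSG_ERROR)
				goto out;
			/* Each RTM_NEWROUTE message carries RTA_SRC, RTA_DST,
			 * RTA_TABLE, RTA_IIF and an RTA_MULTIPATH of output
			 * interfaces, mirroring ipmr_fill_mroute() above. */
			printf("mroute in table %u\n",
			       ((struct rtmsg *)NLMSG_DATA(nlh))->rtm_table);
		}
	}
out:
	close(fd);
	return 0;
}
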