Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--  net/ipv4/route.c  174
1 files changed, 87 insertions, 87 deletions
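
The bulk of this diff is a mechanical rename of u.rt_next to u.dst.rt_next across the IPv4 routing cache, plus trailing-whitespace cleanup, const-ification of the two file_operations tables, and a netlink error-code fix in rt_fill_info(). The rename reflects the hash-chain pointer moving out of rtable's top-level union and into struct dst_entry itself, so dst-based protocols can share one chain field. A sketch of that layout change, reconstructed from the hunks below rather than quoted from any header in this page:

	/* Before (sketch): the chain link lived in rtable's union,
	 * aliasing the embedded dst_entry. */
	struct rtable {
		union {
			struct dst_entry	dst;
			struct rtable		*rt_next;	/* hash-chain link */
		} u;
		/* ... flowi key, rt_dst, rt_src, rt_gateway, ... */
	};

	/* After (sketch): dst_entry itself carries the link in a small
	 * union, which is why every hunk below now spells the walk as
	 * rth->u.dst.rt_next. */
	struct dst_entry {
		/* ... __refcnt, dev, metrics[], expires, ... */
		union {
			struct dst_entry	*next;
			struct rtable		*rt_next;	/* IPv4 cache chain */
		};
	};
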
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2daa0dc19d33..37e0d4d5cf94 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -20,7 +20,7 @@
* (rco@di.uminho.pt) Routing table insertion and update
* Linus Torvalds : Rewrote bits to be sensible
* Alan Cox : Added BSD route gw semantics
- * Alan Cox : Super /proc >4K
+ * Alan Cox : Super /proc >4K
* Alan Cox : MTU in route table
* Alan Cox : MSS actually. Also added the window
* clamper.
@@ -38,7 +38,7 @@
* Alan Cox : Faster /proc handling
* Alexey Kuznetsov : Massive rework to support tree based routing,
* routing caches and better behaviour.
- *
+ *
* Olaf Erb : irtt wasn't being copied right.
* Bjorn Ekwall : Kerneld route support.
* Alan Cox : Multicast fixed (I hope)
@@ -70,7 +70,6 @@
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
@@ -289,7 +288,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
{
struct rt_cache_iter_state *st = rcu_dereference(seq->private);
- r = r->u.rt_next;
+ r = r->u.dst.rt_next;
while (!r) {
rcu_read_unlock_bh();
if (--st->bucket < 0)
@@ -361,8 +360,8 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
dev_queue_xmit) : 0,
r->rt_spec_dst);
seq_printf(seq, "%-127s\n", temp);
- }
- return 0;
+ }
+ return 0;
}
static struct seq_operations rt_cache_seq_ops = {
@@ -393,7 +392,7 @@ out_kfree:
goto out;
}
-static struct file_operations rt_cache_seq_fops = {
+static const struct file_operations rt_cache_seq_fops = {
.owner = THIS_MODULE,
.open = rt_cache_seq_open,
.read = seq_read,
@@ -429,7 +428,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return &per_cpu(rt_cache_stat, cpu);
}
return NULL;
-
+
}
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
@@ -445,7 +444,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
return 0;
}
-
+
seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
" %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
atomic_read(&ipv4_dst_ops.entries),
@@ -459,7 +458,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
st->out_hit,
st->out_slow_tot,
- st->out_slow_mc,
+ st->out_slow_mc,
st->gc_total,
st->gc_ignored,
@@ -484,7 +483,7 @@ static int rt_cpu_seq_open(struct inode *inode, struct file *file)
return seq_open(file, &rt_cpu_seq_ops);
}
-static struct file_operations rt_cpu_seq_fops = {
+static const struct file_operations rt_cpu_seq_fops = {
.owner = THIS_MODULE,
.open = rt_cpu_seq_open,
.read = seq_read,
@@ -493,7 +492,7 @@ static struct file_operations rt_cpu_seq_fops = {
};
#endif /* CONFIG_PROC_FS */
-
+
static __inline__ void rt_free(struct rtable *rt)
{
multipath_remove(rt);
@@ -512,7 +511,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
/* Kill broadcast/multicast entries very aggresively, if they
collide in hash table with more useful entries */
return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
- rth->fl.iif && rth->u.rt_next;
+ rth->fl.iif && rth->u.dst.rt_next;
}
static __inline__ int rt_valuable(struct rtable *rth)
@@ -595,10 +594,10 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
compare_keys(&(*rthp)->fl, &expentry->fl)) {
if (*rthp == expentry) {
- *rthp = rth->u.rt_next;
+ *rthp = rth->u.dst.rt_next;
continue;
} else {
- *rthp = rth->u.rt_next;
+ *rthp = rth->u.dst.rt_next;
rt_free(rth);
if (removed_count)
++(*removed_count);
@@ -606,9 +605,9 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
} else {
if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
passedexpired && !nextstep)
- nextstep = &rth->u.rt_next;
+ nextstep = &rth->u.dst.rt_next;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
}
}
@@ -649,12 +648,12 @@ static void rt_check_expire(unsigned long dummy)
/* Entry is expired even if it is in use */
if (time_before_eq(now, rth->u.dst.expires)) {
tmo >>= 1;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
continue;
}
} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
tmo >>= 1;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
continue;
}
@@ -668,12 +667,12 @@ static void rt_check_expire(unsigned long dummy)
if (!rthp)
break;
} else {
- *rthp = rth->u.rt_next;
+ *rthp = rth->u.dst.rt_next;
rt_free(rth);
}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
- *rthp = rth->u.rt_next;
- rt_free(rth);
+ *rthp = rth->u.dst.rt_next;
+ rt_free(rth);
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
}
spin_unlock(rt_hash_lock_addr(i));
@@ -706,7 +705,7 @@ static void rt_run_flush(unsigned long dummy)
spin_unlock_bh(rt_hash_lock_addr(i));
for (; rth; rth = next) {
- next = rth->u.rt_next;
+ next = rth->u.dst.rt_next;
rt_free(rth);
}
}
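
The rt_run_flush() hunk above keeps the delete-safe walk intact: the successor is read into next before rt_free() releases the current entry, so the loop never dereferences freed memory. The same idiom in a self-contained user-space form (node type and names hypothetical):

	#include <stdlib.h>

	struct node {
		struct node *next;
	};

	static void flush_chain(struct node *head)
	{
		struct node *cur, *next;

		for (cur = head; cur; cur = next) {
			next = cur->next;	/* save the link first... */
			free(cur);		/* ...then the entry may go away */
		}
	}
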
@@ -739,7 +738,7 @@ void rt_cache_flush(int delay)
if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
tmo = 0;
-
+
if (delay > tmo)
delay = tmo;
}
@@ -840,7 +839,7 @@ static int rt_garbage_collect(void)
while ((rth = *rthp) != NULL) {
if (!rt_may_expire(rth, tmo, expire)) {
tmo >>= 1;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
continue;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
@@ -858,12 +857,12 @@ static int rt_garbage_collect(void)
if (!rthp)
break;
} else {
- *rthp = rth->u.rt_next;
+ *rthp = rth->u.dst.rt_next;
rt_free(rth);
goal--;
}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
- *rthp = rth->u.rt_next;
+ *rthp = rth->u.dst.rt_next;
rt_free(rth);
goal--;
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
@@ -947,13 +946,13 @@ restart:
if (compare_keys(&rth->fl, &rt->fl)) {
#endif
/* Put it first */
- *rthp = rth->u.rt_next;
+ *rthp = rth->u.dst.rt_next;
/*
* Since lookup is lockfree, the deletion
* must be visible to another weakly ordered CPU before
* the insertion at the start of the hash chain.
*/
- rcu_assign_pointer(rth->u.rt_next,
+ rcu_assign_pointer(rth->u.dst.rt_next,
rt_hash_table[hash].chain);
/*
* Since lookup is lockfree, the update writes
@@ -983,7 +982,7 @@ restart:
chain_length++;
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
}
if (cand) {
@@ -994,7 +993,7 @@ restart:
* only 2 entries per bucket. We will see.
*/
if (chain_length > ip_rt_gc_elasticity) {
- *candp = cand->u.rt_next;
+ *candp = cand->u.dst.rt_next;
rt_free(cand);
}
}
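
The comment retained in the rt_intern_hash() hunk above explains the rcu_assign_pointer() calls: lookups traverse the chain with no lock, so the moved entry's new next pointer must be published, with the write barrier rcu_assign_pointer() implies, before the head pointer makes the entry visible at the front again. Reduced to its essentials (helper name hypothetical, pattern taken from the hunk):

	/* Move the entry at *rthp to the head of an RCU-read chain.
	 * The unlink is a plain store (a reader already past the entry is
	 * unaffected); the two publications use rcu_assign_pointer() so a
	 * lockless reader never sees the entry half-linked at the head. */
	static void move_to_front(struct rtable **rthp, struct rtable **head)
	{
		struct rtable *rth = *rthp;

		*rthp = rth->u.dst.rt_next;			/* unlink */
		rcu_assign_pointer(rth->u.dst.rt_next, *head);	/* link behind head */
		rcu_assign_pointer(*head, rth);			/* publish */
	}
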
@@ -1034,13 +1033,13 @@ restart:
}
}
- rt->u.rt_next = rt_hash_table[hash].chain;
+ rt->u.dst.rt_next = rt_hash_table[hash].chain;
#if RT_CACHE_DEBUG >= 2
- if (rt->u.rt_next) {
+ if (rt->u.dst.rt_next) {
struct rtable *trt;
printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
NIPQUAD(rt->rt_dst));
- for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
+ for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
printk("\n");
}
@@ -1104,7 +1103,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
return;
}
} else
- printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
+ printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
__builtin_return_address(0));
ip_select_fb_ident(iph);
@@ -1117,9 +1116,9 @@ static void rt_del(unsigned hash, struct rtable *rt)
spin_lock_bh(rt_hash_lock_addr(hash));
ip_rt_put(rt);
for (rthp = &rt_hash_table[hash].chain; *rthp;
- rthp = &(*rthp)->u.rt_next)
+ rthp = &(*rthp)->u.dst.rt_next)
if (*rthp == rt) {
- *rthp = rt->u.rt_next;
+ *rthp = rt->u.dst.rt_next;
rt_free(rt);
break;
}
@@ -1167,7 +1166,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rth->fl.fl4_src != skeys[i] ||
rth->fl.oif != ikeys[k] ||
rth->fl.iif != 0) {
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.rt_next;
continue;
}
@@ -1190,7 +1189,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
/* Copy all the information. */
*rt = *rth;
- INIT_RCU_HEAD(&rt->u.dst.rcu_head);
+ INIT_RCU_HEAD(&rt->u.dst.rcu_head);
rt->u.dst.__use = 1;
atomic_set(&rt->u.dst.__refcnt, 1);
rt->u.dst.child = NULL;
@@ -1225,11 +1224,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rt_drop(rt);
goto do_next;
}
-
+
netevent.old = &rth->u.dst;
netevent.new = &rt->u.dst;
- call_netevent_notifiers(NETEVENT_REDIRECT,
- &netevent);
+ call_netevent_notifiers(NETEVENT_REDIRECT,
+ &netevent);
rt_del(hash, rth);
if (!rt_intern_hash(hash, rt, &rt))
@@ -1343,7 +1342,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
#endif
}
out:
- in_dev_put(in_dev);
+ in_dev_put(in_dev);
}
static int ip_error(struct sk_buff *skb)
@@ -1379,7 +1378,7 @@ static int ip_error(struct sk_buff *skb)
out: kfree_skb(skb);
return 0;
-}
+}
/*
* The last two values are not from the RFC but
@@ -1392,7 +1391,7 @@ static const unsigned short mtu_plateau[] =
static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
{
int i;
-
+
for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
if (old_mtu > mtu_plateau[i])
return mtu_plateau[i];
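
guess_mtu() above scans a descending plateau table and returns the first value strictly below the failed MTU; the comment just above notes the kernel's array appends two values beyond the RFC's list. A worked version using only the RFC 1191 section 7.1 plateaus (the kernel's exact array isn't shown in this diff, so treat the table as illustrative):

	/* PMTU plateaus from RFC 1191, 7.1, largest first (illustrative). */
	static const unsigned short plateaus[] = {
		65535, 32000, 17914, 8166, 4352, 2002, 1492, 1006, 508, 296, 68
	};

	static unsigned short guess(unsigned short old_mtu)
	{
		unsigned int i;

		for (i = 0; i < sizeof(plateaus) / sizeof(plateaus[0]); i++)
			if (old_mtu > plateaus[i])
				return plateaus[i];
		return 68;	/* IPv4 minimum as a last resort */
	}

	/* Example: a Fragmentation Needed ICMP carrying no next-hop MTU on a
	 * 1500-byte path yields guess(1500) == 1492, the 802.3 plateau. */
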
@@ -1416,7 +1415,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.rt_next)) {
+ rth = rcu_dereference(rth->u.dst.rt_next)) {
if (rth->fl.fl4_dst == daddr &&
rth->fl.fl4_src == skeys[i] &&
rth->rt_dst == daddr &&
@@ -1436,7 +1435,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
mtu = guess_mtu(old_mtu);
}
if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
- if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
+ if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
dst_confirm(&rth->u.dst);
if (mtu < ip_rt_min_pmtu) {
mtu = ip_rt_min_pmtu;
@@ -1600,7 +1599,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
#endif
set_class_tag(rt, itag);
#endif
- rt->rt_type = res->type;
+ rt->rt_type = res->type;
}
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -1714,11 +1713,11 @@ static void ip_handle_martian_source(struct net_device *dev,
#endif
}
-static inline int __mkroute_input(struct sk_buff *skb,
- struct fib_result* res,
- struct in_device *in_dev,
+static inline int __mkroute_input(struct sk_buff *skb,
+ struct fib_result* res,
+ struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos,
- struct rtable **result)
+ struct rtable **result)
{
struct rtable *rth;
@@ -1738,12 +1737,12 @@ static inline int __mkroute_input(struct sk_buff *skb,
}
- err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
+ err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
in_dev->dev, &spec_dst, &itag);
if (err < 0) {
- ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
+ ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
-
+
err = -EINVAL;
goto cleanup;
}
@@ -1811,10 +1810,10 @@ static inline int __mkroute_input(struct sk_buff *skb,
/* release the working reference to the output device */
in_dev_put(out_dev);
return err;
-}
+}
-static inline int ip_mkroute_input_def(struct sk_buff *skb,
- struct fib_result* res,
+static inline int ip_mkroute_input_def(struct sk_buff *skb,
+ struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
@@ -1835,11 +1834,11 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
/* put it into the cache */
hash = rt_hash(daddr, saddr, fl->iif);
- return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
+ return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
}
-static inline int ip_mkroute_input(struct sk_buff *skb,
- struct fib_result* res,
+static inline int ip_mkroute_input(struct sk_buff *skb,
+ struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
@@ -1859,7 +1858,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
if (hopcount < 2)
return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
saddr, tos);
-
+
/* add all alternatives to the routing cache */
for (hop = 0; hop < hopcount; hop++) {
res->nh_sel = hop;
@@ -1988,7 +1987,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto e_nobufs;
if (err == -EINVAL)
goto e_inval;
-
+
done:
in_dev_put(in_dev);
if (free_res)
@@ -2071,8 +2070,8 @@ martian_destination:
#endif
e_hostunreach:
- err = -EHOSTUNREACH;
- goto done;
+ err = -EHOSTUNREACH;
+ goto done;
e_inval:
err = -EINVAL;
@@ -2099,7 +2098,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.rt_next)) {
+ rth = rcu_dereference(rth->u.dst.rt_next)) {
if (rth->fl.fl4_dst == daddr &&
rth->fl.fl4_src == saddr &&
rth->fl.iif == iif &&
@@ -2153,11 +2152,11 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
}
static inline int __mkroute_output(struct rtable **result,
- struct fib_result* res,
+ struct fib_result* res,
const struct flowi *fl,
- const struct flowi *oldflp,
- struct net_device *dev_out,
- unsigned flags)
+ const struct flowi *oldflp,
+ struct net_device *dev_out,
+ unsigned flags)
{
struct rtable *rth;
struct in_device *in_dev;
@@ -2190,7 +2189,7 @@ static inline int __mkroute_output(struct rtable **result,
}
} else if (res->type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST|RTCF_LOCAL;
- if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
+ if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
oldflp->proto))
flags &= ~RTCF_LOCAL;
/* If multicast route do not exist use
@@ -2208,7 +2207,7 @@ static inline int __mkroute_output(struct rtable **result,
if (!rth) {
err = -ENOBUFS;
goto cleanup;
- }
+ }
atomic_set(&rth->u.dst.__refcnt, 1);
rth->u.dst.flags= DST_HOST;
@@ -2232,7 +2231,7 @@ static inline int __mkroute_output(struct rtable **result,
rth->rt_dst = fl->fl4_dst;
rth->rt_src = fl->fl4_src;
rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
- /* get references to the devices that are to be hold by the routing
+ /* get references to the devices that are to be hold by the routing
cache entry */
rth->u.dst.dev = dev_out;
dev_hold(dev_out);
@@ -2250,7 +2249,7 @@ static inline int __mkroute_output(struct rtable **result,
}
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
rth->rt_spec_dst = fl->fl4_src;
- if (flags & RTCF_LOCAL &&
+ if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
rth->u.dst.output = ip_mc_output;
RT_CACHE_STAT_INC(out_slow_mc);
@@ -2292,7 +2291,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
err = rt_intern_hash(hash, rth, rp);
}
-
+
return err;
}
@@ -2563,7 +2562,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
rcu_read_lock_bh();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.rt_next)) {
+ rth = rcu_dereference(rth->u.dst.rt_next)) {
if (rth->fl.fl4_dst == flp->fl4_dst &&
rth->fl.fl4_src == flp->fl4_src &&
rth->fl.iif == 0 &&
@@ -2635,7 +2634,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
r = nlmsg_data(nlh);
r->rtm_family = AF_INET;
@@ -2718,7 +2717,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
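
The two rt_fill_info() hunks above replace -ENOBUFS with -EMSGSIZE on both failure paths, and make the nla_put_failure path cancel the half-built message before returning. By netlink convention -EMSGSIZE means the skb ran out of room, letting a dump caller close the current skb and continue in the next one instead of treating the failure as an allocation error. The resulting shape, sketched with the modern nla_put_u32() spelling (the patch-era code used NLA_PUT-style goto macros):

	static int fill_example(struct sk_buff *skb, u32 pid, u32 seq, int event)
	{
		struct rtmsg *r;
		struct nlmsghdr *nlh;

		nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), 0);
		if (nlh == NULL)
			return -EMSGSIZE;	/* no room: not an allocation error */

		r = nlmsg_data(nlh);
		/* ... fill *r, then append attributes ... */
		if (nla_put_u32(skb, RTA_IIF, 0))
			goto nla_put_failure;

		return nlmsg_end(skb, nlh);

	nla_put_failure:
		nlmsg_cancel(skb, nlh);	/* trim the partial message from the skb */
		return -EMSGSIZE;	/* tell the dumper it simply didn't fit */
	}
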
@@ -2824,12 +2824,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
s_idx = 0;
rcu_read_lock_bh();
for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
- rt = rcu_dereference(rt->u.rt_next), idx++) {
+ rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
if (idx < s_idx)
continue;
skb->dst = dst_clone(&rt->u.dst);
if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1, NLM_F_MULTI) <= 0) {
dst_release(xchg(&skb->dst, NULL));
rcu_read_unlock_bh();
@@ -2862,7 +2862,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
rt_cache_flush(flush_delay);
return 0;
- }
+ }
return -EINVAL;
}
@@ -2879,13 +2879,13 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
if (newlen != sizeof(int))
return -EINVAL;
if (get_user(delay, (int __user *)newval))
- return -EFAULT;
- rt_cache_flush(delay);
+ return -EFAULT;
+ rt_cache_flush(delay);
return 0;
}
ctl_table ipv4_route_table[] = {
- {
+ {
.ctl_name = NET_IPV4_ROUTE_FLUSH,
.procname = "flush",
.data = &flush_delay,
@@ -2930,7 +2930,7 @@ ctl_table ipv4_route_table[] = {
},
{
/* Deprecated. Use gc_min_interval_ms */
-
+
.ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
.procname = "gc_min_interval",
.data = &ip_rt_gc_min_interval,
@@ -3179,8 +3179,8 @@ int __init ip_rt_init(void)
{
struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
- !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
- proc_net_stat))) {
+ !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
+ proc_net_stat))) {
return -ENOMEM;
}
rtstat_pde->proc_fops = &rt_cpu_seq_fops;