Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/devinet.c       | 61
-rw-r--r--  net/ipv4/fib_frontend.c  | 11
-rw-r--r--  net/ipv4/igmp.c          | 27
-rw-r--r--  net/ipv4/inetpeer.c      |  5
-rw-r--r--  net/ipv4/ip_gre.c        |  2
-rw-r--r--  net/ipv4/ipip.c          |  2
-rw-r--r--  net/ipv4/ipmr.c          |  4
-rw-r--r--  net/ipv4/route.c         |  2
-rw-r--r--  net/ipv4/tcp.c           | 19
-rw-r--r--  net/ipv4/tcp_input.c     |  4
-rw-r--r--  net/ipv4/tcp_ipv4.c      | 22
-rw-r--r--  net/ipv4/tcp_minisocks.c |  2
12 files changed, 88 insertions(+), 73 deletions(-)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c2045f9615da..7620382058a0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1174,39 +1174,54 @@ nla_put_failure:
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int idx, ip_idx;
+ int h, s_h;
+ int idx, s_idx;
+ int ip_idx, s_ip_idx;
struct net_device *dev;
struct in_device *in_dev;
struct in_ifaddr *ifa;
- int s_ip_idx, s_idx = cb->args[0];
+ struct hlist_head *head;
+ struct hlist_node *node;
- s_ip_idx = ip_idx = cb->args[1];
- idx = 0;
- for_each_netdev(net, dev) {
- if (idx < s_idx)
- goto cont;
- if (idx > s_idx)
- s_ip_idx = 0;
- in_dev = __in_dev_get_rtnl(dev);
- if (!in_dev)
- goto cont;
+ s_h = cb->args[0];
+ s_idx = idx = cb->args[1];
+ s_ip_idx = ip_idx = cb->args[2];
- for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
- ifa = ifa->ifa_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
- continue;
- if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ s_ip_idx = 0;
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ goto cont;
+
+ for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
+ ifa = ifa->ifa_next, ip_idx++) {
+ if (ip_idx < s_ip_idx)
+ continue;
+ if (inet_fill_ifaddr(skb, ifa,
+ NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
- RTM_NEWADDR, NLM_F_MULTI) <= 0)
- goto done;
- }
+ RTM_NEWADDR, NLM_F_MULTI) <= 0) {
+ rcu_read_unlock();
+ goto done;
+ }
+ }
cont:
- idx++;
+ idx++;
+ }
+ rcu_read_unlock();
}
done:
- cb->args[0] = idx;
- cb->args[1] = ip_idx;
+ cb->args[0] = h;
+ cb->args[1] = idx;
+ cb->args[2] = ip_idx;
return skb->len;
}
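Note: the devinet.c hunk above replaces the flat for_each_netdev() scan with an RCU walk over the per-net device hash buckets, so the netlink dump now needs three resume cursors instead of two. A minimal sketch of that cursor discipline, with the per-address work elided, assuming the same cb->args[] layout as in the hunk:

	/* Three-level resumable dump: hash bucket h, device idx within
	 * the bucket, address ip_idx within the device.  Each saved
	 * cursor applies only until the walk passes the saved position,
	 * after which the inner cursors reset so nothing is skipped. */
	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_ip_idx = cb->args[2];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		rcu_read_lock();
		hlist_for_each_entry_rcu(dev, node, &net->dev_index_head[h],
					 index_hlist) {
			if (idx < s_idx)
				goto cont;	/* dumped in a prior pass */
			if (idx > s_idx)
				s_ip_idx = 0;	/* new device: start at 0 */
			/* emit addresses from s_ip_idx; if the skb fills,
			 * drop rcu_read_lock() first, then save the
			 * cursors and return */
cont:
			idx++;
		}
		rcu_read_unlock();
	}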
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 816e2180bd60..6c1e56aef1f4 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -895,11 +895,11 @@ static void nl_fib_lookup_exit(struct net *net)
net->ipv4.fibnl = NULL;
}
-static void fib_disable_ip(struct net_device *dev, int force)
+static void fib_disable_ip(struct net_device *dev, int force, int delay)
{
if (fib_sync_down_dev(dev, force))
fib_flush(dev_net(dev));
- rt_cache_flush(dev_net(dev), 0);
+ rt_cache_flush(dev_net(dev), delay);
arp_ifdown(dev);
}
@@ -922,7 +922,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
/* Last address was deleted from this interface.
Disable IP.
*/
- fib_disable_ip(dev, 1);
+ fib_disable_ip(dev, 1, 0);
} else {
rt_cache_flush(dev_net(dev), -1);
}
@@ -937,7 +937,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
struct in_device *in_dev = __in_dev_get_rtnl(dev);
if (event == NETDEV_UNREGISTER) {
- fib_disable_ip(dev, 2);
+ fib_disable_ip(dev, 2, -1);
return NOTIFY_DONE;
}
@@ -955,10 +955,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
rt_cache_flush(dev_net(dev), -1);
break;
case NETDEV_DOWN:
- fib_disable_ip(dev, 0);
+ fib_disable_ip(dev, 0, 0);
break;
case NETDEV_CHANGEMTU:
case NETDEV_CHANGE:
+ case NETDEV_UNREGISTER_PERNET:
rt_cache_flush(dev_net(dev), 0);
break;
}
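Note: the new third argument simply threads a per-event flush delay through to rt_cache_flush() instead of hard-coding 0. Summarising the call sites from the hunks above (the exact meaning of the delay value is whatever rt_cache_flush() implements in this tree):

	fib_disable_ip(dev, 1, 0);	/* last address deleted */
	fib_disable_ip(dev, 0, 0);	/* NETDEV_DOWN */
	fib_disable_ip(dev, 2, -1);	/* NETDEV_UNREGISTER: -1 matches the
					 * rt_cache_flush(net, -1) calls used
					 * for address changes elsewhere */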
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index bd24f6560a49..6110c6d6e613 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2313,7 +2313,8 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
state->in_dev = NULL;
for_each_netdev_rcu(net, state->dev) {
struct in_device *in_dev;
- in_dev = in_dev_get(state->dev);
+
+ in_dev = __in_dev_get_rcu(state->dev);
if (!in_dev)
continue;
read_lock(&in_dev->mc_list_lock);
@@ -2323,7 +2324,6 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
break;
}
read_unlock(&in_dev->mc_list_lock);
- in_dev_put(in_dev);
}
return im;
}
@@ -2333,16 +2333,15 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
im = im->next;
while (!im) {
- if (likely(state->in_dev != NULL)) {
+ if (likely(state->in_dev != NULL))
read_unlock(&state->in_dev->mc_list_lock);
- in_dev_put(state->in_dev);
- }
- state->dev = next_net_device(state->dev);
+
+ state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->in_dev = NULL;
break;
}
- state->in_dev = in_dev_get(state->dev);
+ state->in_dev = __in_dev_get_rcu(state->dev);
if (!state->in_dev)
continue;
read_lock(&state->in_dev->mc_list_lock);
@@ -2384,7 +2383,6 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
if (likely(state->in_dev != NULL)) {
read_unlock(&state->in_dev->mc_list_lock);
- in_dev_put(state->in_dev);
state->in_dev = NULL;
}
state->dev = NULL;
@@ -2464,7 +2462,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
state->im = NULL;
for_each_netdev_rcu(net, state->dev) {
struct in_device *idev;
- idev = in_dev_get(state->dev);
+ idev = __in_dev_get_rcu(state->dev);
if (unlikely(idev == NULL))
continue;
read_lock(&idev->mc_list_lock);
@@ -2480,7 +2478,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
spin_unlock_bh(&im->lock);
}
read_unlock(&idev->mc_list_lock);
- in_dev_put(idev);
}
return psf;
}
@@ -2494,16 +2491,15 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
spin_unlock_bh(&state->im->lock);
state->im = state->im->next;
while (!state->im) {
- if (likely(state->idev != NULL)) {
+ if (likely(state->idev != NULL))
read_unlock(&state->idev->mc_list_lock);
- in_dev_put(state->idev);
- }
- state->dev = next_net_device(state->dev);
+
+ state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
goto out;
}
- state->idev = in_dev_get(state->dev);
+ state->idev = __in_dev_get_rcu(state->dev);
if (!state->idev)
continue;
read_lock(&state->idev->mc_list_lock);
@@ -2555,7 +2551,6 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
}
if (likely(state->idev != NULL)) {
read_unlock(&state->idev->mc_list_lock);
- in_dev_put(state->idev);
state->idev = NULL;
}
state->dev = NULL;
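Note: all of the igmp.c hunks make the same substitution. The seq_file iterators already run under rcu_read_lock() (note the for_each_netdev_rcu() / next_net_device_rcu() walkers), so per-device refcounting with in_dev_get()/in_dev_put() is redundant. The two access patterns, side by side, as a sketch:

	/* Refcounted lookup (old): the reference pins in_dev until the
	 * matching in_dev_put(), independent of any lock. */
	in_dev = in_dev_get(dev);
	if (in_dev) {
		/* ... use in_dev ... */
		in_dev_put(in_dev);
	}

	/* RCU lookup (new): cheaper, but only valid inside the enclosing
	 * RCU read-side critical section, and the code must not sleep. */
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* ... use in_dev ... */
	}
	rcu_read_unlock();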
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index b1fbe18feb5a..6bcfe52a9c87 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -67,9 +67,6 @@
* ip_id_count: idlock
*/
-/* Exported for inet_getid inline function. */
-DEFINE_SPINLOCK(inet_peer_idlock);
-
static struct kmem_cache *peer_cachep __read_mostly;
#define node_height(x) x->avl_height
@@ -390,7 +387,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
n->v4daddr = daddr;
atomic_set(&n->refcnt, 1);
atomic_set(&n->rid, 0);
- n->ip_id_count = secure_ip_id(daddr);
+ atomic_set(&n->ip_id_count, secure_ip_id(daddr));
n->tcp_ts_stamp = 0;
write_lock_bh(&peer_pool_lock);
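Note: with ip_id_count converted to an atomic_t, the global inet_peer_idlock that serialized ID allocation can go away entirely. The companion inet_getid() helper lives in include/net/inetpeer.h and is outside this diffstat; presumably it reduces to a single atomic operation along these lines:

	/* Hypothetical lock-free sketch: advance the counter by more + 1
	 * and return the pre-increment value, giving the caller the
	 * disjoint ID range [ret, ret + more] without a spinlock. */
	static inline __u16 inet_getid(struct inet_peer *p, int more)
	{
		more++;
		return atomic_add_return(more, &p->ip_id_count) - more;
	}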
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a7de9e3a8f18..c5f6af5d0f34 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -125,7 +125,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev);
#define HASH_SIZE 16
-static int ipgre_net_id;
+static int ipgre_net_id __read_mostly;
struct ipgre_net {
struct ip_tunnel *tunnels[4][HASH_SIZE];
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index c5b1f71c3cd8..7242ffcc44e5 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -119,7 +119,7 @@
#define HASH_SIZE 16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
-static int ipip_net_id;
+static int ipip_net_id __read_mostly;
struct ipip_net {
struct ip_tunnel *tunnels_r_l[HASH_SIZE];
struct ip_tunnel *tunnels_r[HASH_SIZE];
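Note: the ip_gre.c and ipip.c hunks are the same one-word annotation. The pernet IDs are written once during registration and then only read in fast paths, so __read_mostly moves them into the kernel's read-mostly data section, away from cache lines that see frequent writes:

	static int ipip_net_id __read_mostly;	/* assigned once at init,
						 * read per-packet after */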
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ef4ee45b928f..54596f73eff5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -494,8 +494,10 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
return -EINVAL;
}
- if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
+ if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
+ dev_put(dev);
return -EADDRNOTAVAIL;
+ }
IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
ip_rt_multicast_event(in_dev);
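Note: the ipmr.c hunk fixes a device reference leak. dev reaches this point holding a reference taken when vif_add() looked it up, so the early-return error path must drop it. The general pattern, sketched with a hypothetical setup step:

	dev = dev_get_by_index(net, ifindex);
	if (!dev)
		return -ENODEV;

	if (some_setup(dev) < 0) {	/* hypothetical failing step */
		dev_put(dev);		/* every post-lookup exit must put */
		return -EADDRNOTAVAIL;
	}
	/* success path transfers or eventually drops the reference */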
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ff258b57680b..4284ceef7945 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2852,7 +2852,7 @@ static int rt_fill_info(struct net *net,
error = rt->u.dst.error;
expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
if (rt->peer) {
- id = rt->peer->ip_id_count;
+ id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
if (rt->peer->tcp_ts_stamp) {
ts = rt->peer->tcp_ts;
tsage = get_seconds() - rt->peer->tcp_ts_stamp;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e0cfa633680a..524f9760193b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1183,7 +1183,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
#if TCP_DEBUG
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
- WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+ WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+ KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+ tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
#endif
if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1432,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* Now that we have two receive queues this
* shouldn't happen.
*/
- if (before(*seq, TCP_SKB_CB(skb)->seq)) {
- printk(KERN_INFO "recvmsg bug: copied %X "
- "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+ if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+ KERN_INFO "recvmsg bug: copied %X "
+ "seq %X rcvnxt %X fl %X\n", *seq,
+ TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+ flags))
break;
- }
+
offset = *seq - TCP_SKB_CB(skb)->seq;
if (tcp_hdr(skb)->syn)
offset--;
@@ -1443,8 +1447,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
- "copied %X seq %X\n", *seq,
- TCP_SKB_CB(skb)->seq);
+ "copied %X seq %X rcvnxt %X fl %X\n",
+ *seq, TCP_SKB_CB(skb)->seq,
+ tp->rcv_nxt, flags);
}
/* Well, if we have backlog, try to process it now yet. */
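Note: both tcp.c hunks exploit the fact that WARN(cond, fmt, ...) evaluates to cond, so a single statement can print the diagnostic (with a backtrace) and steer control flow, replacing the old printk-then-break pairs. A usage sketch with a hypothetical variable:

	/* WARN() returns the tested condition, so it can sit directly in
	 * an if(): message and stack trace fire only when cond is true. */
	if (WARN(copied < 0, "bogus copied count %d\n", copied))
		break;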
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index be0c5bf7bfca..cc306ac6eb51 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
* "len" is invariant segment length, including TCP header.
*/
len += skb->data - skb_transport_header(skb);
- if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
+ if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
/* If PSH is not set, packet should be
* full sized, provided peer TCP is not badly broken.
* This observation (if it is correct 8)) allows
@@ -411,7 +411,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
hint = min(hint, tp->rcv_wnd / 2);
- hint = min(hint, TCP_MIN_RCVMSS);
+ hint = min(hint, TCP_MSS_DEFAULT);
hint = max(hint, TCP_MIN_MSS);
inet_csk(sk)->icsk_ack.rcv_mss = hint;
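Note: TCP_MIN_RCVMSS is renamed to TCP_MSS_DEFAULT here and in tcp_minisocks.c below, and the same macro replaces the bare 536 literals in tcp_ipv4.c, so the RFC 1122 default lives in one place. The presumed companion definition (in include/net/tcp.h, outside this diffstat):

	/* 536 = 576 (minimum IPv4 reassembly buffer, RFC 1122) minus
	 * 40 bytes of IPv4 + TCP headers. */
	#define TCP_MSS_DEFAULT		536U	/* IPv4 (RFC1122) */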
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 657ae334f125..df18ce04f41e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
* when trying new connection.
*/
if (peer != NULL &&
- peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
+ (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
tp->rx_opt.ts_recent = peer->tcp_ts;
}
@@ -217,7 +217,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (inet->opt)
inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
- tp->rx_opt.mss_clamp = 536;
+ tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
/* Socket identity is still unknown (sport may be zero).
* However we set state to SYN-SENT and not releasing socket
@@ -1268,7 +1268,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
tcp_clear_options(&tmp_opt);
- tmp_opt.mss_clamp = 536;
+ tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
tcp_parse_options(skb, &tmp_opt, 0, dst);
@@ -1308,7 +1308,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_death_row.sysctl_tw_recycle &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) {
- if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
+ if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
@@ -1727,9 +1727,9 @@ int tcp_v4_remember_stamp(struct sock *sk)
if (peer) {
if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
- (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
- peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
- peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
+ ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+ peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
+ peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
peer->tcp_ts = tp->rx_opt.ts_recent;
}
if (release_it)
@@ -1748,9 +1748,9 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
- (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
- peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
- peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
+ ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+ peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
+ peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
peer->tcp_ts = tcptw->tw_ts_recent;
}
inet_putpeer(peer);
@@ -1815,7 +1815,7 @@ static int tcp_v4_init_sock(struct sock *sk)
*/
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_clamp = ~0;
- tp->mss_cache = 536;
+ tp->mss_cache = TCP_MSS_DEFAULT;
tp->reordering = sysctl_tcp_reordering;
icsk->icsk_ca_ops = &tcp_init_congestion_ops;
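Note: the four timestamp hunks above all rewrite freshness checks from the additive form stamp + TCP_PAWS_MSL >= now into the subtractive form (u32)now - stamp <= TCP_PAWS_MSL. Unsigned subtraction yields the elapsed interval modulo 2^32, so the test stays correct if the 32-bit seconds value wraps, whereas the additive form can misclassify stale entries indefinitely. Illustrative (hypothetical) values, with TCP_PAWS_MSL = 60:

	u32 stamp = 100;	/* recorded long ago, before a u32 wrap */
	u32 now   = 50;		/* counter has since wrapped past it   */

	/* old: stamp + TCP_PAWS_MSL >= now -> 160 >= 50, wrongly "fresh" */
	/* new: now - stamp <= TCP_PAWS_MSL -> 0xffffffce <= 60 is false,
	 *      correctly "stale" */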
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a9d34e224cb6..4be22280e6b3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -476,7 +476,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
if (newtp->af_specific->md5_lookup(sk, newsk))
newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
- if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
+ if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);