author    Linus Torvalds <torvalds@linux-foundation.org>  2019-01-03 12:53:47 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-01-03 12:53:47 -0800
commit    43d86ee8c639df750529b4d8f062b328b61c423e (patch)
tree      076161dd7ce3f843b9c965a780ecfbf020f75e8e /net
parent    645ff1e8e704c4f33ab1fcd3c87f95cb9b6d7144 (diff)
parent    c5ee066333ebc322a24a00a743ed941a0c68617e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Several fixes here. Basically split down the line between newly
  introduced regressions and long existing problems:

   1) Double free in tipc_enable_bearer(), from Cong Wang.

   2) Many fixes to nf_conncount, from Florian Westphal.

   3) op->get_regs_len() can throw an error, check it, from Yunsheng Lin.

   4) Need to use GFP_ATOMIC in *_add_hash_mac_address() of fsl/fman
      driver, from Scott Wood.

   5) Infinite loop in fib_empty_table(), from Yue Haibing.

   6) Use after free in ax25_fillin_cb(), from Cong Wang.

   7) Fix socket locking in nr_find_socket(), also from Cong Wang.

   8) Fix WoL wakeup enable in r8169, from Heiner Kallweit.

   9) On 32-bit, sock->sk_stamp is not thread-safe, from Deepa Dinamani.

  10) Fix ptr_ring wrap during queue swap, from Cong Wang.

  11) Missing shutdown callback in hinic driver, from Xue Chaojing.

  12) Need to return NULL on error from ip6_neigh_lookup(), from
      Stefano Brivio.

  13) BPF out of bounds speculation fixes from Daniel Borkmann"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (57 commits)
  ipv6: Consider sk_bound_dev_if when binding a socket to an address
  ipv6: Fix dump of specific table with strict checking
  bpf: add various test cases to selftests
  bpf: prevent out of bounds speculation on pointer arithmetic
  bpf: fix check_map_access smin_value test when pointer contains offset
  bpf: restrict unknown scalars of mixed signed bounds for unprivileged
  bpf: restrict stack pointer arithmetic for unprivileged
  bpf: restrict map value pointer arithmetic for unprivileged
  bpf: enable access to ax register also from verifier rewrite
  bpf: move tmp variable into ax register in interpreter
  bpf: move {prev_,}insn_idx into verifier env
  isdn: fix kernel-infoleak in capi_unlocked_ioctl
  ipv6: route: Fix return value of ip6_neigh_lookup() on neigh_create() error
  net/hamradio/6pack: use mod_timer() to rearm timers
  net-next/hinic:add shutdown callback
  net: hns3: call hns3_nic_net_open() while doing HNAE3_UP_CLIENT
  ip: validate header length on virtual device xmit
  tap: call skb_probe_transport_header after setting skb->dev
  ptr_ring: wrap back ->producer in __ptr_ring_swap_queue()
  net: rds: remove unnecessary NULL check
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/ax25/af_ax25.c              11
-rw-r--r--  net/ax25/ax25_dev.c              2
-rw-r--r--  net/compat.c                    15
-rw-r--r--  net/core/ethtool.c              12
-rw-r--r--  net/core/rtnetlink.c             5
-rw-r--r--  net/core/sock.c                 15
-rw-r--r--  net/ipv4/fib_rules.c             8
-rw-r--r--  net/ipv4/ip_gre.c                9
-rw-r--r--  net/ipv4/ip_tunnel.c             9
-rw-r--r--  net/ipv4/ip_vti.c               12
-rw-r--r--  net/ipv6/addrconf.c              4
-rw-r--r--  net/ipv6/af_inet6.c              3
-rw-r--r--  net/ipv6/ip6_fib.c               6
-rw-r--r--  net/ipv6/ip6_gre.c              10
-rw-r--r--  net/ipv6/ip6_tunnel.c           10
-rw-r--r--  net/ipv6/ip6_vti.c               8
-rw-r--r--  net/ipv6/ip6mr.c                17
-rw-r--r--  net/ipv6/reassembly.c            2
-rw-r--r--  net/ipv6/route.c                10
-rw-r--r--  net/ipv6/sit.c                   3
-rw-r--r--  net/netfilter/nf_conncount.c   290
-rw-r--r--  net/netfilter/nf_tables_api.c    2
-rw-r--r--  net/netfilter/nft_connlimit.c   14
-rw-r--r--  net/netrom/af_netrom.c          15
-rw-r--r--  net/rds/tcp.c                    2
-rw-r--r--  net/sunrpc/svcsock.c             2
-rw-r--r--  net/tipc/bearer.c                1
-rw-r--r--  net/tipc/netlink_compat.c        2
28 files changed, 263 insertions(+), 236 deletions(-)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index c603d33d5410..5d01edf8d819 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -653,15 +653,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
}
- dev = dev_get_by_name(&init_net, devname);
+ rtnl_lock();
+ dev = __dev_get_by_name(&init_net, devname);
if (!dev) {
+ rtnl_unlock();
res = -ENODEV;
break;
}
ax25->ax25_dev = ax25_dev_ax25dev(dev);
+ if (!ax25->ax25_dev) {
+ rtnl_unlock();
+ res = -ENODEV;
+ break;
+ }
ax25_fillin_cb(ax25, ax25->ax25_dev);
- dev_put(dev);
+ rtnl_unlock();
break;
default:
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 9a3a301e1e2f..d92195cd7834 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
if ((s = ax25_dev_list) == ax25_dev) {
ax25_dev_list = s->next;
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
dev_put(dev);
kfree(ax25_dev);
return;
@@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
if (s->next == ax25_dev) {
s->next = ax25_dev->next;
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
dev_put(dev);
kfree(ax25_dev);
return;
diff --git a/net/compat.c b/net/compat.c
index f7084780a8f8..c3a2f868e8af 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -467,12 +467,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
ctv = (struct compat_timeval __user *) userstamp;
err = -ENOENT;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- tv = ktime_to_timeval(sk->sk_stamp);
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
+
if (tv.tv_sec == -1)
return err;
if (tv.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- tv = ktime_to_timeval(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ tv = ktime_to_timeval(kt);
}
err = 0;
if (put_user(tv.tv_sec, &ctv->tv_sec) ||
@@ -494,12 +496,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
ctv = (struct compat_timespec __user *) userstamp;
err = -ENOENT;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- ts = ktime_to_timespec(sk->sk_stamp);
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
if (ts.tv_sec == -1)
return err;
if (ts.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- ts = ktime_to_timespec(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ ts = ktime_to_timespec(kt);
}
err = 0;
if (put_user(ts.tv_sec, &ctv->tv_sec) ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d05402868575..158264f7cfaf 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -793,8 +793,13 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
if (rc >= 0)
info.n_priv_flags = rc;
}
- if (ops->get_regs_len)
- info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_regs_len) {
+ int ret = ops->get_regs_len(dev);
+
+ if (ret > 0)
+ info.regdump_len = ret;
+ }
+
if (ops->get_eeprom_len)
info.eedump_len = ops->get_eeprom_len(dev);
@@ -1337,6 +1342,9 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
return -EFAULT;
reglen = ops->get_regs_len(dev);
+ if (reglen <= 0)
+ return reglen;
+
if (regs.len > reglen)
regs.len = reglen;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 48f61885fd6f..5ea1bed08ede 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -4104,6 +4104,11 @@ static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (err < 0)
return err;
+ if (!addr) {
+ NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
+ return -EINVAL;
+ }
+
if (brport_idx) {
dev = __dev_get_by_index(net, brport_idx);
if (!dev) {
diff --git a/net/core/sock.c b/net/core/sock.c
index f00902c532cc..6aa2e7e0b4fb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2751,6 +2751,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_stamp = SK_DEFAULT_STAMP;
+#if BITS_PER_LONG==32
+ seqlock_init(&sk->sk_stamp_seq);
+#endif
atomic_set(&sk->sk_zckey, 0);
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -2850,12 +2853,13 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
struct timeval tv;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- tv = ktime_to_timeval(sk->sk_stamp);
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
if (tv.tv_sec == -1)
return -ENOENT;
if (tv.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- tv = ktime_to_timeval(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ tv = ktime_to_timeval(kt);
}
return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
@@ -2866,11 +2870,12 @@ int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
struct timespec ts;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- ts = ktime_to_timespec(sk->sk_stamp);
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
if (ts.tv_sec == -1)
return -ENOENT;
if (ts.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
ts = ktime_to_timespec(sk->sk_stamp);
}
return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
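The sock_read_timestamp()/sock_write_timestamp() helpers used by the
net/compat.c and net/core/sock.c hunks above live in include/net/sock.h,
alongside the sk_stamp_seq field initialized in sock_init_data(), so they
fall outside this diffstat (limited to 'net'). A sketch of those helpers,
reconstructed from the same fix rather than quoted from this diff: on
32-bit the seqlock makes the two-word ktime_t load and store coherent with
each other, while on 64-bit the plain access already is.

static inline ktime_t sock_read_timestamp(struct sock *sk)
{
#if BITS_PER_LONG==32
	unsigned int seq;
	ktime_t kt;

	/* retry the read if a writer raced with us */
	do {
		seq = read_seqbegin(&sk->sk_stamp_seq);
		kt = sk->sk_stamp;
	} while (read_seqretry(&sk->sk_stamp_seq, seq));

	return kt;
#else
	/* 64-bit: a ktime_t load is a single naturally atomic access */
	return sk->sk_stamp;
#endif
}

static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
{
#if BITS_PER_LONG==32
	write_seqlock(&sk->sk_stamp_seq);
	sk->sk_stamp = kt;
	write_sequnlock(&sk->sk_stamp_seq);
#else
	sk->sk_stamp = kt;
#endif
}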
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f8eb78d042a4..cfec3af54c8d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -198,11 +198,15 @@ static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
static struct fib_table *fib_empty_table(struct net *net)
{
- u32 id;
+ u32 id = 1;
- for (id = 1; id <= RT_TABLE_MAX; id++)
+ while (1) {
if (!fib_get_table(net, id))
return fib_new_table(net, id);
+
+ if (id++ == RT_TABLE_MAX)
+ break;
+ }
return NULL;
}
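The old loop could not terminate once every table id was occupied:
RT_TABLE_MAX is 0xFFFFFFFF, so for a u32 counter the condition
"id <= RT_TABLE_MAX" is always true, and incrementing past the maximum
wraps the counter back to 0. A standalone userspace demonstration of the
wrap and of the fixed test-before-increment shape (hypothetical table
ids, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define TABLE_MAX UINT32_MAX	/* RT_TABLE_MAX has the same value */

int main(void)
{
	uint32_t id = TABLE_MAX - 1;

	/* Broken shape: "for (id = 1; id <= TABLE_MAX; id++)" never
	 * exits, since the condition is a tautology for a u32 and the
	 * increment wraps from TABLE_MAX back to 0.
	 */

	/* Fixed shape: test the bound before incrementing. */
	while (1) {
		printf("visiting table id %u\n", id);
		if (id++ == TABLE_MAX)
			break;
	}
	return 0;	/* visits two ids, then terminates */
}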
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index c7a7bd58a23c..d1d09f3e5f9e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -676,6 +676,9 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
+ if (!pskb_inet_may_pull(skb))
+ goto free_skb;
+
if (tunnel->collect_md) {
gre_fb_xmit(skb, dev, skb->protocol);
return NETDEV_TX_OK;
@@ -719,6 +722,9 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
bool truncate = false;
+ if (!pskb_inet_may_pull(skb))
+ goto free_skb;
+
if (tunnel->collect_md) {
erspan_fb_xmit(skb, dev, skb->protocol);
return NETDEV_TX_OK;
@@ -762,6 +768,9 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ if (!pskb_inet_may_pull(skb))
+ goto free_skb;
+
if (tunnel->collect_md) {
gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
return NETDEV_TX_OK;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 284a22154b4e..c4f5602308ed 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,7 +627,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- unsigned int inner_nhdr_len = 0;
const struct iphdr *inner_iph;
struct flowi4 fl4;
u8 tos, ttl;
@@ -637,14 +636,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
__be32 dst;
bool connected;
- /* ensure we can access the inner net header, for several users below */
- if (skb->protocol == htons(ETH_P_IP))
- inner_nhdr_len = sizeof(struct iphdr);
- else if (skb->protocol == htons(ETH_P_IPV6))
- inner_nhdr_len = sizeof(struct ipv6hdr);
- if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
- goto tx_error;
-
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
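The pskb_inet_may_pull() calls added throughout the tunnel xmit paths in
this pull replace the open-coded check deleted above ("ip: validate
header length on virtual device xmit"). The helper itself is added to
include/net/ip_tunnels.h (hence the new include in the net/ipv6/ip6mr.c
hunk below) and so is outside this net/-limited diffstat; reconstructed
from that patch, it reads roughly as follows. Note it pulls relative to
the network header rather than the start of the skb data:

static inline bool pskb_inet_may_pull(struct sk_buff *skb)
{
	int nhlen;

	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	default:
		nhlen = 0;
	}

	/* make sure the inner network header is in the linear area */
	return pskb_network_may_pull(skb, nhlen);
}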
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index de31b302d69c..d7b43e700023 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -241,6 +241,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
struct flowi fl;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
memset(&fl, 0, sizeof(fl));
switch (skb->protocol) {
@@ -253,15 +256,18 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
break;
default:
- dev->stats.tx_errors++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ goto tx_err;
}
/* override mark with tunnel output key */
fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
return vti_xmit(skb, dev, &fl);
+
+tx_err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
static int vti4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 521e471f1cf9..8eeec6eb2bd3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4736,8 +4736,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
idev = ipv6_find_idev(dev);
- if (IS_ERR(idev))
- return PTR_ERR(idev);
+ if (!idev)
+ return -ENOBUFS;
if (!ipv6_allow_optimistic_dad(net, idev))
cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index f0cd291034f0..0bfb6cc0a30a 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -350,6 +350,9 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
err = -EINVAL;
goto out_unlock;
}
+ }
+
+ if (sk->sk_bound_dev_if) {
dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
if (!dev) {
err = -ENODEV;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ae3786132c23..6613d8dbb0e5 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -627,7 +627,11 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
return -ENOENT;
}
- res = fib6_dump_table(tb, skb, cb);
+ if (!cb->args[0]) {
+ res = fib6_dump_table(tb, skb, cb);
+ if (!res)
+ cb->args[0] = 1;
+ }
goto out;
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 229e55c99021..09d0826742f8 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -881,6 +881,9 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
struct net_device_stats *stats = &t->dev->stats;
int ret;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
goto tx_err;
@@ -923,6 +926,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
int nhoff;
int thoff;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
goto tx_err;
@@ -995,8 +1001,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
goto tx_err;
}
} else {
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
switch (skb->protocol) {
case htons(ETH_P_IP):
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1004,7 +1008,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
&dsfield, &encap_limit);
break;
case htons(ETH_P_IPV6):
- if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+ if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
goto tx_err;
if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
&dsfield, &encap_limit))
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 99179b9c8384..0c6403cf8b52 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1243,10 +1243,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
- /* ensure we can access the full inner ip header */
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- return -1;
-
iph = ip_hdr(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1321,9 +1317,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
- if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
- return -1;
-
ipv6h = ipv6_hdr(skb);
tproto = READ_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
@@ -1405,6 +1398,9 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &t->dev->stats;
int ret;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
ret = ip4ip6_tnl_xmit(skb, dev);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 706fe42e4928..8b6eefff2f7e 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -522,18 +522,18 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct net_device_stats *stats = &t->dev->stats;
- struct ipv6hdr *ipv6h;
struct flowi fl;
int ret;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
memset(&fl, 0, sizeof(fl));
switch (skb->protocol) {
case htons(ETH_P_IPV6):
- ipv6h = ipv6_hdr(skb);
-
if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
- vti6_addr_conflict(t, ipv6h))
+ vti6_addr_conflict(t, ipv6_hdr(skb)))
goto tx_err;
xfrm_decode_session(skb, &fl, AF_INET6);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8276f1224f16..30337b38274b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -51,6 +51,7 @@
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
+#include <net/ip_tunnels.h>
#include <linux/nospec.h>
@@ -599,13 +600,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
.flowi6_mark = skb->mark,
};
- int err;
- err = ip6mr_fib_lookup(net, &fl6, &mrt);
- if (err < 0) {
- kfree_skb(skb);
- return err;
- }
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
+ if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
+ goto tx_err;
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
@@ -614,6 +614,11 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
read_unlock(&mrt_lock);
kfree_skb(skb);
return NETDEV_TX_OK;
+
+tx_err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a5bb59ee50ac..36a3d8dc61f5 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -210,7 +210,7 @@ found:
if (next && next->ip_defrag_offset < end)
goto discard_fq;
- /* Note : skb->ip_defrag_offset and skb->dev share the same location */
+ /* Note : skb->ip_defrag_offset and skb->sk share the same location */
dev = skb->dev;
if (dev)
fq->iif = dev->ifindex;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 194bc162866d..40b225f87d5e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -210,7 +210,9 @@ struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
n = __ipv6_neigh_lookup(dev, daddr);
if (n)
return n;
- return neigh_create(&nd_tbl, daddr, dev);
+
+ n = neigh_create(&nd_tbl, daddr, dev);
+ return IS_ERR(n) ? NULL : n;
}
static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
@@ -5054,12 +5056,16 @@ int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
{
struct net *net;
int delay;
+ int ret;
if (!write)
return -EINVAL;
net = (struct net *)ctl->extra1;
delay = net->ipv6.sysctl.flush_delay;
- proc_dointvec(ctl, write, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ if (ret)
+ return ret;
+
fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 51c9f75f34b9..1e03305c0549 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1021,6 +1021,9 @@ tx_error:
static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 9cd180bda092..7554c56b2e63 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -33,12 +33,6 @@
#define CONNCOUNT_SLOTS 256U
-#ifdef CONFIG_LOCKDEP
-#define CONNCOUNT_LOCK_SLOTS 8U
-#else
-#define CONNCOUNT_LOCK_SLOTS 256U
-#endif
-
#define CONNCOUNT_GC_MAX_NODES 8
#define MAX_KEYLEN 5
@@ -49,8 +43,6 @@ struct nf_conncount_tuple {
struct nf_conntrack_zone zone;
int cpu;
u32 jiffies32;
- bool dead;
- struct rcu_head rcu_head;
};
struct nf_conncount_rb {
@@ -60,7 +52,7 @@ struct nf_conncount_rb {
struct rcu_head rcu_head;
};
-static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
struct nf_conncount_data {
unsigned int keylen;
@@ -89,79 +81,25 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
return memcmp(a, b, klen * sizeof(u32));
}
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone)
-{
- struct nf_conncount_tuple *conn;
-
- if (WARN_ON_ONCE(list->count > INT_MAX))
- return NF_CONNCOUNT_ERR;
-
- conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
- if (conn == NULL)
- return NF_CONNCOUNT_ERR;
-
- conn->tuple = *tuple;
- conn->zone = *zone;
- conn->cpu = raw_smp_processor_id();
- conn->jiffies32 = (u32)jiffies;
- conn->dead = false;
- spin_lock_bh(&list->list_lock);
- if (list->dead == true) {
- kmem_cache_free(conncount_conn_cachep, conn);
- spin_unlock_bh(&list->list_lock);
- return NF_CONNCOUNT_SKIP;
- }
- list_add_tail(&conn->node, &list->head);
- list->count++;
- spin_unlock_bh(&list->list_lock);
- return NF_CONNCOUNT_ADDED;
-}
-EXPORT_SYMBOL_GPL(nf_conncount_add);
-
-static void __conn_free(struct rcu_head *h)
-{
- struct nf_conncount_tuple *conn;
-
- conn = container_of(h, struct nf_conncount_tuple, rcu_head);
- kmem_cache_free(conncount_conn_cachep, conn);
-}
-
-static bool conn_free(struct nf_conncount_list *list,
+static void conn_free(struct nf_conncount_list *list,
struct nf_conncount_tuple *conn)
{
- bool free_entry = false;
-
- spin_lock_bh(&list->list_lock);
-
- if (conn->dead) {
- spin_unlock_bh(&list->list_lock);
- return free_entry;
- }
+ lockdep_assert_held(&list->list_lock);
list->count--;
- conn->dead = true;
- list_del_rcu(&conn->node);
- if (list->count == 0) {
- list->dead = true;
- free_entry = true;
- }
+ list_del(&conn->node);
- spin_unlock_bh(&list->list_lock);
- call_rcu(&conn->rcu_head, __conn_free);
- return free_entry;
+ kmem_cache_free(conncount_conn_cachep, conn);
}
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
- struct nf_conncount_tuple *conn, bool *free_entry)
+ struct nf_conncount_tuple *conn)
{
const struct nf_conntrack_tuple_hash *found;
unsigned long a, b;
int cpu = raw_smp_processor_id();
- __s32 age;
+ u32 age;
found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
if (found)
@@ -176,52 +114,45 @@ find_or_evict(struct net *net, struct nf_conncount_list *list,
*/
age = a - b;
if (conn->cpu == cpu || age >= 2) {
- *free_entry = conn_free(list, conn);
+ conn_free(list, conn);
return ERR_PTR(-ENOENT);
}
return ERR_PTR(-EAGAIN);
}
-void nf_conncount_lookup(struct net *net,
- struct nf_conncount_list *list,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone,
- bool *addit)
+static int __nf_conncount_add(struct net *net,
+ struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
{
const struct nf_conntrack_tuple_hash *found;
struct nf_conncount_tuple *conn, *conn_n;
struct nf_conn *found_ct;
unsigned int collect = 0;
- bool free_entry = false;
-
- /* best effort only */
- *addit = tuple ? true : false;
/* check the saved connections */
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
if (collect > CONNCOUNT_GC_MAX_NODES)
break;
- found = find_or_evict(net, list, conn, &free_entry);
+ found = find_or_evict(net, list, conn);
if (IS_ERR(found)) {
/* Not found, but might be about to be confirmed */
if (PTR_ERR(found) == -EAGAIN) {
- if (!tuple)
- continue;
-
if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
nf_ct_zone_id(zone, zone->dir))
- *addit = false;
- } else if (PTR_ERR(found) == -ENOENT)
+ return 0; /* already exists */
+ } else {
collect++;
+ }
continue;
}
found_ct = nf_ct_tuplehash_to_ctrack(found);
- if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
+ if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
nf_ct_zone_equal(found_ct, zone, zone->dir)) {
/*
* We should not see tuples twice unless someone hooks
@@ -229,7 +160,8 @@ void nf_conncount_lookup(struct net *net,
*
* Attempt to avoid a re-add in this case.
*/
- *addit = false;
+ nf_ct_put(found_ct);
+ return 0;
} else if (already_closed(found_ct)) {
/*
* we do not care about connections which are
@@ -243,19 +175,48 @@ void nf_conncount_lookup(struct net *net,
nf_ct_put(found_ct);
}
+
+ if (WARN_ON_ONCE(list->count > INT_MAX))
+ return -EOVERFLOW;
+
+ conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+ if (conn == NULL)
+ return -ENOMEM;
+
+ conn->tuple = *tuple;
+ conn->zone = *zone;
+ conn->cpu = raw_smp_processor_id();
+ conn->jiffies32 = (u32)jiffies;
+ list_add_tail(&conn->node, &list->head);
+ list->count++;
+ return 0;
}
-EXPORT_SYMBOL_GPL(nf_conncount_lookup);
+
+int nf_conncount_add(struct net *net,
+ struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
+{
+ int ret;
+
+ /* check the saved connections */
+ spin_lock_bh(&list->list_lock);
+ ret = __nf_conncount_add(net, list, tuple, zone);
+ spin_unlock_bh(&list->list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_add);
void nf_conncount_list_init(struct nf_conncount_list *list)
{
spin_lock_init(&list->list_lock);
INIT_LIST_HEAD(&list->head);
list->count = 0;
- list->dead = false;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);
-/* Return true if the list is empty */
+/* Return true if the list is empty. Must be called with BH disabled. */
bool nf_conncount_gc_list(struct net *net,
struct nf_conncount_list *list)
{
@@ -263,17 +224,17 @@ bool nf_conncount_gc_list(struct net *net,
struct nf_conncount_tuple *conn, *conn_n;
struct nf_conn *found_ct;
unsigned int collected = 0;
- bool free_entry = false;
bool ret = false;
+ /* don't bother if other cpu is already doing GC */
+ if (!spin_trylock(&list->list_lock))
+ return false;
+
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
- found = find_or_evict(net, list, conn, &free_entry);
+ found = find_or_evict(net, list, conn);
if (IS_ERR(found)) {
- if (PTR_ERR(found) == -ENOENT) {
- if (free_entry)
- return true;
+ if (PTR_ERR(found) == -ENOENT)
collected++;
- }
continue;
}
@@ -284,23 +245,19 @@ bool nf_conncount_gc_list(struct net *net,
* closed already -> ditch it
*/
nf_ct_put(found_ct);
- if (conn_free(list, conn))
- return true;
+ conn_free(list, conn);
collected++;
continue;
}
nf_ct_put(found_ct);
if (collected > CONNCOUNT_GC_MAX_NODES)
- return false;
+ break;
}
- spin_lock_bh(&list->list_lock);
- if (!list->count) {
- list->dead = true;
+ if (!list->count)
ret = true;
- }
- spin_unlock_bh(&list->list_lock);
+ spin_unlock(&list->list_lock);
return ret;
}
@@ -314,6 +271,7 @@ static void __tree_nodes_free(struct rcu_head *h)
kmem_cache_free(conncount_rb_cachep, rbconn);
}
+/* caller must hold tree nf_conncount_locks[] lock */
static void tree_nodes_free(struct rb_root *root,
struct nf_conncount_rb *gc_nodes[],
unsigned int gc_count)
@@ -323,8 +281,10 @@ static void tree_nodes_free(struct rb_root *root,
while (gc_count) {
rbconn = gc_nodes[--gc_count];
spin_lock(&rbconn->list.list_lock);
- rb_erase(&rbconn->node, root);
- call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+ if (!rbconn->list.count) {
+ rb_erase(&rbconn->node, root);
+ call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+ }
spin_unlock(&rbconn->list.list_lock);
}
}
@@ -341,20 +301,19 @@ insert_tree(struct net *net,
struct rb_root *root,
unsigned int hash,
const u32 *key,
- u8 keylen,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone)
{
- enum nf_conncount_list_add ret;
struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
struct rb_node **rbnode, *parent;
struct nf_conncount_rb *rbconn;
struct nf_conncount_tuple *conn;
unsigned int count = 0, gc_count = 0;
- bool node_found = false;
-
- spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+ u8 keylen = data->keylen;
+ bool do_gc = true;
+ spin_lock_bh(&nf_conncount_locks[hash]);
+restart:
parent = NULL;
rbnode = &(root->rb_node);
while (*rbnode) {
@@ -368,45 +327,32 @@ insert_tree(struct net *net,
} else if (diff > 0) {
rbnode = &((*rbnode)->rb_right);
} else {
- /* unlikely: other cpu added node already */
- node_found = true;
- ret = nf_conncount_add(&rbconn->list, tuple, zone);
- if (ret == NF_CONNCOUNT_ERR) {
+ int ret;
+
+ ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
+ if (ret)
count = 0; /* hotdrop */
- } else if (ret == NF_CONNCOUNT_ADDED) {
+ else
count = rbconn->list.count;
- } else {
- /* NF_CONNCOUNT_SKIP, rbconn is already
- * reclaimed by gc, insert a new tree node
- */
- node_found = false;
- }
- break;
+ tree_nodes_free(root, gc_nodes, gc_count);
+ goto out_unlock;
}
if (gc_count >= ARRAY_SIZE(gc_nodes))
continue;
- if (nf_conncount_gc_list(net, &rbconn->list))
+ if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
gc_nodes[gc_count++] = rbconn;
}
if (gc_count) {
tree_nodes_free(root, gc_nodes, gc_count);
- /* tree_node_free before new allocation permits
- * allocator to re-use newly free'd object.
- *
- * This is a rare event; in most cases we will find
- * existing node to re-use. (or gc_count is 0).
- */
-
- if (gc_count >= ARRAY_SIZE(gc_nodes))
- schedule_gc_worker(data, hash);
+ schedule_gc_worker(data, hash);
+ gc_count = 0;
+ do_gc = false;
+ goto restart;
}
- if (node_found)
- goto out_unlock;
-
/* expected case: match, insert new node */
rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
if (rbconn == NULL)
@@ -430,7 +376,7 @@ insert_tree(struct net *net,
rb_link_node_rcu(&rbconn->node, parent, rbnode);
rb_insert_color(&rbconn->node, root);
out_unlock:
- spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+ spin_unlock_bh(&nf_conncount_locks[hash]);
return count;
}
@@ -441,7 +387,6 @@ count_tree(struct net *net,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone)
{
- enum nf_conncount_list_add ret;
struct rb_root *root;
struct rb_node *parent;
struct nf_conncount_rb *rbconn;
@@ -454,7 +399,6 @@ count_tree(struct net *net,
parent = rcu_dereference_raw(root->rb_node);
while (parent) {
int diff;
- bool addit;
rbconn = rb_entry(parent, struct nf_conncount_rb, node);
@@ -464,31 +408,36 @@ count_tree(struct net *net,
} else if (diff > 0) {
parent = rcu_dereference_raw(parent->rb_right);
} else {
- /* same source network -> be counted! */
- nf_conncount_lookup(net, &rbconn->list, tuple, zone,
- &addit);
+ int ret;
- if (!addit)
+ if (!tuple) {
+ nf_conncount_gc_list(net, &rbconn->list);
return rbconn->list.count;
+ }
- ret = nf_conncount_add(&rbconn->list, tuple, zone);
- if (ret == NF_CONNCOUNT_ERR) {
- return 0; /* hotdrop */
- } else if (ret == NF_CONNCOUNT_ADDED) {
- return rbconn->list.count;
- } else {
- /* NF_CONNCOUNT_SKIP, rbconn is already
- * reclaimed by gc, insert a new tree node
- */
+ spin_lock_bh(&rbconn->list.list_lock);
+ /* Node might be about to be free'd.
+ * We need to defer to insert_tree() in this case.
+ */
+ if (rbconn->list.count == 0) {
+ spin_unlock_bh(&rbconn->list.list_lock);
break;
}
+
+ /* same source network -> be counted! */
+ ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
+ spin_unlock_bh(&rbconn->list.list_lock);
+ if (ret)
+ return 0; /* hotdrop */
+ else
+ return rbconn->list.count;
}
}
if (!tuple)
return 0;
- return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
+ return insert_tree(net, data, root, hash, key, tuple, zone);
}
static void tree_gc_worker(struct work_struct *work)
@@ -499,27 +448,47 @@ static void tree_gc_worker(struct work_struct *work)
struct rb_node *node;
unsigned int tree, next_tree, gc_count = 0;
- tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
+ tree = data->gc_tree % CONNCOUNT_SLOTS;
root = &data->root[tree];
+ local_bh_disable();
rcu_read_lock();
for (node = rb_first(root); node != NULL; node = rb_next(node)) {
rbconn = rb_entry(node, struct nf_conncount_rb, node);
if (nf_conncount_gc_list(data->net, &rbconn->list))
- gc_nodes[gc_count++] = rbconn;
+ gc_count++;
}
rcu_read_unlock();
+ local_bh_enable();
+
+ cond_resched();
spin_lock_bh(&nf_conncount_locks[tree]);
+ if (gc_count < ARRAY_SIZE(gc_nodes))
+ goto next; /* do not bother */
- if (gc_count) {
- tree_nodes_free(root, gc_nodes, gc_count);
+ gc_count = 0;
+ node = rb_first(root);
+ while (node != NULL) {
+ rbconn = rb_entry(node, struct nf_conncount_rb, node);
+ node = rb_next(node);
+
+ if (rbconn->list.count > 0)
+ continue;
+
+ gc_nodes[gc_count++] = rbconn;
+ if (gc_count >= ARRAY_SIZE(gc_nodes)) {
+ tree_nodes_free(root, gc_nodes, gc_count);
+ gc_count = 0;
+ }
}
+ tree_nodes_free(root, gc_nodes, gc_count);
+next:
clear_bit(tree, data->pending_trees);
next_tree = (tree + 1) % CONNCOUNT_SLOTS;
- next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS);
+ next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
if (next_tree < CONNCOUNT_SLOTS) {
data->gc_tree = next_tree;
@@ -621,10 +590,7 @@ static int __init nf_conncount_modinit(void)
{
int i;
- BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
- BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
-
- for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
+ for (i = 0; i < CONNCOUNT_SLOTS; ++i)
spin_lock_init(&nf_conncount_locks[i]);
conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fec814dace5a..2b0a93300dd7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5727,6 +5727,8 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
goto nla_put_failure;
nest = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK);
+ if (!nest)
+ goto nla_put_failure;
if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) ||
nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->priority)))
goto nla_put_failure;
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index b90d96ba4a12..af1497ab9464 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -30,7 +30,6 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int count;
- bool addit;
tuple_ptr = &tuple;
@@ -44,19 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
return;
}
- nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
- &addit);
- count = priv->list.count;
-
- if (!addit)
- goto out;
-
- if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
+ if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
regs->verdict.code = NF_DROP;
return;
}
- count++;
-out:
+
+ count = priv->list.count;
if ((count > priv->limit) ^ priv->invert) {
regs->verdict.code = NFT_BREAK;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 03f37c4e64fe..1d3144d19903 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
s = NULL;
@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
- bh_unlock_sock(sk);
+ sock_put(sk);
}
id++;
@@ -920,6 +920,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
}
if (sk != NULL) {
+ bh_lock_sock(sk);
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
@@ -929,6 +930,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
+ sock_put(sk);
return ret;
}
@@ -960,10 +962,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
- bh_unlock_sock(sk);
+ sock_put(sk);
return 0;
}
+ bh_lock_sock(sk);
+
window = skb->data[20];
skb->sk = make;
@@ -1016,6 +1020,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
sk->sk_data_ready(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
nr_insert_socket(make);
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index b9bbcf3d6c63..c16f0a362c32 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -623,7 +623,7 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
if (rtn->rds_tcp_sysctl)
unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
- if (net != &init_net && rtn->ctl_table)
+ if (net != &init_net)
kfree(rtn->ctl_table);
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index c7ae1ed5324f..a6a060925e5d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -542,7 +542,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
/* Don't enable netstamp, sunrpc doesn't
need that much accuracy */
}
- svsk->sk_sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(svsk->sk_sk, skb->tstamp);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
len = skb->len;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index fb2c0d8f359f..d27f30a9a01d 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -319,7 +319,6 @@ static int tipc_enable_bearer(struct net *net, const char *name,
res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
if (res) {
bearer_disable(net, b);
- kfree(b);
errstr = "failed to create discoverer";
goto rejected;
}
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 21f6ccc89401..40f5cae623a7 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -904,6 +904,8 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
TIPC_NL_PUBL_GET);
+ if (!hdr)
+ return -EMSGSIZE;
nest = nla_nest_start(args, TIPC_NLA_SOCK);
if (!nest) {