Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c   13
-rw-r--r--  net/core/dev.c         6
-rw-r--r--  net/core/ethtool.c    12
-rw-r--r--  net/core/filter.c     21
-rw-r--r--  net/core/neighbour.c  10
-rw-r--r--  net/core/pktgen.c      3
-rw-r--r--  net/core/sock.c       15
7 files changed, 31 insertions(+), 49 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f19bf3dc2bd6..9938952c5c78 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -819,8 +819,9 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
/**
* datagram_poll - generic datagram poll
+ * @file: file struct
* @sock: socket
- * @events to wait for
+ * @wait: poll table
*
* Datagram poll: Again totally generic. This also handles
* sequenced packet sockets providing the socket receive queue
@@ -830,10 +831,14 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
* and you use a different write policy from sock_writeable()
* then please supply your own write_space callback.
*/
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
- __poll_t mask = 0;
+ __poll_t mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
/* exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -866,4 +871,4 @@ __poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
return mask;
}
-EXPORT_SYMBOL(datagram_poll_mask);
+EXPORT_SYMBOL(datagram_poll);
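For context on the restored signature: protocols that reuse the generic datagram implementation point the ->poll hook in their proto_ops at datagram_poll(), which again receives the file and poll_table and calls sock_poll_wait() itself instead of relying on a separate ->poll_mask() stage. A minimal sketch, with a hypothetical foo_dgram_ops (netlink, among others, wires it up this way):

    #include <linux/module.h>
    #include <linux/net.h>
    #include <linux/skbuff.h>       /* declares datagram_poll() */

    static const struct proto_ops foo_dgram_ops = {
            .family = PF_NETLINK,           /* illustrative family */
            .owner  = THIS_MODULE,
            .poll   = datagram_poll,        /* file/poll_table form restored above */
            /* remaining ops elided for brevity */
    };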
diff --git a/net/core/dev.c b/net/core/dev.c
index 6e18242a1cae..a5aa1c7444e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8643,7 +8643,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- if (dev_get_valid_name(net, dev, pat) < 0)
+ err = dev_get_valid_name(net, dev, pat);
+ if (err < 0)
goto out;
}
@@ -8655,7 +8656,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
dev_close(dev);
/* And unlink it from device chain */
- err = -ENODEV;
unlist_netdevice(dev);
synchronize_net();
@@ -8823,7 +8823,7 @@ static struct hlist_head * __net_init netdev_create_hash(void)
int i;
struct hlist_head *hash;
- hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
+ hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
if (hash != NULL)
for (i = 0; i < NETDEV_HASHENTRIES; i++)
INIT_HLIST_HEAD(&hash[i]);
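Two independent fixes in dev.c: dev_change_net_namespace() now propagates the return value of dev_get_valid_name() on failure and drops an err = -ENODEV assignment that is no longer needed, while the hash-table allocation switches to kmalloc_array(), which returns NULL when the element count times the element size would overflow rather than silently allocating a short buffer. The kcalloc() and array_size() conversions in ethtool.c and pktgen.c below follow the same overflow-safe pattern. A minimal sketch of the allocation idiom, using a hypothetical alloc_hash() helper:

    #include <linux/list.h>
    #include <linux/slab.h>

    static struct hlist_head *alloc_hash(unsigned int entries)
    {
            struct hlist_head *hash;
            unsigned int i;

            /* kmalloc_array() fails (returns NULL) if entries * sizeof(*hash)
             * overflows, instead of handing back an undersized buffer. */
            hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
            if (!hash)
                    return NULL;

            for (i = 0; i < entries; i++)
                    INIT_HLIST_HEAD(&hash[i]);

            return hash;
    }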
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index c15075dc7572..e677a20180cf 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -911,7 +911,7 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
memset(&info, 0, sizeof(info));
info.cmd = ETHTOOL_GSSET_INFO;
- info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
+ info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER);
if (!info_buf)
return -ENOMEM;
@@ -1017,7 +1017,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
- rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
+ rule_buf = kcalloc(info.rule_cnt, sizeof(u32),
GFP_USER);
if (!rule_buf)
return -ENOMEM;
@@ -1816,7 +1816,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
return -EFAULT;
test.len = test_len;
- data = kmalloc(test_len * sizeof(u64), GFP_USER);
+ data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
if (!data)
return -ENOMEM;
@@ -1852,7 +1852,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
WARN_ON_ONCE(!ret);
gstrings.len = ret;
- data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
+ data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
if (gstrings.len && !data)
return -ENOMEM;
@@ -1952,7 +1952,7 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
return -EFAULT;
stats.n_stats = n_stats;
- data = vzalloc(n_stats * sizeof(u64));
+ data = vzalloc(array_size(n_stats, sizeof(u64)));
if (n_stats && !data)
return -ENOMEM;
@@ -1996,7 +1996,7 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
return -EFAULT;
stats.n_stats = n_stats;
- data = vzalloc(n_stats * sizeof(u64));
+ data = vzalloc(array_size(n_stats, sizeof(u64)));
if (n_stats && !data)
return -ENOMEM;
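One more note on the vzalloc() call sites: array_size() from <linux/overflow.h> saturates to SIZE_MAX when the multiplication overflows, so the allocation fails cleanly instead of returning a truncated buffer (kcalloc() above is simply the zeroing variant of kmalloc_array()). A brief sketch with a hypothetical alloc_stat_buf():

    #include <linux/overflow.h>
    #include <linux/types.h>
    #include <linux/vmalloc.h>

    /* Zeroed buffer for n_stats 64-bit counters. */
    static u64 *alloc_stat_buf(u32 n_stats)
    {
            /* On overflow, array_size() returns SIZE_MAX, which vzalloc()
             * cannot satisfy, so the caller sees NULL. */
            return vzalloc(array_size(n_stats, sizeof(u64)));
    }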
diff --git a/net/core/filter.c b/net/core/filter.c
index 3d9ba7e5965a..e7f12e9f598c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3214,20 +3214,6 @@ err:
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
- unsigned int len;
-
- if (unlikely(!(fwd->flags & IFF_UP)))
- return -ENETDOWN;
-
- len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
- if (skb->len > len)
- return -EMSGSIZE;
-
- return 0;
-}
-
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
@@ -3256,10 +3242,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
}
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
- if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+ struct bpf_dtab_netdev *dst = fwd;
+
+ err = dev_map_generic_redirect(dst, skb, xdp_prog);
+ if (unlikely(err))
goto err;
- skb->dev = fwd;
- generic_xdp_tx(skb, xdp_prog);
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
struct xdp_sock *xs = fwd;
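The hunk above replaces the open-coded IFF_UP/MTU check plus generic_xdp_tx() with dev_map_generic_redirect(), letting the devmap code handle validation and transmission for the generic (skb-based) XDP path. For reference, the kind of BPF program serviced by this path looks roughly like the sketch below; the libbpf-style map definition, the tx_ports name and the fixed key are illustrative assumptions:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Devmap of egress ifindexes, populated from userspace. */
    struct {
            __uint(type, BPF_MAP_TYPE_DEVMAP);
            __uint(max_entries, 8);
            __type(key, __u32);
            __type(value, __u32);
    } tx_ports SEC(".maps");

    SEC("xdp")
    int xdp_redirect_devmap(struct xdp_md *ctx)
    {
            /* Redirect every frame to the device stored at key 0.  On drivers
             * without native XDP support, the generic path patched above does
             * the actual forwarding. */
            return bpf_redirect_map(&tx_ports, 0, 0);
    }

    char _license[] SEC("license") = "GPL";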
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a7a9c3d738ba..8e3fda9e725c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -119,13 +119,14 @@ unsigned long neigh_rand_reach_time(unsigned long base)
EXPORT_SYMBOL(neigh_rand_reach_time);
-static bool neigh_del(struct neighbour *n, __u8 state,
+static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
struct neighbour __rcu **np, struct neigh_table *tbl)
{
bool retval = false;
write_lock(&n->lock);
- if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
+ if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
+ !(n->flags & flags)) {
struct neighbour *neigh;
neigh = rcu_dereference_protected(n->next,
@@ -157,7 +158,7 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock)))) {
if (n == ndel)
- return neigh_del(n, 0, np, tbl);
+ return neigh_del(n, 0, 0, np, tbl);
np = &n->next;
}
return false;
@@ -185,7 +186,8 @@ static int neigh_forced_gc(struct neigh_table *tbl)
* - nobody refers to it.
* - it is not permanent
*/
- if (neigh_del(n, NUD_PERMANENT, np, tbl)) {
+ if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np,
+ tbl)) {
shrunk = 1;
continue;
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7e4ede34cc52..49368e21d228 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3603,7 +3603,8 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
return -ENOMEM;
strcpy(pkt_dev->odevname, ifname);
- pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
+ pkt_dev->flows = vzalloc_node(array_size(MAX_CFLOWS,
+ sizeof(struct flow_state)),
node);
if (pkt_dev->flows == NULL) {
kfree(pkt_dev);
diff --git a/net/core/sock.c b/net/core/sock.c
index f333d75ef1a9..bcc41829a16d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -728,22 +728,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
sock_valbool_flag(sk, SOCK_DBG, valbool);
break;
case SO_REUSEADDR:
- val = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
- if ((sk->sk_family == PF_INET || sk->sk_family == PF_INET6) &&
- inet_sk(sk)->inet_num &&
- (sk->sk_reuse != val)) {
- ret = (sk->sk_state == TCP_ESTABLISHED) ? -EISCONN : -EUCLEAN;
- break;
- }
- sk->sk_reuse = val;
+ sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_REUSEPORT:
- if ((sk->sk_family == PF_INET || sk->sk_family == PF_INET6) &&
- inet_sk(sk)->inet_num &&
- (sk->sk_reuseport != valbool)) {
- ret = (sk->sk_state == TCP_ESTABLISHED) ? -EISCONN : -EUCLEAN;
- break;
- }
sk->sk_reuseport = valbool;
break;
case SO_TYPE:
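This hunk drops the checks that made SO_REUSEADDR and SO_REUSEPORT changes on an already-bound inet socket fail with EISCONN or EUCLEAN; the option value is simply stored again. A minimal userspace sketch of the affected call sequence (the port number is illustrative):

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int one = 1;
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            struct sockaddr_in addr = {
                    .sin_family      = AF_INET,
                    .sin_port        = htons(8080),
                    .sin_addr.s_addr = htonl(INADDR_ANY),
            };

            bind(fd, (struct sockaddr *)&addr, sizeof(addr));

            /* Toggling the flag on an already-bound socket: with the check
             * above removed, this succeeds again instead of failing with
             * EUCLEAN (or EISCONN on a connected socket). */
            if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0)
                    perror("setsockopt(SO_REUSEADDR)");

            close(fd);
            return 0;
    }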