author    David S. Miller <davem@davemloft.net>  2019-09-17 23:51:10 +0200
committer David S. Miller <davem@davemloft.net>  2019-09-17 23:51:10 +0200
commit    1bab8d4c488be22d57f9dd09968c90a0ddc413bf (patch)
tree      81318cd13170a3b6b72489f63e92adb0eaf49693
parent    990925fad5c227269c3fcb76c7c46811b1e86c73 (diff)
parent    00b368502d18f790ab715e055869fd4bb7484a9b (diff)
Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net
Pull in bug fixes from 'net' tree for the merge window.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  MAINTAINERS                                          8
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c         3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c   12
-rw-r--r--  drivers/net/xen-netfront.c                            2
-rw-r--r--  include/net/pkt_sched.h                               7
-rw-r--r--  include/net/sock_reuseport.h                         20
-rw-r--r--  net/core/dev.c                                       16
-rw-r--r--  net/core/sock_reuseport.c                            15
-rw-r--r--  net/dsa/dsa2.c                                        2
-rw-r--r--  net/ipv4/datagram.c                                   2
-rw-r--r--  net/ipv4/udp.c                                        5
-rw-r--r--  net/ipv6/datagram.c                                   2
-rw-r--r--  net/ipv6/ip6_gre.c                                    2
-rw-r--r--  net/ipv6/udp.c                                        5
-rw-r--r--  net/rds/ib_stats.c                                    2
-rw-r--r--  net/sched/sch_generic.c                               3
16 files changed, 83 insertions(+), 23 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 32f4f05fb3da..dd39fc578607 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -649,6 +649,12 @@ M: Lino Sanfilippo <LinoSanfilippo@gmx.de>
S: Maintained
F: drivers/net/ethernet/alacritech/*
+FORCEDETH GIGABIT ETHERNET DRIVER
+M: Rain River <rain.1986.08.12@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/nvidia/*
+
ALCATEL SPEEDTOUCH USB DRIVER
M: Duncan Sands <duncan.sands@free.fr>
L: linux-usb@vger.kernel.org
@@ -17673,7 +17679,7 @@ F: Documentation/ABI/testing/sysfs-hypervisor-xen
XEN NETWORK BACKEND DRIVER
M: Wei Liu <wei.liu@kernel.org>
-M: Paul Durrant <paul.durrant@citrix.com>
+M: Paul Durrant <paul@xen.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Supported
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e4bf7a4af87a..c487d2a7d6dd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -824,7 +824,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
above_thresh =
ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
ENA_TX_WAKEUP_THRESH);
- if (netif_tx_queue_stopped(txq) && above_thresh) {
+ if (netif_tx_queue_stopped(txq) && above_thresh &&
+ test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.queue_wakeup++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c61d702fe83a..a6cb2aa60e64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4713,10 +4713,12 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
- phylink_stop(priv->phylink);
-
mutex_lock(&priv->lock);
+ rtnl_lock();
+ phylink_stop(priv->phylink);
+ rtnl_unlock();
+
netif_device_detach(ndev);
stmmac_stop_all_queues(priv);
@@ -4820,9 +4822,11 @@ int stmmac_resume(struct device *dev)
stmmac_start_all_queues(priv);
- mutex_unlock(&priv->lock);
-
+ rtnl_lock();
phylink_start(priv->phylink);
+ rtnl_unlock();
+
+ mutex_unlock(&priv->lock);
return 0;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b930d5f95222..e14ec75b61d6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
- queue->rx.rsp_cons = ++cons;
+ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
kfree_skb(nskb);
return ~0U;
}
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d1632979622e..6a70845bd9ab 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
if (qdisc_run_begin(q)) {
- __qdisc_run(q);
+ /* NOLOCK qdisc must check 'state' under the qdisc seqlock
+ * to avoid racing with dev_qdisc_reset()
+ */
+ if (!(q->flags & TCQ_F_NOLOCK) ||
+ likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+ __qdisc_run(q);
qdisc_run_end(q);
}
}
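
The hunk above closes a race for lockless (TCQ_F_NOLOCK) qdiscs: the DEACTIVATED bit is now tested only after qdisc_run_begin() has been won, so the test cannot interleave with dev_qdisc_reset(). Below is a minimal userspace sketch of the same ordering idea, assuming hypothetical try_run_begin()/run_end() helpers built on C11 atomics rather than the kernel's seqlock:

/* Sketch of "check the deactivated flag only inside the run section".
 * struct fake_qdisc and the helpers are illustrative, not kernel API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_qdisc {
	atomic_flag running;      /* stands in for the qdisc seqlock */
	atomic_bool deactivated;  /* stands in for __QDISC_STATE_DEACTIVATED */
	int backlog;              /* packets waiting to be sent */
};

static struct fake_qdisc q = {
	.running = ATOMIC_FLAG_INIT,
	.backlog = 3,
};

static bool try_run_begin(struct fake_qdisc *qd)
{
	/* only one thread may own the run section at a time */
	return !atomic_flag_test_and_set(&qd->running);
}

static void run_end(struct fake_qdisc *qd)
{
	atomic_flag_clear(&qd->running);
}

static void fake_qdisc_run(struct fake_qdisc *qd)
{
	if (!try_run_begin(qd))
		return;
	/* the flag is tested only after the run section is owned, so a
	 * concurrent reset cannot slip in between the test and the drain */
	if (!atomic_load(&qd->deactivated)) {
		while (qd->backlog > 0)
			qd->backlog--;          /* "transmit" one packet */
	}
	run_end(qd);
}

int main(void)
{
	fake_qdisc_run(&q);
	printf("backlog after normal run: %d\n", q.backlog);

	atomic_store(&q.deactivated, true);     /* qdisc being torn down */
	q.backlog = 2;
	fake_qdisc_run(&q);
	printf("backlog after deactivated run: %d\n", q.backlog);
	return 0;
}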
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index d9112de85261..43f4a818d88f 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -21,7 +21,8 @@ struct sock_reuseport {
unsigned int synq_overflow_ts;
/* ID stays the same even after the size of socks[] grows. */
unsigned int reuseport_id;
- bool bind_inany;
+ unsigned int bind_inany:1;
+ unsigned int has_conns:1;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
struct sock *socks[0]; /* array of sock pointers */
};
@@ -37,6 +38,23 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);
+static inline bool reuseport_has_conns(struct sock *sk, bool set)
+{
+ struct sock_reuseport *reuse;
+ bool ret = false;
+
+ rcu_read_lock();
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (reuse) {
+ if (set)
+ reuse->has_conns = 1;
+ ret = reuse->has_conns;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
int reuseport_get_id(struct sock_reuseport *reuse);
#endif /* _SOCK_REUSEPORT_H */
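
The new has_conns bit (set via reuseport_has_conns(sk, true)) records that at least one member of an SO_REUSEPORT group has been connect()ed, so later hash-based selection knows it may not steal that 4-tuple. A minimal userspace sketch of the situation the bit describes, assuming arbitrary example ports and trimmed error handling:

/* Two UDP sockets share a port via SO_REUSEPORT; one is then connect()ed.
 * After this series, datagrams from that peer keep landing on the connected
 * socket instead of being spread across the group by the reuseport hash.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int make_reuseport_udp(uint16_t port)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr;

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	/* two members of the same reuseport group on UDP port 5000 */
	int unconnected = make_reuseport_udp(5000);
	int connected = make_reuseport_udp(5000);
	struct sockaddr_in peer;

	memset(&peer, 0, sizeof(peer));
	peer.sin_family = AF_INET;
	peer.sin_port = htons(6000);
	inet_pton(AF_INET, "127.0.0.1", &peer.sin_addr);

	/* connect() pins this socket to one remote 4-tuple; on the kernel
	 * side the whole group is marked with has_conns from now on */
	if (connect(connected, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		perror("connect");

	printf("group ready: fds %d (unconnected) and %d (connected)\n",
	       unconnected, connected);
	close(unconnected);
	close(connected);
	return 0;
}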
diff --git a/net/core/dev.c b/net/core/dev.c
index a9775d676285..71b18e80389f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3467,18 +3467,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
qdisc_calculate_pkt_len(skb, q);
if (q->flags & TCQ_F_NOLOCK) {
- if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
- __qdisc_drop(skb, &to_free);
- rc = NET_XMIT_DROP;
- } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
- qdisc_run_begin(q)) {
+ if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+ qdisc_run_begin(q)) {
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state))) {
+ __qdisc_drop(skb, &to_free);
+ rc = NET_XMIT_DROP;
+ goto end_run;
+ }
qdisc_bstats_cpu_update(q, skb);
+ rc = NET_XMIT_SUCCESS;
if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
__qdisc_run(q);
+end_run:
qdisc_run_end(q);
- rc = NET_XMIT_SUCCESS;
} else {
rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
qdisc_run(q);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 9408f9264d05..f3ceec93f392 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
select_by_hash:
/* no bpf or invalid bpf result: fall back to hash usage */
- if (!sk2)
- sk2 = reuse->socks[reciprocal_scale(hash, socks)];
+ if (!sk2) {
+ int i, j;
+
+ i = j = reciprocal_scale(hash, socks);
+ while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+ i++;
+ if (i >= reuse->num_socks)
+ i = 0;
+ if (i == j)
+ goto out;
+ }
+ sk2 = reuse->socks[i];
+ }
}
out:
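
The replacement selection loop starts at the hashed slot and walks socks[], skipping members already in TCP_ESTABLISHED (connected UDP sockets), wrapping around and bailing out once it returns to its starting point. A standalone sketch of just that search pattern over a plain array, with a hypothetical pick_unconnected() helper:

/* Start at the hashed slot, skip "connected" entries, wrap, and give up
 * when the scan returns to its start point. */
#include <stdbool.h>
#include <stdio.h>

struct entry {
	int id;
	bool connected;         /* models sk_state == TCP_ESTABLISHED */
};

/* Returns the index of the first unconnected entry at or after 'start'
 * (wrapping around), or -1 if every entry is connected. */
static int pick_unconnected(const struct entry *socks, int num, int start)
{
	int i = start, j = start;

	while (socks[i].connected) {
		i++;
		if (i >= num)
			i = 0;
		if (i == j)
			return -1;      /* full lap: nothing suitable */
	}
	return i;
}

int main(void)
{
	struct entry socks[] = {
		{ .id = 0, .connected = true },
		{ .id = 1, .connected = true },
		{ .id = 2, .connected = false },
		{ .id = 3, .connected = true },
	};
	int idx = pick_unconnected(socks, 4, 3);        /* hash picked slot 3 */

	if (idx >= 0)
		printf("selected socket id %d\n", socks[idx].id);
	else
		printf("no unconnected socket in the group\n");
	return 0;
}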
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index b501c90aabe4..73002022c9d8 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -644,6 +644,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
tag_ops = dsa_tag_driver_get(tag_protocol);
if (IS_ERR(tag_ops)) {
+ if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+ return -EPROBE_DEFER;
dev_warn(ds->dev, "No tagger for this switch\n");
return PTR_ERR(tag_ops);
}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 7bd29e694603..9a0fe0c2fa02 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -15,6 +15,7 @@
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
+#include <net/sock_reuseport.h>
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
@@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
}
inet->inet_daddr = fl4->daddr;
inet->inet_dport = usin->sin_port;
+ reuseport_has_conns(sk, true);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
inet->inet_id = jiffies;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fbcd9be3a470..cf755156a684 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -423,12 +423,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif);
if (score > badness) {
- if (sk->sk_reuseport) {
+ if (sk->sk_reuseport &&
+ sk->sk_state != TCP_ESTABLISHED) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
- if (result)
+ if (result && !reuseport_has_conns(sk, false))
return result;
}
badness = score;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9ab897ded4df..96f939248d2f 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -27,6 +27,7 @@
#include <net/ip6_route.h>
#include <net/tcp_states.h>
#include <net/dsfield.h>
+#include <net/sock_reuseport.h>
#include <linux/errqueue.h>
#include <linux/uaccess.h>
@@ -254,6 +255,7 @@ ipv4_connected:
goto out;
}
+ reuseport_has_conns(sk, true);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
out:
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index dd2d0b963260..d5779d6a6065 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
if (unlikely(!tun_info ||
!(tun_info->mode & IP_TUNNEL_INFO_TX) ||
ip_tunnel_info_af(tun_info) != AF_INET6))
- return -EINVAL;
+ goto tx_err;
key = &tun_info->key;
memset(&fl6, 0, sizeof(fl6));
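
Returning a negative errno from an ndo_start_xmit handler is wrong; the fix reroutes the early exit through the function's existing tx_err label so the skb is freed and the drop is accounted. A small sketch of that centralized goto-cleanup idiom, using hypothetical names (fake_xmit, struct pkt) rather than the driver's:

/* Every failure jumps to one label that does the bookkeeping and frees the
 * buffer, instead of returning a raw error and leaking it. */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	char *data;
	size_t len;
};

static unsigned long tx_dropped;

/* Returns 0 when the packet was consumed (sent or dropped); never leaks. */
static int fake_xmit(struct pkt *p, int link_up, int have_route)
{
	if (!link_up)
		goto tx_err;            /* early sanity check */
	if (!have_route)
		goto tx_err;            /* later failure: same cleanup */

	printf("sent %zu bytes\n", p->len);
	free(p->data);
	free(p);
	return 0;

tx_err:
	/* single place that accounts the drop and releases the packet */
	tx_dropped++;
	free(p->data);
	free(p);
	return 0;
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	p->data = malloc(64);
	p->len = 64;
	fake_xmit(p, /*link_up=*/0, /*have_route=*/1);
	printf("tx_dropped = %lu\n", tx_dropped);
	return 0;
}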
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 2c8beb3896d1..aae4938f3dea 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -158,13 +158,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif);
if (score > badness) {
- if (sk->sk_reuseport) {
+ if (sk->sk_reuseport &&
+ sk->sk_state != TCP_ESTABLISHED) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
- if (result)
+ if (result && !reuseport_has_conns(sk, false))
return result;
}
result = sk;
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 9252ad126335..ac46d8961b61 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -42,7 +42,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
static const char *const rds_ib_stat_names[] = {
"ib_connect_raced",
"ib_listen_closed_stale",
- "s_ib_evt_handler_call",
+ "ib_evt_handler_call",
"ib_tasklet_call",
"ib_tx_cq_event",
"ib_tx_ring_full",
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ac28f6a5d70e..17bd8f539bc7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc)
void qdisc_put(struct Qdisc *qdisc)
{
+ if (!qdisc)
+ return;
+
if (qdisc->flags & TCQ_F_BUILTIN ||
!refcount_dec_and_test(&qdisc->refcnt))
return;
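
The added check makes qdisc_put() a no-op on NULL, following the kfree()/kfree_skb() convention so callers can release unconditionally on error paths. A tiny sketch of that NULL-tolerant put idiom with a hypothetical refcounted object:

/* Releasing a NULL handle is a no-op, so error paths need no NULL test at
 * the call site.  struct blob / blob_put() are userspace stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct blob {
	int refcnt;
	char *payload;
};

static void blob_put(struct blob *b)
{
	if (!b)
		return;                 /* tolerate NULL, like kfree() */
	if (--b->refcnt)
		return;                 /* still referenced elsewhere */
	free(b->payload);
	free(b);
}

int main(void)
{
	struct blob *b = calloc(1, sizeof(*b));

	b->refcnt = 1;
	b->payload = malloc(16);

	blob_put(b);            /* last reference: freed */
	blob_put(NULL);         /* harmless no-op */
	printf("done\n");
	return 0;
}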