Diffstat (limited to 'net')
57 files changed, 533 insertions, 358 deletions
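Two hunks below (net/netlink/af_netlink.c and net/socket.c) close Spectre-v1 gadgets by clamping a user-supplied index with array_index_nospec() after the architectural bounds check. A minimal standalone sketch of that pattern — link_table and lookup_link() are illustrative here, not from the patch; only the MAX_LINKS bound and the clamp call mirror the netlink hunk:

    #include <linux/nospec.h>

    #define MAX_LINKS 32                    /* bound used by the netlink hunk */

    static void *link_table[MAX_LINKS];     /* hypothetical kernel table */

    static void *lookup_link(int protocol)
    {
            /* Architectural bounds check: rejects bad input on the real path. */
            if (protocol < 0 || protocol >= MAX_LINKS)
                    return NULL;
            /* Clamp the index so a mispredicted branch cannot speculatively
             * read link_table[] out of bounds. */
            protocol = array_index_nospec(protocol, MAX_LINKS);
            return link_table[protocol];
    }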
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index e0adcd123f48..711d7156efd8 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb) caifd = caif_get(skb->dev); WARN_ON(caifd == NULL); - if (caifd == NULL) + if (!caifd) { + rcu_read_unlock(); return; + } caifd_hold(caifd); rcu_read_unlock(); diff --git a/net/core/dev.c b/net/core/dev.c index a5aa1c7444e6..559a91271f82 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7149,16 +7149,19 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) dev->tx_queue_len = new_len; res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); res = notifier_to_errno(res); - if (res) { - netdev_err(dev, - "refused to change device tx_queue_len\n"); - dev->tx_queue_len = orig_len; - return res; - } - return dev_qdisc_change_tx_queue_len(dev); + if (res) + goto err_rollback; + res = dev_qdisc_change_tx_queue_len(dev); + if (res) + goto err_rollback; } return 0; + +err_rollback: + netdev_err(dev, "refused to change device tx_queue_len\n"); + dev->tx_queue_len = orig_len; + return res; } /** diff --git a/net/core/filter.c b/net/core/filter.c index 06da770f543f..9dfd145eedcc 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1712,24 +1712,26 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = { BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, u32, offset, void *, to, u32, len, u32, start_header) { + u8 *end = skb_tail_pointer(skb); + u8 *net = skb_network_header(skb); + u8 *mac = skb_mac_header(skb); u8 *ptr; - if (unlikely(offset > 0xffff || len > skb_headlen(skb))) + if (unlikely(offset > 0xffff || len > (end - mac))) goto err_clear; switch (start_header) { case BPF_HDR_START_MAC: - ptr = skb_mac_header(skb) + offset; + ptr = mac + offset; break; case BPF_HDR_START_NET: - ptr = skb_network_header(skb) + offset; + ptr = net + offset; break; default: goto err_clear; } - if (likely(ptr >= skb_mac_header(skb) && - ptr + len <= skb_tail_pointer(skb))) { + if (likely(ptr >= mac && ptr + len <= end)) { memcpy(to, ptr, len); return 0; } diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index e7e626fb87bb..e45098593dc0 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog, if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME]) return -EINVAL; - prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL); + prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC); if (!prog->name) return -ENOMEM; diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 68bf07206744..43a932cb609b 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool) struct page *page; /* Empty recycle ring */ - while ((page = ptr_ring_consume(&pool->ring))) { + while ((page = ptr_ring_consume_bh(&pool->ring))) { /* Verify the refcnt invariant of cached pages */ if (!(page_ref_count(page) == 1)) pr_crit("%s() page_pool refcnt %d violation\n", diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5ef61222fdef..e3f743c141b3 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) return err; } - dev->rtnl_link_state = RTNL_LINK_INITIALIZED; - - __dev_notify_flags(dev, old_flags, ~0U); + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { + 
__dev_notify_flags(dev, old_flags, 0U); + } else { + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; + __dev_notify_flags(dev, old_flags, ~0U); + } return 0; } EXPORT_SYMBOL(rtnl_configure_link); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8e51f8555e11..fb35b62af272 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3720,6 +3720,7 @@ normal: net_warn_ratelimited( "skb_segment: too many frags: %u %u\n", pos, mss); + err = -EINVAL; goto err; } @@ -3753,11 +3754,10 @@ skip_fraglist: perform_csum_check: if (!csum) { - if (skb_has_shared_frag(nskb)) { - err = __skb_linearize(nskb); - if (err) - goto err; - } + if (skb_has_shared_frag(nskb) && + __skb_linearize(nskb)) + goto err; + if (!nskb->remcsum_offload) nskb->ip_summed = CHECKSUM_NONE; SKB_GSO_CB(nskb)->csum = diff --git a/net/core/sock.c b/net/core/sock.c index 9e8f65585b81..bc2d7a37297f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2277,9 +2277,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg, pfrag->offset += use; sge = sg + sg_curr - 1; - if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page && - sg->offset + sg->length == orig_offset) { - sg->length += use; + if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page && + sge->offset + sge->length == orig_offset) { + sge->length += use; } else { sge = sg + sg_curr; sg_unmark_end(sge); diff --git a/net/core/xdp.c b/net/core/xdp.c index 9d1f22072d5d..6771f1855b96 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -345,7 +345,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, rcu_read_lock(); /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); - xa->zc_alloc->free(xa->zc_alloc, handle); + if (!WARN_ON_ONCE(!xa)) + xa->zc_alloc->free(xa->zc_alloc, handle); rcu_read_unlock(); default: /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 1e3b6a6d8a40..732369c80644 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1248,6 +1248,9 @@ int dsa_slave_suspend(struct net_device *slave_dev) { struct dsa_port *dp = dsa_slave_to_port(slave_dev); + if (!netif_running(slave_dev)) + return 0; + netif_device_detach(slave_dev); rtnl_lock(); @@ -1261,6 +1264,9 @@ int dsa_slave_resume(struct net_device *slave_dev) { struct dsa_port *dp = dsa_slave_to_port(slave_dev); + if (!netif_running(slave_dev)) + return 0; + netif_device_attach(slave_dev); rtnl_lock(); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index e46cdd310e5f..2998b0e47d4b 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -292,19 +292,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) return ip_hdr(skb)->daddr; in_dev = __in_dev_get_rcu(dev); - BUG_ON(!in_dev); net = dev_net(dev); scope = RT_SCOPE_UNIVERSE; if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { + bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev); struct flowi4 fl4 = { .flowi4_iif = LOOPBACK_IFINDEX, .flowi4_oif = l3mdev_master_ifindex_rcu(dev), .daddr = ip_hdr(skb)->saddr, .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_scope = scope, - .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, + .flowi4_mark = vmark ? 
skb->mark : 0, }; if (!fib_lookup(net, &fl4, &res, 0)) return FIB_RES_PREFSRC(net, res); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index b3c899a630a0..75151be21413 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1200,8 +1200,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) spin_lock_bh(&im->lock); if (pmc) { im->interface = pmc->interface; - im->sfmode = pmc->sfmode; - if (pmc->sfmode == MCAST_INCLUDE) { + if (im->sfmode == MCAST_INCLUDE) { im->tomb = pmc->tomb; im->sources = pmc->sources; for (psf = im->sources; psf; psf = psf->sf_next) @@ -1388,7 +1387,8 @@ static void ip_mc_hash_remove(struct in_device *in_dev, /* * A socket has joined a multicast group on device dev. */ -void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) +static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, + unsigned int mode) { struct ip_mc_list *im; #ifdef CONFIG_IP_MULTICAST diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 1e4cf3ab560f..0d70608cc2e1 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -157,9 +157,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, { struct inet_frag_queue *q; - if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) - return NULL; - q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); if (!q) return NULL; @@ -204,6 +201,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key) { struct inet_frag_queue *fq; + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) + return NULL; + rcu_read_lock(); fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 8e9528ebaa8e..d14d741fb05e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -383,11 +383,16 @@ found: int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */ if (i < next->len) { + int delta = -next->truesize; + /* Eat head of the next overlapped fragment * and leave the loop. The next ones cannot overlap. */ if (!pskb_pull(next, i)) goto err; + delta += next->truesize; + if (delta) + add_frag_mem_limit(qp->q.net, delta); next->ip_defrag_offset += i; qp->q.meat -= i; if (next->ip_summed != CHECKSUM_UNNECESSARY) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b3308e9d9762..0e3edd25f881 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->dev = from->dev; to->mark = from->mark; + skb_copy_hash(to, from); + /* Copy the flags to each fragment. */ IPCB(to)->flags = IPCB(from)->flags; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 64c76dcf7386..c0fe5ad996f2 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) { struct sockaddr_in sin; const struct iphdr *iph = ip_hdr(skb); - __be16 *ports = (__be16 *)skb_transport_header(skb); + __be16 *ports; + int end; - if (skb_transport_offset(skb) + 4 > (int)skb->len) + end = skb_transport_offset(skb) + 4; + if (end > 0 && !pskb_may_pull(skb, end)) return; /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. 
*/ + ports = (__be16 *)skb_transport_header(skb); sin.sin_family = AF_INET; sin.sin_addr.s_addr = iph->daddr; diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 58e2f479ffb4..4bfff3c87e8e 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -354,6 +354,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) /* Reduce delayed ACKs by rounding up cwnd to the next even number. */ cwnd = (cwnd + 1) & ~1U; + /* Ensure gain cycling gets inflight above BDP even for small BDPs. */ + if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT) + cwnd += 2; + return cwnd; } diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 5869f89ca656..8b637f9f23a2 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -129,24 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - /* State has changed from CE=0 to CE=1 and delayed - * ACK has not sent yet. - */ - if (!ca->ce_state && - inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { - u32 tmp_rcv_nxt; - - /* Save current rcv_nxt. */ - tmp_rcv_nxt = tp->rcv_nxt; - - /* Generate previous ack with CE=0. */ - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; - tp->rcv_nxt = ca->prior_rcv_nxt; - - tcp_send_ack(sk); - - /* Recover current rcv_nxt. */ - tp->rcv_nxt = tmp_rcv_nxt; + if (!ca->ce_state) { + /* State has changed from CE=0 to CE=1, force an immediate + * ACK to reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); + tcp_enter_quickack_mode(sk, 1); } ca->prior_rcv_nxt = tp->rcv_nxt; @@ -160,24 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - /* State has changed from CE=1 to CE=0 and delayed - * ACK has not sent yet. - */ - if (ca->ce_state && - inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { - u32 tmp_rcv_nxt; - - /* Save current rcv_nxt. */ - tmp_rcv_nxt = tp->rcv_nxt; - - /* Generate previous ack with CE=1. */ - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; - tp->rcv_nxt = ca->prior_rcv_nxt; - - tcp_send_ack(sk); - - /* Recover current rcv_nxt. */ - tp->rcv_nxt = tmp_rcv_nxt; + if (ca->ce_state) { + /* State has changed from CE=1 to CE=0, force an immediate + * ACK to reflect the new CE state. If an ACK was delayed, + * send that first to reflect the prior CE state. + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); + tcp_enter_quickack_mode(sk, 1); } ca->prior_rcv_nxt = tp->rcv_nxt; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8e5522c6833a..f9dcb29be12d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -215,7 +215,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks) icsk->icsk_ack.quick = quickacks; } -static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -223,6 +223,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } +EXPORT_SYMBOL(tcp_enter_quickack_mode); /* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. 
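The net/ipv4/tcp_dctcp.c hunks above replace the old save/rewrite/restore dance on tp->rcv_nxt with one call to the new __tcp_send_ack(sk, rcv_nxt) helper (added by the tcp_output.c hunk further down) plus the newly exported tcp_enter_quickack_mode(). A condensed sketch of the shape both CE-transition handlers now share — the helper name is illustrative and the surrounding ce_state check is omitted:

    /* Pattern of dctcp_ce_state_0_to_1()/dctcp_ce_state_1_to_0() after this
     * change: if a delayed ACK is outstanding, emit it immediately with the
     * old rcv_nxt so it still reflects the prior CE state, then force the
     * next (current-state) ACK out quickly.
     */
    static void dctcp_ece_ack_update(struct sock *sk, u32 *prior_rcv_nxt)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
                    __tcp_send_ack(sk, *prior_rcv_nxt); /* ACK for prior CE state */
            tcp_enter_quickack_mode(sk, 1);             /* ACK new CE state soon */

            *prior_rcv_nxt = tp->rcv_nxt;
    }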
@@ -245,8 +246,15 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp) static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) { - if (tcp_hdr(skb)->cwr) + if (tcp_hdr(skb)->cwr) { tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + + /* If the sender is telling us it has entered CWR, then its + * cwnd may be very low (even just 1 packet), so we should ACK + * immediately. + */ + tcp_enter_quickack_mode((struct sock *)tp, 2); + } } static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) @@ -4357,6 +4365,23 @@ static bool tcp_try_coalesce(struct sock *sk, return true; } +static bool tcp_ooo_try_coalesce(struct sock *sk, + struct sk_buff *to, + struct sk_buff *from, + bool *fragstolen) +{ + bool res = tcp_try_coalesce(sk, to, from, fragstolen); + + /* In case tcp_drop() is called later, update to->gso_segs */ + if (res) { + u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) + + max_t(u16, 1, skb_shinfo(from)->gso_segs); + + skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF); + } + return res; +} + static void tcp_drop(struct sock *sk, struct sk_buff *skb) { sk_drops_add(sk, skb); @@ -4480,8 +4505,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) /* In the typical case, we are adding an skb to the end of the list. * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. */ - if (tcp_try_coalesce(sk, tp->ooo_last_skb, - skb, &fragstolen)) { + if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, + skb, &fragstolen)) { coalesce_done: tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); @@ -4509,7 +4534,7 @@ coalesce_done: /* All the bits are present. Drop. */ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - __kfree_skb(skb); + tcp_drop(sk, skb); skb = NULL; tcp_dsack_set(sk, seq, end_seq); goto add_sack; @@ -4528,11 +4553,11 @@ coalesce_done: TCP_SKB_CB(skb1)->end_seq); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); - __kfree_skb(skb1); + tcp_drop(sk, skb1); goto merge_right; } - } else if (tcp_try_coalesce(sk, skb1, - skb, &fragstolen)) { + } else if (tcp_ooo_try_coalesce(sk, skb1, + skb, &fragstolen)) { goto coalesce_done; } p = &parent->rb_right; @@ -4901,6 +4926,7 @@ end: static void tcp_collapse_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); + u32 range_truesize, sum_tiny = 0; struct sk_buff *skb, *head; u32 start, end; @@ -4912,6 +4938,7 @@ new_range: } start = TCP_SKB_CB(skb)->seq; end = TCP_SKB_CB(skb)->end_seq; + range_truesize = skb->truesize; for (head = skb;;) { skb = skb_rb_next(skb); @@ -4922,11 +4949,20 @@ new_range: if (!skb || after(TCP_SKB_CB(skb)->seq, end) || before(TCP_SKB_CB(skb)->end_seq, start)) { - tcp_collapse(sk, NULL, &tp->out_of_order_queue, - head, skb, start, end); + /* Do not attempt collapsing tiny skbs */ + if (range_truesize != head->truesize || + end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) { + tcp_collapse(sk, NULL, &tp->out_of_order_queue, + head, skb, start, end); + } else { + sum_tiny += range_truesize; + if (sum_tiny > sk->sk_rcvbuf >> 3) + return; + } goto new_range; } + range_truesize += skb->truesize; if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) start = TCP_SKB_CB(skb)->seq; if (after(TCP_SKB_CB(skb)->end_seq, end)) @@ -4941,6 +4977,7 @@ new_range: * 2) not add too big latencies if thousands of packets sit there. * (But if application shrinks SO_RCVBUF, we could still end up * freeing whole queue here) + * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks. * * Return true if queue has shrunk. 
*/ @@ -4948,20 +4985,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct rb_node *node, *prev; + int goal; if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) return false; NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); + goal = sk->sk_rcvbuf >> 3; node = &tp->ooo_last_skb->rbnode; do { prev = rb_prev(node); rb_erase(node, &tp->out_of_order_queue); + goal -= rb_to_skb(node)->truesize; tcp_drop(sk, rb_to_skb(node)); - sk_mem_reclaim(sk); - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && - !tcp_under_memory_pressure(sk)) - break; + if (!prev || goal <= 0) { + sk_mem_reclaim(sk); + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && + !tcp_under_memory_pressure(sk)) + break; + goal = sk->sk_rcvbuf >> 3; + } node = prev; } while (node); tp->ooo_last_skb = rb_to_skb(prev); @@ -4996,6 +5039,9 @@ static int tcp_prune_queue(struct sock *sk) else if (tcp_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) + return 0; + tcp_collapse_ofo_queue(sk); if (!skb_queue_empty(&sk->sk_receive_queue)) tcp_collapse(sk, &sk->sk_receive_queue, NULL, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 00e5a300ddb9..c4172c1fb198 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp, } /* Account for an ACK we sent. */ -static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) +static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, + u32 rcv_nxt) { struct tcp_sock *tp = tcp_sk(sk); @@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) __sock_put(sk); } + + if (unlikely(rcv_nxt != tp->rcv_nxt)) + return; /* Special ACK sent by DCTCP to reflect ECN */ tcp_dec_quickack_mode(sk, pkts); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); } @@ -1023,8 +1027,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb) * We are working here with either a clone of the original * SKB, or a fresh unique copy made by the retransmit engine. 
*/ -static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, - gfp_t gfp_mask) +static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, + int clone_it, gfp_t gfp_mask, u32 rcv_nxt) { const struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet; @@ -1100,7 +1104,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, th->source = inet->inet_sport; th->dest = inet->inet_dport; th->seq = htonl(tcb->seq); - th->ack_seq = htonl(tp->rcv_nxt); + th->ack_seq = htonl(rcv_nxt); *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->tcp_flags); @@ -1141,7 +1145,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, icsk->icsk_af_ops->send_check(sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) - tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); + tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); if (skb->len != tcp_header_size) { tcp_event_data_sent(tp, sk); @@ -1178,6 +1182,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, return err; } +static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + gfp_t gfp_mask) +{ + return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, + tcp_sk(sk)->rcv_nxt); +} + /* This routine just queues the buffer for sending. * * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, @@ -3571,7 +3582,7 @@ void tcp_send_delayed_ack(struct sock *sk) } /* This routine sends an ack and also updates the window. */ -void tcp_send_ack(struct sock *sk) +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) { struct sk_buff *buff; @@ -3604,9 +3615,14 @@ void tcp_send_ack(struct sock *sk) skb_set_tcp_pure_ack(buff); /* Send it off, this clears delayed acks for us. */ - tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0); + __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); +} +EXPORT_SYMBOL_GPL(__tcp_send_ack); + +void tcp_send_ack(struct sock *sk) +{ + __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); } -EXPORT_SYMBOL_GPL(tcp_send_ack); /* This routine sends a packet with an out of date sequence * number. It assumes the other end will try to ack it. diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 91580c62bb86..f66a1cae3366 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2374,7 +2374,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, continue; if ((rt->fib6_flags & noflags) != 0) continue; - fib6_info_hold(rt); + if (!fib6_info_hold_safe(rt)) + continue; break; } out: diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 2ee08b6a86a4..1a1f876f8e28 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, } if (np->rxopt.bits.rxorigdstaddr) { struct sockaddr_in6 sin6; - __be16 *ports = (__be16 *) skb_transport_header(skb); + __be16 *ports; + int end; - if (skb_transport_offset(skb) + 4 <= (int)skb->len) { + end = skb_transport_offset(skb) + 4; + if (end <= 0 || pskb_may_pull(skb, end)) { /* All current transport protocols have the port numbers in the * first four bytes of the transport header and this function is * written with this assumption in mind. 
*/ + ports = (__be16 *)skb_transport_header(skb); sin6.sin6_family = AF_INET6; sin6.sin6_addr = ipv6_hdr(skb)->daddr; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 97513f35bcc5..88a7579c23bd 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -669,8 +669,10 @@ skip_cow: sg_init_table(sg, nfrags); ret = skb_to_sgvec(skb, sg, 0, skb->len); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { + kfree(tmp); goto out; + } skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index be491bf6ab6e..ef2505aefc15 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb) /* for local traffic to local address, skb dev is the loopback * device. Check if there is a dst attached to the skb and if so - * get the real device index. + * get the real device index. Same is needed for replies to a link + * local address on a device enslaved to an L3 master device */ - if (unlikely(iif == LOOPBACK_IFINDEX)) { + if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) { const struct rt6_info *rt6 = skb_rt6_info(skb); if (rt6) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a14fb4fcdf18..3168847c30d1 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->dev = from->dev; to->mark = from->mark; + skb_copy_hash(to, from); + #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index b7f28deddaea..c72ae3a4fe09 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) goto tx_err_dst_release; } - skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); - skb_dst_set(skb, dst); - skb->dev = skb_dst(skb)->dev; - mtu = dst_mtu(dst); if (!skb->ignore_df && skb->len > mtu) { skb_dst_update_pmtu(skb, mtu); @@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) htonl(mtu)); } - return -EMSGSIZE; + err = -EMSGSIZE; + goto tx_err_dst_release; } + skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); + skb_dst_set(skb, dst); + skb->dev = skb_dst(skb)->dev; + err = dst_output(t->net, skb->sk, skb); if (net_xmit_eval(err) == 0) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 2699be7202be..f60f310785fd 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -790,8 +790,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) spin_lock_bh(&im->mca_lock); if (pmc) { im->idev = pmc->idev; - im->mca_sfmode = pmc->mca_sfmode; - if (pmc->mca_sfmode == MCAST_INCLUDE) { + if (im->mca_sfmode == MCAST_INCLUDE) { im->mca_tomb = pmc->mca_tomb; im->mca_sources = pmc->mca_sources; for (psf = im->mca_sources; psf; psf = psf->sf_next) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2ce0bd17de4f..ec18b3ce8b6d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -972,10 +972,10 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) rt->dst.lastuse = jiffies; } +/* Caller must already hold reference to @from */ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) { rt->rt6i_flags &= ~RTF_EXPIRES; - fib6_info_hold(from); rcu_assign_pointer(rt->from, from); dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); if (from->fib6_metrics != &dst_default_metrics) { @@ -984,6 +984,7 @@ static void 
rt6_set_from(struct rt6_info *rt, struct fib6_info *from) } } +/* Caller must already hold reference to @ort */ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) { struct net_device *dev = fib6_info_nh_dev(ort); @@ -1044,9 +1045,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt) struct net_device *dev = rt->fib6_nh.nh_dev; struct rt6_info *nrt; + if (!fib6_info_hold_safe(rt)) + return NULL; + nrt = ip6_dst_alloc(dev_net(dev), dev, flags); if (nrt) ip6_rt_copy_init(nrt, rt); + else + fib6_info_release(rt); return nrt; } @@ -1178,10 +1184,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort, * Clone the route. */ + if (!fib6_info_hold_safe(ort)) + return NULL; + dev = ip6_rt_get_dev_rcu(ort); rt = ip6_dst_alloc(dev_net(dev), dev, 0); - if (!rt) + if (!rt) { + fib6_info_release(ort); return NULL; + } ip6_rt_copy_init(rt, ort); rt->rt6i_flags |= RTF_CACHE; @@ -1210,12 +1221,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt) struct net_device *dev; struct rt6_info *pcpu_rt; + if (!fib6_info_hold_safe(rt)) + return NULL; + rcu_read_lock(); dev = ip6_rt_get_dev_rcu(rt); pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags); rcu_read_unlock(); - if (!pcpu_rt) + if (!pcpu_rt) { + fib6_info_release(rt); return NULL; + } ip6_rt_copy_init(pcpu_rt, rt); pcpu_rt->rt6i_flags |= RTF_PCPU; return pcpu_rt; @@ -2486,7 +2502,7 @@ restart: out: if (ret) - dst_hold(&ret->dst); + ip6_hold_safe(net, &ret, true); else ret = ip6_create_rt_rcu(rt); @@ -3303,7 +3319,8 @@ static int ip6_route_del(struct fib6_config *cfg, continue; if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol) continue; - fib6_info_hold(rt); + if (!fib6_info_hold_safe(rt)) + continue; rcu_read_unlock(); /* if gateway was specified only delete the one hop */ @@ -3409,6 +3426,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu rcu_read_lock(); from = rcu_dereference(rt->from); + /* This fib6_info_hold() is safe here because we hold reference to rt + * and rt already holds reference to fib6_info. + */ fib6_info_hold(from); rcu_read_unlock(); @@ -3470,7 +3490,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net, continue; if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) continue; - fib6_info_hold(rt); + if (!fib6_info_hold_safe(rt)) + continue; break; } out: @@ -3530,8 +3551,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net, ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr)) break; } - if (rt) - fib6_info_hold(rt); + if (rt && !fib6_info_hold_safe(rt)) + rt = NULL; rcu_read_unlock(); return rt; } @@ -3579,8 +3600,8 @@ restart: struct inet6_dev *idev = dev ? 
__in6_dev_get(dev) : NULL; if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && - (!idev || idev->cnf.accept_ra != 2)) { - fib6_info_hold(rt); + (!idev || idev->cnf.accept_ra != 2) && + fib6_info_hold_safe(rt)) { rcu_read_unlock(); ip6_del_rt(net, rt); goto restart; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7efa9fd7e109..03e6b7a2bc53 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) &tcp_hashinfo, NULL, 0, &ipv6h->saddr, th->source, &ipv6h->daddr, - ntohs(th->source), tcp_v6_iif(skb), + ntohs(th->source), + tcp_v6_iif_l3_slave(skb), tcp_v6_sdif(skb)); if (!sk1) goto out; @@ -1609,7 +1610,8 @@ do_time_wait: skb, __tcp_hdrlen(th), &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, - ntohs(th->dest), tcp_v6_iif(skb), + ntohs(th->dest), + tcp_v6_iif_l3_slave(skb), sdif); if (sk2) { struct inet_timewait_sock *tw = inet_twsk(sk); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index e398797878a9..cf6cca260e7b 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1201,13 +1201,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, l2tp_session_get(sock_net(sk), tunnel, stats.session_id); - if (session && session->pwtype == L2TP_PWTYPE_PPP) { - err = pppol2tp_session_ioctl(session, cmd, - arg); + if (!session) { + err = -EBADR; + break; + } + if (session->pwtype != L2TP_PWTYPE_PPP) { l2tp_session_dec_refcount(session); - } else { err = -EBADR; + break; } + + err = pppol2tp_session_ioctl(session, cmd, arg); + l2tp_session_dec_refcount(session); break; } #ifdef CONFIG_XFRM diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0a38cc1cbebc..932985ca4e66 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2254,11 +2254,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, sdata->control_port_over_nl80211)) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); bool noencrypt = status->flag & RX_FLAG_DECRYPTED; - struct ethhdr *ehdr = eth_hdr(skb); - cfg80211_rx_control_port(dev, skb->data, skb->len, - ehdr->h_source, - be16_to_cpu(skb->protocol), noencrypt); + cfg80211_rx_control_port(dev, skb, noencrypt); dev_kfree_skb(skb); } else { /* deliver to local stack */ diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 5e2e511c4a6f..d02fbfec3783 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2111,7 +2111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (!sta->uploaded) continue; - if (sta->sdata->vif.type != NL80211_IFTYPE_AP) + if (sta->sdata->vif.type != NL80211_IFTYPE_AP && + sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) continue; for (state = IEEE80211_STA_NOTEXIST; diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index abe647d5b8c6..9ce6336d1e55 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, [CT_DCCP_ROLE_SERVER] = { @@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 
1] = * We currently ignore Sync packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, [DCCP_PKT_SYNCACK] = { /* * We currently ignore SyncAck packets * * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ - sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, + sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, }, }, }; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 896d4a36081d..f5745e4c6513 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -75,6 +75,7 @@ static void nft_ctx_init(struct nft_ctx *ctx, { ctx->net = net; ctx->family = family; + ctx->level = 0; ctx->table = table; ctx->chain = chain; ctx->nla = nla; @@ -1597,7 +1598,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, struct nft_base_chain *basechain; struct nft_stats *stats = NULL; struct nft_chain_hook hook; - const struct nlattr *name; struct nf_hook_ops *ops; struct nft_trans *trans; int err; @@ -1645,12 +1645,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, return PTR_ERR(stats); } + err = -ENOMEM; trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, sizeof(struct nft_trans_chain)); - if (trans == NULL) { - free_percpu(stats); - return -ENOMEM; - } + if (trans == NULL) + goto err; nft_trans_chain_stats(trans) = stats; nft_trans_chain_update(trans) = true; @@ -1660,19 +1659,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, else nft_trans_chain_policy(trans) = -1; - name = nla[NFTA_CHAIN_NAME]; - if (nla[NFTA_CHAIN_HANDLE] && name) { - nft_trans_chain_name(trans) = - nla_strdup(name, GFP_KERNEL); - if (!nft_trans_chain_name(trans)) { - kfree(trans); - free_percpu(stats); - return -ENOMEM; + if (nla[NFTA_CHAIN_HANDLE] && + nla[NFTA_CHAIN_NAME]) { + struct nft_trans *tmp; + char *name; + + err = -ENOMEM; + name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); + if (!name) + goto err; + + err = -EEXIST; + list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) { + if (tmp->msg_type == NFT_MSG_NEWCHAIN && + tmp->ctx.table == table && + nft_trans_chain_update(tmp) && + nft_trans_chain_name(tmp) && + strcmp(name, nft_trans_chain_name(tmp)) == 0) { + kfree(name); + goto err; + } } + + nft_trans_chain_name(trans) = name; } list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; +err: + free_percpu(stats); + kfree(trans); + return err; } static int nf_tables_newchain(struct net *net, struct sock *nlsk, @@ -2254,6 +2271,39 @@ done: return skb->len; } +static int nf_tables_dump_rules_start(struct netlink_callback *cb) +{ + const struct nlattr * const *nla = cb->data; + struct nft_rule_dump_ctx *ctx = NULL; + + if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) { + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); + if (!ctx) + return -ENOMEM; + + if (nla[NFTA_RULE_TABLE]) { + ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], + GFP_ATOMIC); + if (!ctx->table) { + kfree(ctx); + return -ENOMEM; + } + } + if (nla[NFTA_RULE_CHAIN]) { + ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], + GFP_ATOMIC); + if (!ctx->chain) { + kfree(ctx->table); + kfree(ctx); + return -ENOMEM; + } + } + } + + cb->data = ctx; + return 0; +} + static int nf_tables_dump_rules_done(struct netlink_callback *cb) { struct nft_rule_dump_ctx *ctx = cb->data; @@ -2283,38 +2333,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start= nf_tables_dump_rules_start, .dump = 
nf_tables_dump_rules, .done = nf_tables_dump_rules_done, .module = THIS_MODULE, + .data = (void *)nla, }; - if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) { - struct nft_rule_dump_ctx *ctx; - - ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); - if (!ctx) - return -ENOMEM; - - if (nla[NFTA_RULE_TABLE]) { - ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], - GFP_ATOMIC); - if (!ctx->table) { - kfree(ctx); - return -ENOMEM; - } - } - if (nla[NFTA_RULE_CHAIN]) { - ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], - GFP_ATOMIC); - if (!ctx->chain) { - kfree(ctx->table); - kfree(ctx); - return -ENOMEM; - } - } - c.data = ctx; - } - return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -2384,6 +2409,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain) struct nft_rule *rule; int err; + if (ctx->level == NFT_JUMP_STACK_SIZE) + return -EMLINK; + list_for_each_entry(rule, &chain->rules, list) { if (!nft_is_active_next(ctx->net, rule)) continue; @@ -3161,6 +3189,18 @@ done: return skb->len; } +static int nf_tables_dump_sets_start(struct netlink_callback *cb) +{ + struct nft_ctx *ctx_dump = NULL; + + ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC); + if (ctx_dump == NULL) + return -ENOMEM; + + cb->data = ctx_dump; + return 0; +} + static int nf_tables_dump_sets_done(struct netlink_callback *cb) { kfree(cb->data); @@ -3188,18 +3228,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = nf_tables_dump_sets_start, .dump = nf_tables_dump_sets, .done = nf_tables_dump_sets_done, + .data = &ctx, .module = THIS_MODULE, }; - struct nft_ctx *ctx_dump; - - ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC); - if (ctx_dump == NULL) - return -ENOMEM; - - *ctx_dump = ctx; - c.data = ctx_dump; return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -3849,6 +3883,15 @@ nla_put_failure: return -ENOSPC; } +static int nf_tables_dump_set_start(struct netlink_callback *cb) +{ + struct nft_set_dump_ctx *dump_ctx = cb->data; + + cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC); + + return cb->data ? 
0 : -ENOMEM; +} + static int nf_tables_dump_set_done(struct netlink_callback *cb) { kfree(cb->data); @@ -4002,20 +4045,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = nf_tables_dump_set_start, .dump = nf_tables_dump_set, .done = nf_tables_dump_set_done, .module = THIS_MODULE, }; - struct nft_set_dump_ctx *dump_ctx; - - dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC); - if (!dump_ctx) - return -ENOMEM; - - dump_ctx->set = set; - dump_ctx->ctx = ctx; + struct nft_set_dump_ctx dump_ctx = { + .set = set, + .ctx = ctx, + }; - c.data = dump_ctx; + c.data = &dump_ctx; return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -4975,38 +5015,42 @@ done: return skb->len; } -static int nf_tables_dump_obj_done(struct netlink_callback *cb) +static int nf_tables_dump_obj_start(struct netlink_callback *cb) { - struct nft_obj_filter *filter = cb->data; + const struct nlattr * const *nla = cb->data; + struct nft_obj_filter *filter = NULL; - if (filter) { - kfree(filter->table); - kfree(filter); + if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) { + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + return -ENOMEM; + + if (nla[NFTA_OBJ_TABLE]) { + filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); + if (!filter->table) { + kfree(filter); + return -ENOMEM; + } + } + + if (nla[NFTA_OBJ_TYPE]) + filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); } + cb->data = filter; return 0; } -static struct nft_obj_filter * -nft_obj_filter_alloc(const struct nlattr * const nla[]) +static int nf_tables_dump_obj_done(struct netlink_callback *cb) { - struct nft_obj_filter *filter; - - filter = kzalloc(sizeof(*filter), GFP_ATOMIC); - if (!filter) - return ERR_PTR(-ENOMEM); + struct nft_obj_filter *filter = cb->data; - if (nla[NFTA_OBJ_TABLE]) { - filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); - if (!filter->table) { - kfree(filter); - return ERR_PTR(-ENOMEM); - } + if (filter) { + kfree(filter->table); + kfree(filter); } - if (nla[NFTA_OBJ_TYPE]) - filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); - return filter; + return 0; } /* called with rcu_read_lock held */ @@ -5027,21 +5071,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = nf_tables_dump_obj_start, .dump = nf_tables_dump_obj, .done = nf_tables_dump_obj_done, .module = THIS_MODULE, + .data = (void *)nla, }; - if (nla[NFTA_OBJ_TABLE] || - nla[NFTA_OBJ_TYPE]) { - struct nft_obj_filter *filter; - - filter = nft_obj_filter_alloc(nla); - if (IS_ERR(filter)) - return -ENOMEM; - - c.data = filter; - } return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -5320,8 +5356,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx, flowtable->ops[i].priv = &flowtable->data; flowtable->ops[i].hook = flowtable->data.type->hook; flowtable->ops[i].dev = dev_array[i]; - flowtable->dev_name[i] = kstrdup(dev_array[i]->name, - GFP_KERNEL); } return err; @@ -5479,10 +5513,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, err6: i = flowtable->ops_len; err5: - for (k = i - 1; k >= 0; k--) { - kfree(flowtable->dev_name[k]); + for (k = i - 1; k >= 0; k--) nf_unregister_net_hook(net, &flowtable->ops[k]); - } kfree(flowtable->ops); err4: @@ -5581,9 +5613,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, goto nla_put_failure; for (i = 0; i < flowtable->ops_len; 
i++) { - if (flowtable->dev_name[i][0] && - nla_put_string(skb, NFTA_DEVICE_NAME, - flowtable->dev_name[i])) + const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev); + + if (dev && + nla_put_string(skb, NFTA_DEVICE_NAME, dev->name)) goto nla_put_failure; } nla_nest_end(skb, nest_devs); @@ -5650,37 +5683,39 @@ done: return skb->len; } -static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) +static int nf_tables_dump_flowtable_start(struct netlink_callback *cb) { - struct nft_flowtable_filter *filter = cb->data; + const struct nlattr * const *nla = cb->data; + struct nft_flowtable_filter *filter = NULL; - if (!filter) - return 0; + if (nla[NFTA_FLOWTABLE_TABLE]) { + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + return -ENOMEM; - kfree(filter->table); - kfree(filter); + filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], + GFP_ATOMIC); + if (!filter->table) { + kfree(filter); + return -ENOMEM; + } + } + cb->data = filter; return 0; } -static struct nft_flowtable_filter * -nft_flowtable_filter_alloc(const struct nlattr * const nla[]) +static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) { - struct nft_flowtable_filter *filter; + struct nft_flowtable_filter *filter = cb->data; - filter = kzalloc(sizeof(*filter), GFP_ATOMIC); if (!filter) - return ERR_PTR(-ENOMEM); + return 0; - if (nla[NFTA_FLOWTABLE_TABLE]) { - filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], - GFP_ATOMIC); - if (!filter->table) { - kfree(filter); - return ERR_PTR(-ENOMEM); - } - } - return filter; + kfree(filter->table); + kfree(filter); + + return 0; } /* called with rcu_read_lock held */ @@ -5700,20 +5735,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = nf_tables_dump_flowtable_start, .dump = nf_tables_dump_flowtable, .done = nf_tables_dump_flowtable_done, .module = THIS_MODULE, + .data = (void *)nla, }; - if (nla[NFTA_FLOWTABLE_TABLE]) { - struct nft_flowtable_filter *filter; - - filter = nft_flowtable_filter_alloc(nla); - if (IS_ERR(filter)) - return -ENOMEM; - - c.data = filter; - } return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); } @@ -5783,6 +5811,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) kfree(flowtable->name); flowtable->data.type->free(&flowtable->data); module_put(flowtable->data.type->owner); + kfree(flowtable); } static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, @@ -5825,7 +5854,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev, continue; nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); - flowtable->dev_name[i][0] = '\0'; flowtable->ops[i].dev = NULL; break; } @@ -6086,6 +6114,9 @@ static void nft_commit_release(struct nft_trans *trans) case NFT_MSG_DELTABLE: nf_tables_table_destroy(&trans->ctx); break; + case NFT_MSG_NEWCHAIN: + kfree(nft_trans_chain_name(trans)); + break; case NFT_MSG_DELCHAIN: nf_tables_chain_destroy(&trans->ctx); break; @@ -6315,13 +6346,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); break; case NFT_MSG_NEWCHAIN: - if (nft_trans_chain_update(trans)) + if (nft_trans_chain_update(trans)) { nft_chain_commit_update(trans); - else + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); + /* trans destroyed after rcu grace period */ + } else { nft_clear(net, trans->ctx.chain); - - nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); - 
nft_trans_destroy(trans); + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); + nft_trans_destroy(trans); + } break; case NFT_MSG_DELCHAIN: nft_chain_del(trans->ctx.chain); @@ -6471,7 +6504,7 @@ static int __nf_tables_abort(struct net *net) case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { free_percpu(nft_trans_chain_stats(trans)); - + kfree(nft_trans_chain_name(trans)); nft_trans_destroy(trans); } else { trans->ctx.table->use--; @@ -6837,13 +6870,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx, err = nf_tables_check_loops(ctx, data->verdict.chain); if (err < 0) return err; - - if (ctx->chain->level + 1 > - data->verdict.chain->level) { - if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE) - return -EMLINK; - data->verdict.chain->level = ctx->chain->level + 1; - } } return 0; diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 15adf8ca82c3..0777a93211e2 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx, const struct nft_data **d) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); + struct nft_ctx *pctx = (struct nft_ctx *)ctx; const struct nft_data *data; int err; @@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx, switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: + pctx->level++; err = nft_chain_validate(ctx, data->verdict.chain); if (err < 0) return err; + pctx->level--; break; default: break; diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 42e6fadf1417..c2a1d84cdfc4 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx, struct nft_set_elem *elem) { const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); + struct nft_ctx *pctx = (struct nft_ctx *)ctx; const struct nft_data *data; + int err; if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) @@ -165,10 +167,17 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx, switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: - return nft_chain_validate(ctx, data->verdict.chain); + pctx->level++; + err = nft_chain_validate(ctx, data->verdict.chain); + if (err < 0) + return err; + pctx->level--; + break; default: - return 0; + break; } + + return 0; } static int nft_lookup_validate(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 72ef35b51cac..90c3e7e6cacb 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set) struct nft_rhash *priv = nft_set_priv(set); cancel_delayed_work_sync(&priv->gc_work); + rcu_barrier(); rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, (void *)set); } diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 1f8f257cb518..9873d734b494 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work) gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); if (!gcb) - goto out; + break; atomic_dec(&set->nelems); nft_set_gc_batch_add(gcb, rbe); @@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work) rbe = rb_entry(prev, struct nft_rbtree_elem, node); atomic_dec(&set->nelems); nft_set_gc_batch_add(gcb, rbe); + prev = 
NULL; } node = rb_next(node); + if (!node) + break; } -out: if (gcb) { for (i = 0; i < gcb->head.cnt; i++) { rbe = gcb->elems[i]; @@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set) struct rb_node *node; cancel_delayed_work_sync(&priv->gc_work); + rcu_barrier(); while ((node = priv->root.rb_node) != NULL) { rb_erase(node, &priv->root); rbe = rb_entry(node, struct nft_rbtree_elem, node); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 393573a99a5a..56704d95f82d 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -63,6 +63,7 @@ #include <linux/hash.h> #include <linux/genetlink.h> #include <linux/net_namespace.h> +#include <linux/nospec.h> #include <net/net_namespace.h> #include <net/netns/generic.h> @@ -679,6 +680,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if (protocol < 0 || protocol >= MAX_LINKS) return -EPROTONOSUPPORT; + protocol = array_index_nospec(protocol, MAX_LINKS); netlink_lock_table(); #ifdef CONFIG_MODULES @@ -1009,6 +1011,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, return err; } + if (nlk->ngroups == 0) + groups = 0; + else if (nlk->ngroups < 8*sizeof(groups)) + groups &= (1UL << nlk->ngroups) - 1; + bound = nlk->bound; if (bound) { /* Ensure nlk->portid is up-to-date. */ diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index b891a91577f8..c038e021a591 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -211,6 +211,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a) if (!meter) return ERR_PTR(-ENOMEM); + meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]); meter->used = div_u64(ktime_get_ns(), 1000 * 1000); meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0; meter->keep_stats = !a[OVS_METER_ATTR_CLEAR]; @@ -280,6 +281,10 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info) u32 meter_id; bool failed; + if (!a[OVS_METER_ATTR_ID]) { + return -ENODEV; + } + meter = dp_meter_create(a); if (IS_ERR_OR_NULL(meter)) return PTR_ERR(meter); @@ -298,11 +303,6 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info) goto exit_unlock; } - if (!a[OVS_METER_ATTR_ID]) { - err = -ENODEV; - goto exit_unlock; - } - meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]); /* Cannot fail after this. 
*/ diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c index 48332a6ed738..d152e48ea371 100644 --- a/net/rds/ib_frmr.c +++ b/net/rds/ib_frmr.c @@ -344,6 +344,11 @@ struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev, struct rds_ib_frmr *frmr; int ret; + if (!ic) { + /* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/ + return ERR_PTR(-EOPNOTSUPP); + } + do { if (ibmr) rds_ib_free_frmr(ibmr, true); diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h index 0ea4ab017a8c..655f01d427fe 100644 --- a/net/rds/ib_mr.h +++ b/net/rds/ib_mr.h @@ -115,7 +115,8 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo); void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, - struct rds_sock *rs, u32 *key_ret); + struct rds_sock *rs, u32 *key_ret, + struct rds_connection *conn); void rds_ib_sync_mr(void *trans_private, int dir); void rds_ib_free_mr(void *trans_private, int invalidate); void rds_ib_flush_mrs(void); diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index e678699268a2..2e49a40a5e11 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -537,11 +537,12 @@ void rds_ib_flush_mrs(void) } void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, - struct rds_sock *rs, u32 *key_ret) + struct rds_sock *rs, u32 *key_ret, + struct rds_connection *conn) { struct rds_ib_device *rds_ibdev; struct rds_ib_mr *ibmr = NULL; - struct rds_ib_connection *ic = rs->rs_conn->c_transport_data; + struct rds_ib_connection *ic = NULL; int ret; rds_ibdev = rds_ib_get_device(rs->rs_bound_addr); @@ -550,6 +551,9 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, goto out; } + if (conn) + ic = conn->c_transport_data; + if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) { ret = -ENODEV; goto out; @@ -559,17 +563,18 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret); else ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret); - if (ibmr) - rds_ibdev = NULL; - - out: - if (!ibmr) + if (IS_ERR(ibmr)) { + ret = PTR_ERR(ibmr); pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret); + } else { + return ibmr; + } + out: if (rds_ibdev) rds_ib_dev_put(rds_ibdev); - return ibmr; + return ERR_PTR(ret); } void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 634cfcb7bba6..80920e47f2c7 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -170,7 +170,8 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages, } static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, - u64 *cookie_ret, struct rds_mr **mr_ret) + u64 *cookie_ret, struct rds_mr **mr_ret, + struct rds_conn_path *cp) { struct rds_mr *mr = NULL, *found; unsigned int nr_pages; @@ -269,7 +270,8 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, * Note that dma_map() implies that pending writes are * flushed to RAM, so no dma_sync is needed here. */ trans_private = rs->rs_transport->get_mr(sg, nents, rs, - &mr->r_key); + &mr->r_key, + cp ? 
cp->cp_conn : NULL); if (IS_ERR(trans_private)) { for (i = 0 ; i < nents; i++) @@ -330,7 +332,7 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) sizeof(struct rds_get_mr_args))) return -EFAULT; - return __rds_rdma_map(rs, &args, NULL, NULL); + return __rds_rdma_map(rs, &args, NULL, NULL, NULL); } int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) @@ -354,7 +356,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) new_args.cookie_addr = args.cookie_addr; new_args.flags = args.flags; - return __rds_rdma_map(rs, &new_args, NULL, NULL); + return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL); } /* @@ -782,7 +784,8 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm, rm->m_rdma_cookie != 0) return -EINVAL; - return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); + return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, + &rm->rdma.op_rdma_mr, rm->m_conn_path); } /* diff --git a/net/rds/rds.h b/net/rds/rds.h index f2272fb8cd45..60b3b787fbdb 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -464,6 +464,8 @@ struct rds_message { struct scatterlist *op_sg; } data; }; + + struct rds_conn_path *m_conn_path; }; /* @@ -544,7 +546,8 @@ struct rds_transport { unsigned int avail); void (*exit)(void); void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg, - struct rds_sock *rs, u32 *key_ret); + struct rds_sock *rs, u32 *key_ret, + struct rds_connection *conn); void (*sync_mr)(void *trans_private, int direction); void (*free_mr)(void *trans_private, int invalidate); void (*flush_mrs)(void); diff --git a/net/rds/send.c b/net/rds/send.c index 94c7f74909be..59f17a2335f4 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1169,6 +1169,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) rs->rs_conn = conn; } + if (conn->c_trans->t_mp_capable) + cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)]; + else + cpath = &conn->c_path[0]; + + rm->m_conn_path = cpath; + /* Parse any control messages the user may have included. 
*/ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); if (ret) { @@ -1192,11 +1199,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) goto out; } - if (conn->c_trans->t_mp_capable) - cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)]; - else - cpath = &conn->c_path[0]; - if (rds_destroy_pending(conn)) { ret = -EAGAIN; goto out; diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index a9a9be5519b9..9d1e298b784c 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -116,9 +116,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, while (*pp) { parent = *pp; xcall = rb_entry(parent, struct rxrpc_call, sock_node); - if (user_call_ID < call->user_call_ID) + if (user_call_ID < xcall->user_call_ID) pp = &(*pp)->rb_left; - else if (user_call_ID > call->user_call_ID) + else if (user_call_ID > xcall->user_call_ID) pp = &(*pp)->rb_right; else goto id_in_use; diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index a7e8d63fc8ae..9bde1e4ca288 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -233,7 +233,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, /* force immediate tx of current consumer cursor, but * under send_lock to guarantee arrival in seqno-order */ - smc_tx_sndbuf_nonempty(conn); + if (smc->sk.sk_state != SMC_INIT) + smc_tx_sndbuf_nonempty(conn); } } diff --git a/net/socket.c b/net/socket.c index 85633622c94d..8c24d5dc4bc8 100644 --- a/net/socket.c +++ b/net/socket.c @@ -89,6 +89,7 @@ #include <linux/magic.h> #include <linux/slab.h> #include <linux/xattr.h> +#include <linux/nospec.h> #include <linux/uaccess.h> #include <asm/unistd.h> @@ -2522,6 +2523,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; + call = array_index_nospec(call, SYS_SENDMMSG + 1); len = nargs[call]; if (len > sizeof(a)) @@ -2688,7 +2690,8 @@ EXPORT_SYMBOL(sock_unregister); bool sock_is_registered(int family) { - return family < NPROTO && rcu_access_pointer(net_families[family]); + return family < NPROTO && + rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]); } static int __init sock_init(void) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 4618f1c31137..1f3d9789af30 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags, return NULL; } + if (sk->sk_shutdown & RCV_SHUTDOWN) + return NULL; + if (sock_flag(sk, SOCK_DONE)) return NULL; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4eece06be1e7..80bc986c79e5 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info, params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_MFP) | BIT(NL80211_STA_FLAG_AUTHORIZED); + break; default: return -EINVAL; } @@ -14923,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, EXPORT_SYMBOL(cfg80211_mgmt_tx_status); static int __nl80211_rx_control_port(struct net_device *dev, - const u8 *buf, size_t len, - const u8 *addr, u16 proto, + struct sk_buff *skb, bool unencrypted, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); + struct ethhdr *ehdr = eth_hdr(skb); + const u8 *addr = ehdr->h_source; + u16 proto = be16_to_cpu(skb->protocol); struct sk_buff *msg; void *hdr; + struct nlattr *frame; + u32 nlportid = 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4eece06be1e7..80bc986c79e5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info,
 		params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
 					 BIT(NL80211_STA_FLAG_MFP) |
 					 BIT(NL80211_STA_FLAG_AUTHORIZED);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -14923,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
 
 static int __nl80211_rx_control_port(struct net_device *dev,
-				     const u8 *buf, size_t len,
-				     const u8 *addr, u16 proto,
+				     struct sk_buff *skb,
 				     bool unencrypted, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+	struct ethhdr *ehdr = eth_hdr(skb);
+	const u8 *addr = ehdr->h_source;
+	u16 proto = be16_to_cpu(skb->protocol);
 	struct sk_buff *msg;
 	void *hdr;
+	struct nlattr *frame;
+
 	u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid);
 
 	if (!nlportid)
 		return -ENOENT;
 
-	msg = nlmsg_new(100 + len, gfp);
+	msg = nlmsg_new(100 + skb->len, gfp);
 	if (!msg)
 		return -ENOMEM;
@@ -14950,13 +14955,17 @@ static int __nl80211_rx_control_port(struct net_device *dev,
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
 	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
 			      NL80211_ATTR_PAD) ||
-	    nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
 	    nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) ||
 	    (unencrypted && nla_put_flag(msg,
 					 NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT)))
 		goto nla_put_failure;
 
+	frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
+	if (!frame)
+		goto nla_put_failure;
+
+	skb_copy_bits(skb, 0, nla_data(frame), skb->len);
 	genlmsg_end(msg, hdr);
 
 	return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
@@ -14967,14 +14976,12 @@
 }
 
 bool cfg80211_rx_control_port(struct net_device *dev,
-			      const u8 *buf, size_t len,
-			      const u8 *addr, u16 proto, bool unencrypted)
+			      struct sk_buff *skb, bool unencrypted)
 {
 	int ret;
 
-	trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted);
-	ret = __nl80211_rx_control_port(dev, buf, len, addr, proto,
-					unencrypted, GFP_ATOMIC);
+	trace_cfg80211_rx_control_port(dev, skb, unencrypted);
+	ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC);
 	trace_cfg80211_return_bool(ret == 0);
 	return ret == 0;
 }
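[Editor's note: the nl80211 change above swaps nla_put(), which requires one flat source buffer, for nla_reserve() plus skb_copy_bits(), so a frame held in a non-linear skb can be copied fragment by fragment into the already-reserved attribute. Below is a toy sketch of that reserve-then-gather shape; msg_reserve() and the frag type are hypothetical stand-ins for nla_reserve() and skb fragments, not real APIs.]

    /* Generic "reserve, then scatter-gather copy" shape of the fix:
     * reserve the destination first, then copy fragment by fragment,
     * which is roughly what skb_copy_bits() does for a paged skb. */
    #include <stdio.h>
    #include <string.h>

    struct frag { const void *data; size_t len; };

    /* Stand-in for nla_reserve(): claims space in the message and
     * returns a pointer for the caller to fill in afterwards. */
    static void *msg_reserve(char *msg, size_t *off, size_t len)
    {
    	void *p = msg + *off;
    	*off += len;
    	return p;
    }

    int main(void)
    {
    	char msg[64];
    	size_t off = 0;

    	/* A "non-linear" payload: two fragments, no flat copy exists. */
    	struct frag frags[] = { { "hello ", 6 }, { "world", 5 } };

    	void *dst = msg_reserve(msg, &off, 11);	/* nla_reserve() analogue */
    	size_t copied = 0;

    	for (size_t i = 0; i < 2; i++) {	/* skb_copy_bits() analogue */
    		memcpy((char *)dst + copied, frags[i].data, frags[i].len);
    		copied += frags[i].len;
    	}

    	printf("%.*s\n", (int)copied, (char *)dst);
    	return 0;
    }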
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bbe6298e4bb9..4fc66a117b7d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
 		 * as some drivers used this to restore its orig_* reg domain.
 		 */
 		if (initiator == NL80211_REGDOM_SET_BY_CORE &&
-		    wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
+		    wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
+		    !(wiphy->regulatory_flags &
+		      REGULATORY_WIPHY_SELF_MANAGED))
 			reg_call_notifier(wiphy, lr);
 		return;
 	}
@@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
 	}
 }
 
-static bool reg_only_self_managed_wiphys(void)
-{
-	struct cfg80211_registered_device *rdev;
-	struct wiphy *wiphy;
-	bool self_managed_found = false;
-
-	ASSERT_RTNL();
-
-	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
-		wiphy = &rdev->wiphy;
-		if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
-			self_managed_found = true;
-		else
-			return false;
-	}
-
-	/* make sure at least one self-managed wiphy exists */
-	return self_managed_found;
-}
-
 /*
  * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
  * Regulatory hints come on a first come first serve basis and we
@@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void)
 	spin_unlock(&reg_requests_lock);
 
 	notify_self_managed_wiphys(reg_request);
-	if (reg_only_self_managed_wiphys()) {
-		reg_free_request(reg_request);
-		return;
-	}
 
 	reg_process_hint(reg_request);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 2b417a2fe63f..7c73510b161f 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
 );
 
 TRACE_EVENT(cfg80211_rx_control_port,
-	TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len,
-		 const u8 *addr, u16 proto, bool unencrypted),
-	TP_ARGS(netdev, buf, len, addr, proto, unencrypted),
+	TP_PROTO(struct net_device *netdev, struct sk_buff *skb,
+		 bool unencrypted),
+	TP_ARGS(netdev, skb, unencrypted),
 	TP_STRUCT__entry(
 		NETDEV_ENTRY
-		MAC_ENTRY(addr)
+		__field(int, len)
+		MAC_ENTRY(from)
 		__field(u16, proto)
 		__field(bool, unencrypted)
 	),
 	TP_fast_assign(
 		NETDEV_ASSIGN;
-		MAC_ASSIGN(addr, addr);
-		__entry->proto = proto;
+		__entry->len = skb->len;
+		MAC_ASSIGN(from, eth_hdr(skb)->h_source);
+		__entry->proto = be16_to_cpu(skb->protocol);
 		__entry->unencrypted = unencrypted;
 	),
-	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s",
-		  NETDEV_PR_ARG, MAC_PR_ARG(addr),
+	TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s",
+		  NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from),
 		  __entry->proto, BOOL_TO_STR(__entry->unencrypted))
 );
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 72335c2e8108..4e937cd7c17d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -84,10 +84,8 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
 	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
 
-	if (err) {
-		xdp_return_buff(xdp);
+	if (err)
 		xs->rx_dropped++;
-	}
 
 	return err;
 }
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 52ecaf770642..8a64b150be54 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -250,7 +250,7 @@ static inline bool xskq_full_desc(struct xsk_queue *q)
 
 static inline bool xskq_empty_desc(struct xsk_queue *q)
 {
-	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
+	return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
 }
 
 void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
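[Editor's note: the xsk_queue.h one-liner above is subtler than it looks. xskq_nb_free() serves requests from the producer's cached copy of the consumer index and only re-reads the shared index when the cache cannot satisfy the request; asking for just 1 free entry is almost always satisfied by the stale cache, so the emptiness test "== nentries" fails even after the consumer has caught up. Requesting nentries forces the refresh. Below is a single-threaded toy model of that accounting; it is not the kernel code, which is lock-free and relies on power-of-two index wrapping.]

    #include <stdio.h>

    struct ring {
    	unsigned int nentries;        /* power of two in the real code */
    	unsigned int producer;        /* local producer index (prod_tail) */
    	unsigned int shared_consumer; /* index the consumer published */
    	unsigned int cons_tail;       /* producer's cached consumer index */
    };

    /* Simplified stand-in for xskq_nb_free(): use the cached consumer
     * index if it already shows dcnt free entries, else refresh it. */
    static unsigned int nb_free(struct ring *q, unsigned int dcnt)
    {
    	unsigned int free_entries = q->nentries - (q->producer - q->cons_tail);

    	if (free_entries >= dcnt)
    		return free_entries;

    	q->cons_tail = q->shared_consumer;	/* refresh the cache */
    	return q->nentries - (q->producer - q->cons_tail);
    }

    int main(void)
    {
    	struct ring q = { .nentries = 8, .producer = 4,
    			  .shared_consumer = 4, .cons_tail = 0 };

    	/* Old check: dcnt == 1 is satisfied by the stale cache, so the
    	 * ring looks non-empty even though the consumer caught up. */
    	printf("dcnt=1:        free=%u (stale)\n", nb_free(&q, 1));

    	/* Fixed check: asking for nentries forces a refresh, and the
    	 * empty ring now reports all entries free. */
    	printf("dcnt=nentries: free=%u\n", nb_free(&q, q.nentries));
    	return 0;
    }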
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5f48251c1319..7c5e8978aeaa 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2286,6 +2286,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
 
+	if (IS_ERR(dst))
+		dst_release(dst_orig);
+
 	return dst;
 }
 EXPORT_SYMBOL(xfrm_lookup_route);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 080035f056d9..33878e6e0d0a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1025,10 +1025,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
 {
 	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
 
-	if (nlsk)
-		return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
-	else
-		return -1;
+	if (!nlsk) {
+		kfree_skb(skb);
+		return -EPIPE;
+	}
+
+	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
 }
 
 static inline unsigned int xfrm_spdinfo_msgsize(void)
@@ -1671,9 +1673,11 @@ static inline unsigned int userpolicy_type_attrsize(void)
 #ifdef CONFIG_XFRM_SUB_POLICY
 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
 {
-	struct xfrm_userpolicy_type upt = {
-		.type = type,
-	};
+	struct xfrm_userpolicy_type upt;
+
+	/* Sadly there are two holes in struct xfrm_userpolicy_type */
+	memset(&upt, 0, sizeof(upt));
+	upt.type = type;
 
 	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
 }
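[Editor's note: the final xfrm_user.c hunk fixes a classic stack infoleak. A designated initializer zeroes only the named members; the padding holes between members may keep stale stack bytes, and nla_put() copies the whole struct, holes included, to userspace. memset() clears every byte first. Below is a userspace illustration; the struct is a stand-in whose hole placement is ABI-dependent, not the real xfrm_userpolicy_type.]

    #include <stdio.h>
    #include <string.h>

    /* The 3-byte hole after 'type' mirrors the kind of holes pahole
     * reports in struct xfrm_userpolicy_type; exact layout varies. */
    struct upt {
    	unsigned char type;
    	unsigned int reserved;
    };

    static void dump(const char *tag, const struct upt *p)
    {
    	const unsigned char *raw = (const unsigned char *)p;

    	printf("%s:", tag);
    	for (size_t i = 0; i < sizeof(*p); i++)
    		printf(" %02x", raw[i]);
    	printf("\n");
    }

    int main(void)
    {
    	struct upt a = { .type = 1 };	/* members zeroed; padding unspecified */
    	struct upt b;

    	memset(&b, 0, sizeof(b));	/* every byte zeroed, holes included */
    	b.type = 1;

    	/* nla_put() copies sizeof(struct) raw bytes; with 'a' the
    	 * padding bytes (indeterminate here, stale kernel stack in the
    	 * original bug) would be published too. */
    	dump("initializer", &a);
    	dump("memset     ", &b);
    	return 0;
    }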