Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_input.c     | 11 ++++++-----
-rw-r--r--  net/netlink/af_netlink.c | 30 ++++++++++++++++++++++++++++++

2 files changed, 36 insertions, 5 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1969e16d936d..25a89eaa669d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3162,16 +3162,14 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 
         /* If reordering is high then always grow cwnd whenever data is
          * delivered regardless of its ordering. Otherwise stay conservative
-         * and only grow cwnd on in-order delivery in Open state, and retain
-         * cwnd in Disordered state (RFC5681). A stretched ACK with
+         * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
          * new SACK or ECE mark may first advance cwnd here and later reduce
          * cwnd in tcp_fastretrans_alert() based on more states.
          */
         if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
                 return flag & FLAG_FORWARD_PROGRESS;
 
-        return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
-               flag & FLAG_DATA_ACKED;
+        return flag & FLAG_DATA_ACKED;
 }
 
 /* Check that window update is acceptable.
@@ -4141,6 +4139,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
                 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
                         __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
                 } else {
+                        tcp_grow_window(sk, skb);
                         kfree_skb_partial(skb, fragstolen);
                         skb = NULL;
                 }
@@ -4216,8 +4215,10 @@ add_sack:
         if (tcp_is_sack(tp))
                 tcp_sack_new_ofo_skb(sk, seq, end_seq);
 end:
-        if (skb)
+        if (skb) {
+                tcp_grow_window(sk, skb);
                 skb_set_owner_r(skb, sk);
+        }
 }
 
 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a17dda1bbee0..8df7f64c6db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -168,16 +168,43 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);
 
+static bool netlink_filter_tap(const struct sk_buff *skb)
+{
+        struct sock *sk = skb->sk;
+        bool pass = false;
+
+        /* We take the more conservative approach and
+         * whitelist socket protocols that may pass.
+         */
+        switch (sk->sk_protocol) {
+        case NETLINK_ROUTE:
+        case NETLINK_USERSOCK:
+        case NETLINK_SOCK_DIAG:
+        case NETLINK_NFLOG:
+        case NETLINK_XFRM:
+        case NETLINK_FIB_LOOKUP:
+        case NETLINK_NETFILTER:
+        case NETLINK_GENERIC:
+                pass = true;
+                break;
+        }
+
+        return pass;
+}
+
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
                                      struct net_device *dev)
 {
         struct sk_buff *nskb;
+        struct sock *sk = skb->sk;
         int ret = -ENOMEM;
 
         dev_hold(dev);
         nskb = skb_clone(skb, GFP_ATOMIC);
         if (nskb) {
                 nskb->dev = dev;
+                nskb->protocol = htons((u16) sk->sk_protocol);
+
                 ret = dev_queue_xmit(nskb);
                 if (unlikely(ret > 0))
                         ret = net_xmit_errno(ret);
@@ -192,6 +219,9 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
         int ret;
         struct netlink_tap *tmp;
 
+        if (!netlink_filter_tap(skb))
+                return;
+
         list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
                 ret = __netlink_deliver_tap_skb(skb, tmp->dev);
                 if (unlikely(ret))
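
The tcp_input.c hunks make two changes: tcp_may_raise_cwnd() no longer requires the TCP_CA_Open state before growing cwnd on in-order delivery, and the out-of-order queue path now calls tcp_grow_window() so the receive window keeps growing when segments arrive out of order. A minimal standalone sketch of the predicate change follows; it is not the kernel code, and the flag values and state enum are simplified stand-ins for the kernel's internals:

#include <stdbool.h>

/* Simplified stand-ins for the kernel's ACK flag bits and CA states. */
#define FLAG_DATA_ACKED       0x04 /* this ACK acknowledged new data */
#define FLAG_FORWARD_PROGRESS 0x08 /* new data ACKed or new SACK info */

enum ca_state { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

/* Old predicate: outside the high-reordering case, cwnd could only
 * grow while the connection was in the Open state.
 */
bool may_raise_cwnd_old(enum ca_state state, int flag,
                        int reordering, int reordering_thresh)
{
        if (reordering > reordering_thresh)
                return flag & FLAG_FORWARD_PROGRESS;
        return state == CA_OPEN && (flag & FLAG_DATA_ACKED);
}

/* New predicate: any ACK of new data may grow cwnd, regardless of
 * state; tcp_fastretrans_alert() may still reduce cwnd afterwards.
 */
bool may_raise_cwnd_new(int flag, int reordering, int reordering_thresh)
{
        if (reordering > reordering_thresh)
                return flag & FLAG_FORWARD_PROGRESS;
        return flag & FLAG_DATA_ACKED;
}

The practical difference shows up in the Disorder state: may_raise_cwnd_old(CA_DISORDER, FLAG_DATA_ACKED, 3, 3) returns false, while may_raise_cwnd_new(FLAG_DATA_ACKED, 3, 3) returns true.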
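
The af_netlink.c side gates the tap path by protocol and stamps the clone's protocol field so capture tools can demultiplex mirrored frames. A hypothetical userspace illustration of what the whitelist means in practice, using only standard socket calls:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
        /* NETLINK_ROUTE is on the whitelist in netlink_filter_tap(),
         * so traffic on this socket is still cloned to registered taps.
         */
        int tapped = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        /* NETLINK_AUDIT is not whitelisted, so after this change its
         * traffic no longer reaches taps at all.
         */
        int untapped = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);

        if (tapped < 0 || untapped < 0) {
                perror("socket");
                return 1;
        }
        return 0;
}

Whitelisting rather than blacklisting matches the comment in the patch: a protocol stays invisible to taps unless explicitly added, so newly introduced or sensitive ones are excluded by default.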