From 46c0ef6e1eb95f619d9f62da4332749153db92f7 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Fri, 16 Mar 2018 11:35:51 +0900 Subject: xfrm: fix rcu_read_unlock usage in xfrm_local_error In xfrm_local_error(), rcu_read_unlock() should be called only when afinfo is not NULL, because xfrm_state_get_afinfo() already calls rcu_read_unlock() itself when afinfo is NULL. Fixes: af5d27c4e12b ("xfrm: remove xfrm_state_put_afinfo") Signed-off-by: Taehee Yoo Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_output.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 23468672a767..89b178a78dc7 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -285,8 +285,9 @@ void xfrm_local_error(struct sk_buff *skb, int mtu) return; afinfo = xfrm_state_get_afinfo(proto); - if (afinfo) + if (afinfo) { afinfo->local_error(skb, mtu); - rcu_read_unlock(); + rcu_read_unlock(); + } } EXPORT_SYMBOL_GPL(xfrm_local_error); -- cgit v1.2.1 From dd1df24737727e119c263acf1be2a92763938297 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Thu, 15 Mar 2018 17:16:27 +0100 Subject: vti4: Don't count header length twice on tunnel setup This re-introduces the effect of commit a32452366b72 ("vti4: Don't count header length twice.") which was accidentally reverted by merge commit f895f0cfbb77 ("Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec"). The commit message from Steffen Klassert said: We currently count the size of LL_MAX_HEADER and struct iphdr twice for vti4 devices, this leads to a wrong device mtu. The size of LL_MAX_HEADER and struct iphdr is already counted in ip_tunnel_bind_dev(), so don't do it again in vti_tunnel_init(). And this is still the case now: ip_tunnel_bind_dev() already accounts for the header length of the link layer (not necessarily LL_MAX_HEADER, if the output device is found), plus one IP header. For example, with a vti device on top of veth, with MTU of 1500, the existing implementation would set the initial vti MTU to 1332, accounting once for LL_MAX_HEADER (128, included in hard_header_len by vti) and twice for the same IP header (once from hard_header_len, once from ip_tunnel_bind_dev()). It should instead be 1480, because ip_tunnel_bind_dev() is able to figure out that the output device is veth, so no additional link layer header is attached, and will properly count one single IP header. The existing issue had the side effect of avoiding PMTUD for most xfrm policies, by arbitrarily lowering the initial MTU. However, the only way to get a consistent PMTU value is to let the xfrm PMTU discovery do its course, and commit d6af1a31cc72 ("vti: Add pmtu handling to vti_xmit.") now takes care of local delivery cases where the application ignores local socket notifications.
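To make the arithmetic above concrete, here is a minimal stand-alone sketch (plain user-space C, not part of the patch), assuming LL_MAX_HEADER is 128 and an IPv4 header is 20 bytes, as stated in the message:

```c
#include <stdio.h>

#define VETH_MTU      1500
#define LL_MAX_HEADER  128   /* worst-case link layer header assumed by vti */
#define IPV4_HDR_LEN    20   /* sizeof(struct iphdr) */

int main(void)
{
    /* before the fix: vti set hard_header_len = LL_MAX_HEADER + iphdr,
     * and ip_tunnel_bind_dev() subtracted a second iphdr on top of that
     */
    int old_mtu = VETH_MTU - (LL_MAX_HEADER + IPV4_HDR_LEN) - IPV4_HDR_LEN;

    /* after the fix: only ip_tunnel_bind_dev() accounts for the overhead;
     * veth adds no link layer header, so a single iphdr is subtracted
     */
    int new_mtu = VETH_MTU - IPV4_HDR_LEN;

    printf("initial vti MTU, old behaviour: %d\n", old_mtu);  /* 1332 */
    printf("initial vti MTU, fixed:         %d\n", new_mtu);  /* 1480 */
    return 0;
}
```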
Fixes: b9959fd3b0fa ("vti: switch to new ip tunnel code") Fixes: f895f0cfbb77 ("Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec") Signed-off-by: Stefano Brivio Acked-by: Sabrina Dubroca Signed-off-by: Steffen Klassert --- net/ipv4/ip_vti.c | 1 - 1 file changed, 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 51b1669334fe..502e5222eaa9 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -387,7 +387,6 @@ static int vti_tunnel_init(struct net_device *dev) memcpy(dev->dev_addr, &iph->saddr, 4); memcpy(dev->broadcast, &iph->daddr, 4); - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); dev->mtu = ETH_DATA_LEN; dev->flags = IFF_NOARP; dev->addr_len = 4; -- cgit v1.2.1 From 24fc79798b8ddfd46f2dd363a8d29072c083b977 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Thu, 15 Mar 2018 17:16:28 +0100 Subject: ip_tunnel: Clamp MTU to bounds on new link Otherwise, it's possible to specify invalid MTU values directly on creation of a link (via 'ip link add'). This is already prevented on subsequent MTU changes by commit b96f9afee4eb ("ipv4/6: use core net MTU range checking"). Fixes: c54419321455 ("GRE: Refactor GRE tunneling code.") Signed-off-by: Stefano Brivio Acked-by: Sabrina Dubroca Signed-off-by: Steffen Klassert --- net/ipv4/ip_tunnel.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 6d21068f9b55..7c76dd17b6b9 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -1108,8 +1108,14 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], eth_hw_addr_random(dev); mtu = ip_tunnel_bind_dev(dev); - if (!tb[IFLA_MTU]) + if (tb[IFLA_MTU]) { + unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; + + dev->mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, + (unsigned int)(max - sizeof(struct iphdr))); + } else { dev->mtu = mtu; + } ip_tunnel_add(itn, nt); out: -- cgit v1.2.1 From 03080e5ec72740c1a62e6730f2a5f3f114f11b19 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Thu, 15 Mar 2018 17:16:29 +0100 Subject: vti4: Don't override MTU passed on link creation via IFLA_MTU Don't hardcode a MTU value on vti tunnel initialization, ip_tunnel_newlink() is able to deal with this already. See also commit ffc2b6ee4174 ("ip_gre: fix IFLA_MTU ignored on NEWLINK"). Fixes: 1181412c1a67 ("net/ipv4: VTI support new module for ip_vti.") Signed-off-by: Stefano Brivio Acked-by: Sabrina Dubroca Signed-off-by: Steffen Klassert --- net/ipv4/ip_vti.c | 1 - 1 file changed, 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 502e5222eaa9..3f091ccad9af 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -387,7 +387,6 @@ static int vti_tunnel_init(struct net_device *dev) memcpy(dev->dev_addr, &iph->saddr, 4); memcpy(dev->broadcast, &iph->daddr, 4); - dev->mtu = ETH_DATA_LEN; dev->flags = IFF_NOARP; dev->addr_len = 4; dev->features |= NETIF_F_LLTX; -- cgit v1.2.1 From c6741fbed6dc0f183d26c4b6bca4517672f92e6c Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Thu, 15 Mar 2018 17:17:11 +0100 Subject: vti6: Properly adjust vti6 MTU from MTU of lower device If a lower device is found, we don't need to subtract LL_MAX_HEADER to calculate our MTU: just use its MTU, the link layer headers are already taken into account by it. If the lower device is not found, start from ETH_DATA_LEN instead, and only in this case subtract a worst-case LL_MAX_HEADER. 
We then need to subtract our additional IPv6 header from the calculation. While at it, note that vti6 doesn't have a hardware header, so it doesn't need to set dev->hard_header_len. And as vti6_link_config() now always sets the MTU, there's no need to set a default value in vti6_dev_setup(). This makes the behaviour consistent with IPv4 vti, after commit a32452366b72 ("vti4: Don't count header length twice."), which was accidentally reverted by merge commit f895f0cfbb77 ("Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec"). While commit 53c81e95df17 ("ip6_vti: adjust vti mtu according to mtu of lower device") improved on the original situation, this was still not ideal. As reported in that commit message itself, if we start from an underlying veth MTU of 9000, we end up with an MTU of 8832, that is, 9000 - LL_MAX_HEADER - sizeof(ipv6hdr). This should simply be 8960, or 9000 - sizeof(ipv6hdr) instead: we found the lower device (veth) and we know we don't have any additional link layer header, so there's no need to subtract a hypothetical worst-case number. Fixes: 53c81e95df17 ("ip6_vti: adjust vti mtu according to mtu of lower device") Signed-off-by: Stefano Brivio Acked-by: Sabrina Dubroca Signed-off-by: Steffen Klassert --- net/ipv6/ip6_vti.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index fa3ae1cb50d3..2ceef41cc097 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -627,6 +627,7 @@ static void vti6_link_config(struct ip6_tnl *t) struct net_device *dev = t->dev; struct __ip6_tnl_parm *p = &t->parms; struct net_device *tdev = NULL; + int mtu; memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); @@ -656,8 +657,11 @@ static void vti6_link_config(struct ip6_tnl *t) tdev = __dev_get_by_index(t->net, p->link); if (tdev) - dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len, - IPV6_MIN_MTU); + mtu = tdev->mtu - sizeof(struct ipv6hdr); + else + mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr); + + dev->mtu = max_t(int, mtu, IPV6_MIN_MTU); } /** @@ -866,8 +870,6 @@ static void vti6_dev_setup(struct net_device *dev) dev->priv_destructor = vti6_dev_free; dev->type = ARPHRD_TUNNEL6; - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); - dev->mtu = ETH_DATA_LEN; dev->min_mtu = IPV6_MIN_MTU; dev->max_mtu = IP_MAX_MTU; dev->flags |= IFF_NOARP; -- cgit v1.2.1 From 7a67e69a339a6824be2fc483073782ab2f47fcd2 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Thu, 15 Mar 2018 17:17:12 +0100 Subject: vti6: Keep set MTU on link creation or change, validate it In vti6_link_config(), if MTU is already given on link creation or change, validate and use it instead of recomputing it. To do that, we need to propagate the knowledge that MTU was set by userspace all the way down to vti6_link_config(). To keep this simple, vti6_dev_init() sets the new 'keep_mtu' argument of vti6_link_config() to true: on initialization, we don't have convenient access to netlink attributes there, but we will anyway check whether dev->mtu is set in vti6_link_config(). If it's non-zero, it was set to the value of the IFLA_MTU attribute during creation. Otherwise, determine a reasonable value.
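The resulting MTU selection of these two vti6 patches can be summarised with a small stand-alone sketch (user-space C, not the kernel implementation; the constants and the clamping helper are simplified stand-ins for IPV6_MIN_MTU, ETH_DATA_LEN, LL_MAX_HEADER and sizeof(struct ipv6hdr)):

```c
#include <stdio.h>

#define IPV6_MIN_MTU   1280
#define IPV6_HDR_LEN     40   /* sizeof(struct ipv6hdr) */
#define ETH_DATA_LEN   1500
#define LL_MAX_HEADER   128

static int clampi(int v, int lo, int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* user_mtu == 0 means "not configured", lower_mtu == 0 means "no lower device" */
static int vti6_pick_mtu(int user_mtu, int lower_mtu, int min_mtu, int max_mtu)
{
    int mtu;

    if (user_mtu)            /* keep what userspace asked for, just bound it */
        return clampi(user_mtu, min_mtu, max_mtu);

    if (lower_mtu)           /* lower device found: only our IPv6 header on top */
        mtu = lower_mtu - IPV6_HDR_LEN;
    else                     /* no lower device: assume a worst-case link layer */
        mtu = ETH_DATA_LEN - LL_MAX_HEADER - IPV6_HDR_LEN;

    return mtu > min_mtu ? mtu : min_mtu;
}

int main(void)
{
    int max_mtu = 65535 - IPV6_HDR_LEN;   /* IP_MAX_MTU minus our header, see the max_mtu fix below */

    printf("veth 9000, no MTU given: %d\n", vti6_pick_mtu(0, 9000, IPV6_MIN_MTU, max_mtu));   /* 8960 */
    printf("MTU 1400 from userspace: %d\n", vti6_pick_mtu(1400, 0, IPV6_MIN_MTU, max_mtu));   /* 1400 */
    return 0;
}
```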
Fixes: ed1efb2aefbb ("ipv6: Add support for IPsec virtual tunnel interfaces") Fixes: 53c81e95df17 ("ip6_vti: adjust vti mtu according to mtu of lower device") Signed-off-by: Stefano Brivio Acked-by: Sabrina Dubroca Signed-off-by: Steffen Klassert --- net/ipv6/ip6_vti.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 2ceef41cc097..971175142e14 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -622,7 +622,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, return 0; } -static void vti6_link_config(struct ip6_tnl *t) +static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu) { struct net_device *dev = t->dev; struct __ip6_tnl_parm *p = &t->parms; @@ -641,6 +641,11 @@ static void vti6_link_config(struct ip6_tnl *t) else dev->flags &= ~IFF_POINTOPOINT; + if (keep_mtu && dev->mtu) { + dev->mtu = clamp(dev->mtu, dev->min_mtu, dev->max_mtu); + return; + } + if (p->flags & IP6_TNL_F_CAP_XMIT) { int strict = (ipv6_addr_type(&p->raddr) & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); @@ -668,12 +673,14 @@ static void vti6_link_config(struct ip6_tnl *t) * vti6_tnl_change - update the tunnel parameters * @t: tunnel to be changed * @p: tunnel configuration parameters + * @keep_mtu: MTU was set from userspace, don't re-compute it * * Description: * vti6_tnl_change() updates the tunnel parameters **/ static int -vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) +vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p, + bool keep_mtu) { t->parms.laddr = p->laddr; t->parms.raddr = p->raddr; @@ -683,11 +690,12 @@ vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) t->parms.proto = p->proto; t->parms.fwmark = p->fwmark; dst_cache_reset(&t->dst_cache); - vti6_link_config(t); + vti6_link_config(t, keep_mtu); return 0; } -static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) +static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p, + bool keep_mtu) { struct net *net = dev_net(t->dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id); @@ -695,7 +703,7 @@ static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) vti6_tnl_unlink(ip6n, t); synchronize_net(); - err = vti6_tnl_change(t, p); + err = vti6_tnl_change(t, p, keep_mtu); vti6_tnl_link(ip6n, t); netdev_state_change(t->dev); return err; @@ -808,7 +816,7 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } else t = netdev_priv(dev); - err = vti6_update(t, &p1); + err = vti6_update(t, &p1, false); } if (t) { err = 0; @@ -907,7 +915,7 @@ static int vti6_dev_init(struct net_device *dev) if (err) return err; - vti6_link_config(t); + vti6_link_config(t, true); return 0; } @@ -1012,7 +1020,7 @@ static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], } else t = netdev_priv(dev); - return vti6_update(t, &p); + return vti6_update(t, &p, tb && tb[IFLA_MTU]); } static size_t vti6_get_size(const struct net_device *dev) -- cgit v1.2.1 From f8a554b4aa9686bb2c12f6bae516e58783289a03 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Thu, 15 Mar 2018 17:17:13 +0100 Subject: vti6: Fix dev->max_mtu setting We shouldn't allow a tunnel to have IP_MAX_MTU as MTU, because another IPv6 header is going on top of our packets. Without this patch, we might end up building packets bigger than IP_MAX_MTU. 
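The bound itself is simple arithmetic; a stand-alone sketch of the overflow this prevents (user-space C, assuming IP_MAX_MTU is 65535 and an IPv6 header is 40 bytes):

```c
#include <stdio.h>

#define IP_MAX_MTU    65535U
#define IPV6_HDR_LEN     40U  /* sizeof(struct ipv6hdr) */

int main(void)
{
    /* old bound: a tunnel MTU of 65535 plus the outer IPv6 header
     * yields a packet larger than anything IPv6 can describe
     */
    printf("old worst case: %u bytes on the wire\n", IP_MAX_MTU + IPV6_HDR_LEN);

    /* new bound: leave room for the extra header we always add */
    printf("new worst case: %u bytes on the wire\n",
           (IP_MAX_MTU - IPV6_HDR_LEN) + IPV6_HDR_LEN);
    return 0;
}
```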
Fixes: b96f9afee4eb ("ipv4/6: use core net MTU range checking") Signed-off-by: Stefano Brivio Acked-by: Sabrina Dubroca Signed-off-by: Steffen Klassert --- net/ipv6/ip6_vti.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 971175142e14..ce18cd20389d 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -879,7 +879,7 @@ static void vti6_dev_setup(struct net_device *dev) dev->type = ARPHRD_TUNNEL6; dev->min_mtu = IPV6_MIN_MTU; - dev->max_mtu = IP_MAX_MTU; + dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr); dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); netif_keep_dst(dev); -- cgit v1.2.1 From 4f2921ca21b71a9faaecd84a9fc74401d3a8d275 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 14 Mar 2018 13:37:58 +0100 Subject: netfilter: nf_tables: meter: pick a set backend that supports updates In nftables, 'meter' can be used to instantiate a hash-table at run time: rule add filter forward iif "internal" meter hostacct { ip saddr counter} nft list meter ip filter hostacct table ip filter { meter hostacct { type ipv4_addr elements = { 192.168.0.1 : counter packets 8 bytes 2672, .. Because elements get added on the fly, the kernel must choose a set backend type that implements the ->update() function, otherwise rule insertion fails with EOPNOTSUPP. Therefore, skip set types that lack ->update, and also make sure we do not discard a (bad) candidate when we have not yet found any candidate at all. This could happen when userspace prefers low memory footprint -- the set implementation currently checked might not be a fit at all. Make sure we pick it anyway (!bops). In case the next candidate is a better fit, it will be chosen instead. But in case nothing else is found we at least have a non-ideal match rather than no match at all.
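A minimal sketch of that selection rule (user-space C with made-up backend names and fields; the real logic lives in nft_select_set_ops() and weighs lookup time and memory estimates as well):

```c
#include <stddef.h>
#include <stdio.h>

#define SET_EVAL  0x1   /* set elements are added at run time (meter case) */

struct set_ops {
    const char *name;
    unsigned int features;
    int (*update)(void);    /* NULL if the backend can't update in place */
    unsigned int est_size;  /* estimated memory footprint */
};

static int rhash_update(void) { return 0; }

static const struct set_ops backends[] = {
    { "bitmap", 0,        NULL,         64  },
    { "hash",   0,        NULL,         128 },
    { "rhash",  SET_EVAL, rhash_update, 256 },
};

static const struct set_ops *select_ops(unsigned int flags)
{
    const struct set_ops *best = NULL;
    size_t i;

    for (i = 0; i < sizeof(backends) / sizeof(backends[0]); i++) {
        const struct set_ops *ops = &backends[i];

        if ((flags & SET_EVAL) && !ops->update)
            continue;               /* can't add elements on the fly */
        if (!best || ops->est_size < best->est_size)
            best = ops;             /* keep the first usable match, prefer smaller later */
    }
    return best;
}

int main(void)
{
    const struct set_ops *ops = select_ops(SET_EVAL);

    printf("chosen backend: %s\n", ops ? ops->name : "(none)");
    return 0;
}
```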
Fixes: 6c03ae210ce3 ("netfilter: nft_set_hash: add non-resizable hashtable implementation") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 5 ++++- net/netfilter/nft_set_hash.c | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c4acc7340eb1..36f69acaf51f 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2446,6 +2446,9 @@ EXPORT_SYMBOL_GPL(nft_unregister_set); static bool nft_set_ops_candidate(const struct nft_set_ops *ops, u32 flags) { + if ((flags & NFT_SET_EVAL) && !ops->update) + return false; + return (flags & ops->features) == (flags & NFT_SET_FEATURES); } @@ -2510,7 +2513,7 @@ nft_select_set_ops(const struct nft_ctx *ctx, if (est.space == best.space && est.lookup < best.lookup) break; - } else if (est.size < best.size) { + } else if (est.size < best.size || !bops) { break; } continue; diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index d40591fe1b2f..fc9c6d5d64cd 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -674,7 +674,7 @@ static const struct nft_set_ops * nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc, u32 flags) { - if (desc->size && !(flags & NFT_SET_TIMEOUT)) { + if (desc->size && !(flags & (NFT_SET_EVAL | NFT_SET_TIMEOUT))) { switch (desc->klen) { case 4: return &nft_hash_fast_ops; -- cgit v1.2.1 From ae6153b50f9bf75a4952050f32fe168f68cdd657 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 18 Mar 2018 19:22:39 +0100 Subject: netfilter: nf_tables: permit second nat hook if colliding hook is going away Sergei Trofimovich reported that restoring an nft ruleset doesn't work anymore unless old rule content is flushed first. The problem stems from a recent change designed to prevent multiple nat hooks at the same hook point locations and nftables transaction model. A 'flush ruleset' won't take effect until the entire transaction has completed. So, if one has a nft.rules file that contains a 'flush ruleset', followed by a nat hook register request, then 'nft -f file' will work, but running 'nft -f file' again will fail with -EBUSY. Reason is that nftables will place the flush/removal requests in the transaction list, but it will not act on the removal until after all new rules are in place. The netfilter core will therefore get request to register a new nat hook before the old one is removed -- this now fails as the netfilter core can't know the existing hook is staged for removal. To fix this, we can search the transaction log when a hook collision is detected. The collision is okay if 1. there is a delete request pending for the nat hook that is already registered. 2. there is no second add request for a matching nat hook. This is required to only apply the exception once. 
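The rule can be sketched in a few lines of plain C (illustrative only; names such as pending_trans are made up, and the kernel walks net->nft.commit_list instead of an array):

```c
#include <stdbool.h>
#include <stdio.h>

enum msg_type { MSG_NEWCHAIN, MSG_DELCHAIN };

struct pending_trans {
    enum msg_type type;
    bool nat_hook;
    int pf;        /* protocol family, e.g. 2 for AF_INET */
    int hooknum;   /* hook location, e.g. 0 for prerouting */
};

static bool allow_nat_conflict(const struct pending_trans *log, int n,
                               int pf, int hooknum)
{
    bool going_away = false;
    int i;

    for (i = 0; i < n; i++) {
        if (!log[i].nat_hook || log[i].pf != pf || log[i].hooknum != hooknum)
            continue;
        if (log[i].type == MSG_NEWCHAIN)
            return false;   /* another add claims the same spot: real conflict */
        going_away = true;  /* matching delete pending: conflict is temporary */
    }
    return going_away;
}

int main(void)
{
    struct pending_trans log[] = {
        { MSG_DELCHAIN, true, 2, 0 },   /* old nat chain staged for removal */
    };

    printf("allow colliding registration: %d\n",
           allow_nat_conflict(log, 1, 2, 0));   /* 1 */
    return 0;
}
```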
Fixes: f92b40a8b2645 ("netfilter: core: only allow one nat hook per hook point") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 64 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 36f69acaf51f..cc8ca00e6e6e 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -74,15 +74,77 @@ static void nft_trans_destroy(struct nft_trans *trans) kfree(trans); } +/* removal requests are queued in the commit_list, but not acted upon + * until after all new rules are in place. + * + * Therefore, nf_register_net_hook(net, &nat_hook) runs before pending + * nf_unregister_net_hook(). + * + * nf_register_net_hook thus fails if a nat hook is already in place + * even if the conflicting hook is about to be removed. + * + * If collision is detected, search commit_log for DELCHAIN matching + * the new nat hooknum; if we find one collision is temporary: + * + * Either transaction is aborted (new/colliding hook is removed), or + * transaction is committed (old hook is removed). + */ +static bool nf_tables_allow_nat_conflict(const struct net *net, + const struct nf_hook_ops *ops) +{ + const struct nft_trans *trans; + bool ret = false; + + if (!ops->nat_hook) + return false; + + list_for_each_entry(trans, &net->nft.commit_list, list) { + const struct nf_hook_ops *pending_ops; + const struct nft_chain *pending; + + if (trans->msg_type != NFT_MSG_NEWCHAIN && + trans->msg_type != NFT_MSG_DELCHAIN) + continue; + + pending = trans->ctx.chain; + if (!nft_is_base_chain(pending)) + continue; + + pending_ops = &nft_base_chain(pending)->ops; + if (pending_ops->nat_hook && + pending_ops->pf == ops->pf && + pending_ops->hooknum == ops->hooknum) { + /* other hook registration already pending? 
*/ + if (trans->msg_type == NFT_MSG_NEWCHAIN) + return false; + + ret = true; + } + } + + return ret; +} + static int nf_tables_register_hook(struct net *net, const struct nft_table *table, struct nft_chain *chain) { + struct nf_hook_ops *ops; + int ret; + if (table->flags & NFT_TABLE_F_DORMANT || !nft_is_base_chain(chain)) return 0; - return nf_register_net_hook(net, &nft_base_chain(chain)->ops); + ops = &nft_base_chain(chain)->ops; + ret = nf_register_net_hook(net, ops); + if (ret == -EBUSY && nf_tables_allow_nat_conflict(net, ops)) { + ops->nat_hook = false; + ret = nf_register_net_hook(net, ops); + ops->nat_hook = true; + } + + return ret; } static void nf_tables_unregister_hook(struct net *net, -- cgit v1.2.1 From 467697d289e7e6e1b15910d99096c0da08c56d5b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 20 Mar 2018 15:25:37 +0100 Subject: netfilter: nf_tables: add missing netlink attrs to policies Fixes: 8aeff920dcc9 ("netfilter: nf_tables: add stateful object reference to set elements") Fixes: f25ad2e907f1 ("netfilter: nf_tables: prepare for expressions associated to set elements") Fixes: 1a94e38d254b ("netfilter: nf_tables: add NFTA_RULE_ID attribute") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index cc8ca00e6e6e..14777c404071 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1973,6 +1973,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { [NFTA_RULE_POSITION] = { .type = NLA_U64 }, [NFTA_RULE_USERDATA] = { .type = NLA_BINARY, .len = NFT_USERDATA_MAXLEN }, + [NFTA_RULE_ID] = { .type = NLA_U32 }, }; static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, @@ -3380,6 +3381,8 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = { [NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 }, [NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY, .len = NFT_USERDATA_MAXLEN }, + [NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED }, + [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING }, }; static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { -- cgit v1.2.1 From aebfa52a925d701114afd6af0def35bab16d4f47 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 22 Mar 2018 11:08:50 +0100 Subject: netfilter: drop template ct when conntrack is skipped. The ipv4 nf_ct code currently skips the nf_conntrak_in() call for fragmented packets. As a results later matches/target can end up manipulating template ct entry instead of 'real' ones. Exploiting the above, syzbot found a way to trigger the following splat: WARNING: CPU: 1 PID: 4242 at net/netfilter/xt_cluster.c:55 xt_cluster_mt+0x6c1/0x840 net/netfilter/xt_cluster.c:127 Kernel panic - not syncing: panic_on_warn set ... 
CPU: 1 PID: 4242 Comm: syzkaller027971 Not tainted 4.16.0-rc2+ #243 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x24d lib/dump_stack.c:53 panic+0x1e4/0x41c kernel/panic.c:183 __warn+0x1dc/0x200 kernel/panic.c:547 report_bug+0x211/0x2d0 lib/bug.c:184 fixup_bug.part.11+0x37/0x80 arch/x86/kernel/traps.c:178 fixup_bug arch/x86/kernel/traps.c:247 [inline] do_error_trap+0x2d7/0x3e0 arch/x86/kernel/traps.c:296 do_invalid_op+0x1b/0x20 arch/x86/kernel/traps.c:315 invalid_op+0x58/0x80 arch/x86/entry/entry_64.S:957 RIP: 0010:xt_cluster_hash net/netfilter/xt_cluster.c:55 [inline] RIP: 0010:xt_cluster_mt+0x6c1/0x840 net/netfilter/xt_cluster.c:127 RSP: 0018:ffff8801d2f6f2d0 EFLAGS: 00010293 RAX: ffff8801af700540 RBX: 0000000000000000 RCX: ffffffff84a2d1e1 RDX: 0000000000000000 RSI: ffff8801d2f6f478 RDI: ffff8801cafd336a RBP: ffff8801d2f6f2e8 R08: 0000000000000000 R09: 0000000000000001 R10: 0000000000000000 R11: 0000000000000000 R12: ffff8801b03b3d18 R13: ffff8801cafd3300 R14: dffffc0000000000 R15: ffff8801d2f6f478 ipt_do_table+0xa91/0x19b0 net/ipv4/netfilter/ip_tables.c:296 iptable_filter_hook+0x65/0x80 net/ipv4/netfilter/iptable_filter.c:41 nf_hook_entry_hookfn include/linux/netfilter.h:120 [inline] nf_hook_slow+0xba/0x1a0 net/netfilter/core.c:483 nf_hook include/linux/netfilter.h:243 [inline] NF_HOOK include/linux/netfilter.h:286 [inline] raw_send_hdrinc.isra.17+0xf39/0x1880 net/ipv4/raw.c:432 raw_sendmsg+0x14cd/0x26b0 net/ipv4/raw.c:669 inet_sendmsg+0x11f/0x5e0 net/ipv4/af_inet.c:763 sock_sendmsg_nosec net/socket.c:629 [inline] sock_sendmsg+0xca/0x110 net/socket.c:639 SYSC_sendto+0x361/0x5c0 net/socket.c:1748 SyS_sendto+0x40/0x50 net/socket.c:1716 do_syscall_64+0x280/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 RIP: 0033:0x441b49 RSP: 002b:00007ffff5ca8b18 EFLAGS: 00000216 ORIG_RAX: 000000000000002c RAX: ffffffffffffffda RBX: 00000000004002c8 RCX: 0000000000441b49 RDX: 0000000000000030 RSI: 0000000020ff7000 RDI: 0000000000000003 RBP: 00000000006cc018 R08: 000000002066354c R09: 0000000000000010 R10: 0000000000000000 R11: 0000000000000216 R12: 0000000000403470 R13: 0000000000403500 R14: 0000000000000000 R15: 0000000000000000 Dumping ftrace buffer: (ftrace buffer empty) Kernel Offset: disabled Rebooting in 86400 seconds.. Instead of adding checks for template ct on every target/match manipulating skb->_nfct, simply drop the template ct when skipping nf_conntrack_in(). 
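The control flow of the fix, reduced to a stand-alone sketch (struct and helper names are simplified stand-ins, not the kernel structures):

```c
#include <stdbool.h>
#include <stdio.h>

#define NF_ACCEPT 1

struct conn   { bool is_template; int refcnt; };
struct packet { struct conn *nfct; bool is_fragment; };

static void conn_put(struct conn *ct) { ct->refcnt--; }
static int  conntrack_in(struct packet *pkt) { (void)pkt; return NF_ACCEPT; }

static int local_out_hook(struct packet *pkt)
{
    if (pkt->is_fragment) {              /* IP_NODEFRAG: conntrack is skipped */
        struct conn *tmpl = pkt->nfct;

        if (tmpl && tmpl->is_template) {
            /* don't leave a template behind for later targets/matches */
            pkt->nfct = NULL;
            conn_put(tmpl);
        }
        return NF_ACCEPT;
    }
    return conntrack_in(pkt);
}

int main(void)
{
    struct conn tmpl  = { .is_template = true, .refcnt = 1 };
    struct packet pkt = { .nfct = &tmpl, .is_fragment = true };

    local_out_hook(&pkt);
    printf("template cleared: %s, refcnt: %d\n",
           pkt.nfct ? "no" : "yes", tmpl.refcnt);
    return 0;
}
```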
Fixes: 7b4fdf77a450ec ("netfilter: don't track fragmented packets") Reported-and-tested-by: syzbot+0346441ae0545cfcea3a@syzkaller.appspotmail.com Signed-off-by: Paolo Abeni Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index b50721d9d30e..9db988f9a4d7 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -154,8 +154,20 @@ static unsigned int ipv4_conntrack_local(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */ + if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */ + enum ip_conntrack_info ctinfo; + struct nf_conn *tmpl; + + tmpl = nf_ct_get(skb, &ctinfo); + if (tmpl && nf_ct_is_template(tmpl)) { + /* when skipping ct, clear templates to avoid fooling + * later targets/matches + */ + skb->_nfct = 0; + nf_ct_put(tmpl); + } return NF_ACCEPT; + } return nf_conntrack_in(state->net, PF_INET, state->hook, skb); } -- cgit v1.2.1 From d92191aa84e5f187d543867c3d54b38f294833fa Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 21 Mar 2018 13:55:42 +0100 Subject: netfilter: nf_tables: cache device name in flowtable object Devices going away have to grab the nfnl_lock from the netdev event path to avoid races with control plane updates. However, netlink dumps in netfilter do not hold nfnl_lock mutex. Cache the device name into the objects to avoid an use-after-free situation for a device that is going away. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 14777c404071..977d43e00f98 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4932,8 +4932,6 @@ nf_tables_flowtable_lookup_byhandle(const struct nft_table *table, return ERR_PTR(-ENOENT); } -#define NFT_FLOWTABLE_DEVICE_MAX 8 - static int nf_tables_parse_devices(const struct nft_ctx *ctx, const struct nlattr *attr, struct net_device *dev_array[], int *len) @@ -5006,7 +5004,7 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx, err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS], dev_array, &n); if (err < 0) - goto err1; + return err; ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL); if (!ops) { @@ -5026,6 +5024,8 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx, flowtable->ops[i].priv = &flowtable->data.rhashtable; flowtable->ops[i].hook = flowtable->data.type->hook; flowtable->ops[i].dev = dev_array[i]; + flowtable->dev_name[i] = kstrdup(dev_array[i]->name, + GFP_KERNEL); } err = 0; @@ -5203,8 +5203,10 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, err5: i = flowtable->ops_len; err4: - for (k = i - 1; k >= 0; k--) + for (k = i - 1; k >= 0; k--) { + kfree(flowtable->dev_name[k]); nf_unregister_net_hook(net, &flowtable->ops[k]); + } kfree(flowtable->ops); err3: @@ -5294,9 +5296,9 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, goto nla_put_failure; for (i = 0; i < flowtable->ops_len; i++) { - if (flowtable->ops[i].dev && + if (flowtable->dev_name[i][0] && nla_put_string(skb, 
NFTA_DEVICE_NAME, - flowtable->ops[i].dev->name)) + flowtable->dev_name[i])) goto nla_put_failure; } nla_nest_end(skb, nest_devs); @@ -5538,6 +5540,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev, continue; nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); + flowtable->dev_name[i][0] = '\0'; flowtable->ops[i].dev = NULL; break; } -- cgit v1.2.1 From 90d2723c6d4cb2ace50fc3b932a2bcc77710450b Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 20 Mar 2018 17:00:19 +0100 Subject: netfilter: nf_tables: do not hold reference on netdevice from preparation phase The netfilter netdevice event handler hold the nfnl_lock mutex, this avoids races with a device going away while such device is being attached to hooks from the netlink control plane. Therefore, either control plane bails out with ENOENT or netdevice event path waits until the hook that is attached to net_device is registered. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 977d43e00f98..530e12ae52d7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1288,8 +1288,6 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) free_percpu(basechain->stats); if (basechain->stats) static_branch_dec(&nft_counters_enabled); - if (basechain->ops.dev != NULL) - dev_put(basechain->ops.dev); kfree(chain->name); kfree(basechain); } else { @@ -1356,7 +1354,7 @@ static int nft_chain_parse_hook(struct net *net, } nla_strlcpy(ifname, ha[NFTA_HOOK_DEV], IFNAMSIZ); - dev = dev_get_by_name(net, ifname); + dev = __dev_get_by_name(net, ifname); if (!dev) { module_put(type->owner); return -ENOENT; @@ -1373,8 +1371,6 @@ static int nft_chain_parse_hook(struct net *net, static void nft_chain_release_hook(struct nft_chain_hook *hook) { module_put(hook->type->owner); - if (hook->dev != NULL) - dev_put(hook->dev); } static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, @@ -4948,7 +4944,7 @@ static int nf_tables_parse_devices(const struct nft_ctx *ctx, } nla_strlcpy(ifname, tmp, IFNAMSIZ); - dev = dev_get_by_name(ctx->net, ifname); + dev = __dev_get_by_name(ctx->net, ifname); if (!dev) { err = -ENOENT; goto err1; @@ -5007,10 +5003,8 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx, return err; ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL); - if (!ops) { - err = -ENOMEM; - goto err1; - } + if (!ops) + return -ENOMEM; flowtable->hooknum = hooknum; flowtable->priority = priority; @@ -5028,11 +5022,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx, GFP_KERNEL); } - err = 0; -err1: - for (i = 0; i < n; i++) - dev_put(dev_array[i]); - return err; } -- cgit v1.2.1 From 9a3fb9fb84cc30577c1b012a6a3efda944684291 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Mon, 19 Mar 2018 07:15:39 +0100 Subject: xfrm: Fix transport mode skb control buffer usage. A recent commit introduced a new struct xfrm_trans_cb that is used with the sk_buff control buffer. Unfortunately it placed the structure in front of the control buffer and overlooked that the IPv4/IPv6 control buffer is still needed for some layer 4 protocols. As a result the IPv4/IPv6 control buffer is overwritten with this structure. Fix this by setting a apropriate header in front of the structure. 
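The layout idea, private state in skb->cb[] must start with the protocol control block so both can coexist, in a stand-alone sketch (struct sizes here are illustrative placeholders, not the real inet_skb_parm/inet6_skb_parm definitions):

```c
#include <stddef.h>
#include <stdio.h>

struct inet_skb_parm  { unsigned char opt[24]; };  /* illustrative size only */
struct inet6_skb_parm { unsigned char opt[36]; };  /* illustrative size only */

struct xfrm_trans_cb_sketch {
    union {                        /* reserves the area layer 4 code still expects */
        struct inet_skb_parm  h4;
        struct inet6_skb_parm h6;
    } header;
    int (*finish)(void *net, void *sk, void *skb);  /* private xfrm state */
};

int main(void)
{
    /* the private member now lives after the protocol control block,
     * instead of overlapping it at offset 0
     */
    printf("finish lives at offset %zu\n",
           offsetof(struct xfrm_trans_cb_sketch, finish));
    return 0;
}
```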
Fixes acf568ee859f ("xfrm: Reinject transport-mode packets ...") Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_input.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'net') diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 1472c0857975..81788105c164 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -26,6 +26,12 @@ struct xfrm_trans_tasklet { }; struct xfrm_trans_cb { + union { + struct inet_skb_parm h4; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_skb_parm h6; +#endif + } header; int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb); }; -- cgit v1.2.1 From f6cc9c054e77b9a28d4594bcc201697edb21dfd2 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 22 Mar 2018 19:53:33 +0200 Subject: ip_tunnel: Emit events for post-register MTU changes For tunnels created with IFLA_MTU, MTU of the netdevice is set by rtnl_create_link() (called from rtnl_newlink()) before the device is registered. However without IFLA_MTU that's not done. rtnl_newlink() proceeds by calling struct rtnl_link_ops.newlink, which via ip_tunnel_newlink() calls register_netdevice(), and that emits NETDEV_REGISTER. Thus any listeners that inspect the netdevice get the MTU of 0. After ip_tunnel_newlink() corrects the MTU after registering the netdevice, but since there's no event, the listeners don't get to know about the MTU until something else happens--such as a NETDEV_UP event. That's not ideal. So instead of setting the MTU directly, go through dev_set_mtu(), which takes care of distributing the necessary NETDEV_PRECHANGEMTU and NETDEV_CHANGEMTU events. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- net/ipv4/ip_tunnel.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 6d21068f9b55..7b85ffad5d74 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -362,13 +362,18 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, struct ip_tunnel *nt; struct net_device *dev; int t_hlen; + int mtu; + int err; BUG_ON(!itn->fb_tunnel_dev); dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms); if (IS_ERR(dev)) return ERR_CAST(dev); - dev->mtu = ip_tunnel_bind_dev(dev); + mtu = ip_tunnel_bind_dev(dev); + err = dev_set_mtu(dev, mtu); + if (err) + goto err_dev_set_mtu; nt = netdev_priv(dev); t_hlen = nt->hlen + sizeof(struct iphdr); @@ -376,6 +381,10 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; ip_tunnel_add(itn, nt); return nt; + +err_dev_set_mtu: + unregister_netdevice(dev); + return ERR_PTR(err); } int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, @@ -1102,17 +1111,24 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], nt->fwmark = fwmark; err = register_netdevice(dev); if (err) - goto out; + goto err_register_netdevice; if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) eth_hw_addr_random(dev); mtu = ip_tunnel_bind_dev(dev); - if (!tb[IFLA_MTU]) - dev->mtu = mtu; + if (!tb[IFLA_MTU]) { + err = dev_set_mtu(dev, mtu); + if (err) + goto err_dev_set_mtu; + } ip_tunnel_add(itn, nt); -out: + return 0; + +err_dev_set_mtu: + unregister_netdevice(dev); +err_register_netdevice: return err; } EXPORT_SYMBOL_GPL(ip_tunnel_newlink); -- cgit v1.2.1 From 1bfa26ff8c4b7512f4e4efa6df211239223033d4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 23 Mar 2018 
07:56:58 -0700 Subject: ipv6: fix possible deadlock in rt6_age_examine_exception() syzbot reported a LOCKDEP splat [1] in rt6_age_examine_exception() rt6_age_examine_exception() is called while rt6_exception_lock is held. This lock is the lower one in the lock hierarchy, thus we can not call dst_neigh_lookup() function, as it can fallback to neigh_create() We should instead do a pure RCU lookup. As a bonus we avoid a pair of atomic operations on neigh refcount. [1] WARNING: possible circular locking dependency detected 4.16.0-rc4+ #277 Not tainted syz-executor7/4015 is trying to acquire lock: (&ndev->lock){++--}, at: [<00000000416dce19>] __ipv6_dev_mc_dec+0x45/0x350 net/ipv6/mcast.c:928 but task is already holding lock: (&tbl->lock){++-.}, at: [<00000000b5cb1d65>] neigh_ifdown+0x3d/0x250 net/core/neighbour.c:292 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #3 (&tbl->lock){++-.}: __raw_write_lock_bh include/linux/rwlock_api_smp.h:203 [inline] _raw_write_lock_bh+0x31/0x40 kernel/locking/spinlock.c:312 __neigh_create+0x87e/0x1d90 net/core/neighbour.c:528 neigh_create include/net/neighbour.h:315 [inline] ip6_neigh_lookup+0x9a7/0xba0 net/ipv6/route.c:228 dst_neigh_lookup include/net/dst.h:405 [inline] rt6_age_examine_exception net/ipv6/route.c:1609 [inline] rt6_age_exceptions+0x381/0x660 net/ipv6/route.c:1645 fib6_age+0xfb/0x140 net/ipv6/ip6_fib.c:2033 fib6_clean_node+0x389/0x580 net/ipv6/ip6_fib.c:1919 fib6_walk_continue+0x46c/0x8a0 net/ipv6/ip6_fib.c:1845 fib6_walk+0x91/0xf0 net/ipv6/ip6_fib.c:1893 fib6_clean_tree+0x1e6/0x340 net/ipv6/ip6_fib.c:1970 __fib6_clean_all+0x1f4/0x3a0 net/ipv6/ip6_fib.c:1986 fib6_clean_all net/ipv6/ip6_fib.c:1997 [inline] fib6_run_gc+0x16b/0x3c0 net/ipv6/ip6_fib.c:2053 ndisc_netdev_event+0x3c2/0x4a0 net/ipv6/ndisc.c:1781 notifier_call_chain+0x136/0x2c0 kernel/notifier.c:93 __raw_notifier_call_chain kernel/notifier.c:394 [inline] raw_notifier_call_chain+0x2d/0x40 kernel/notifier.c:401 call_netdevice_notifiers_info+0x32/0x70 net/core/dev.c:1707 call_netdevice_notifiers net/core/dev.c:1725 [inline] __dev_notify_flags+0x262/0x430 net/core/dev.c:6960 dev_change_flags+0xf5/0x140 net/core/dev.c:6994 devinet_ioctl+0x126a/0x1ac0 net/ipv4/devinet.c:1080 inet_ioctl+0x184/0x310 net/ipv4/af_inet.c:919 sock_do_ioctl+0xef/0x390 net/socket.c:957 sock_ioctl+0x36b/0x610 net/socket.c:1081 vfs_ioctl fs/ioctl.c:46 [inline] do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:686 SYSC_ioctl fs/ioctl.c:701 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:692 do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 -> #2 (rt6_exception_lock){+.-.}: __raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline] _raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:168 spin_lock_bh include/linux/spinlock.h:315 [inline] rt6_flush_exceptions+0x21/0x210 net/ipv6/route.c:1367 fib6_del_route net/ipv6/ip6_fib.c:1677 [inline] fib6_del+0x624/0x12c0 net/ipv6/ip6_fib.c:1761 __ip6_del_rt+0xc7/0x120 net/ipv6/route.c:2980 ip6_del_rt+0x132/0x1a0 net/ipv6/route.c:2993 __ipv6_dev_ac_dec+0x3b1/0x600 net/ipv6/anycast.c:332 ipv6_dev_ac_dec net/ipv6/anycast.c:345 [inline] ipv6_sock_ac_close+0x2b4/0x3e0 net/ipv6/anycast.c:200 inet6_release+0x48/0x70 net/ipv6/af_inet6.c:433 sock_release+0x8d/0x1e0 net/socket.c:594 sock_close+0x16/0x20 net/socket.c:1149 __fput+0x327/0x7e0 fs/file_table.c:209 ____fput+0x15/0x20 fs/file_table.c:243 task_work_run+0x199/0x270 kernel/task_work.c:113 exit_task_work include/linux/task_work.h:22 [inline] 
do_exit+0x9bb/0x1ad0 kernel/exit.c:865 do_group_exit+0x149/0x400 kernel/exit.c:968 get_signal+0x73a/0x16d0 kernel/signal.c:2469 do_signal+0x90/0x1e90 arch/x86/kernel/signal.c:809 exit_to_usermode_loop+0x258/0x2f0 arch/x86/entry/common.c:162 prepare_exit_to_usermode arch/x86/entry/common.c:196 [inline] syscall_return_slowpath arch/x86/entry/common.c:265 [inline] do_syscall_64+0x6ec/0x940 arch/x86/entry/common.c:292 entry_SYSCALL_64_after_hwframe+0x42/0xb7 -> #1 (&(&tb->tb6_lock)->rlock){+.-.}: __raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline] _raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:168 spin_lock_bh include/linux/spinlock.h:315 [inline] __ip6_ins_rt+0x56/0x90 net/ipv6/route.c:1007 ip6_route_add+0x141/0x190 net/ipv6/route.c:2955 addrconf_prefix_route+0x44f/0x620 net/ipv6/addrconf.c:2359 fixup_permanent_addr net/ipv6/addrconf.c:3368 [inline] addrconf_permanent_addr net/ipv6/addrconf.c:3391 [inline] addrconf_notify+0x1ad2/0x2310 net/ipv6/addrconf.c:3460 notifier_call_chain+0x136/0x2c0 kernel/notifier.c:93 __raw_notifier_call_chain kernel/notifier.c:394 [inline] raw_notifier_call_chain+0x2d/0x40 kernel/notifier.c:401 call_netdevice_notifiers_info+0x32/0x70 net/core/dev.c:1707 call_netdevice_notifiers net/core/dev.c:1725 [inline] __dev_notify_flags+0x15d/0x430 net/core/dev.c:6958 dev_change_flags+0xf5/0x140 net/core/dev.c:6994 do_setlink+0xa22/0x3bb0 net/core/rtnetlink.c:2357 rtnl_newlink+0xf37/0x1a50 net/core/rtnetlink.c:2965 rtnetlink_rcv_msg+0x57f/0xb10 net/core/rtnetlink.c:4641 netlink_rcv_skb+0x14b/0x380 net/netlink/af_netlink.c:2444 rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:4659 netlink_unicast_kernel net/netlink/af_netlink.c:1308 [inline] netlink_unicast+0x4c4/0x6b0 net/netlink/af_netlink.c:1334 netlink_sendmsg+0xa4a/0xe60 net/netlink/af_netlink.c:1897 sock_sendmsg_nosec net/socket.c:629 [inline] sock_sendmsg+0xca/0x110 net/socket.c:639 ___sys_sendmsg+0x767/0x8b0 net/socket.c:2047 __sys_sendmsg+0xe5/0x210 net/socket.c:2081 SYSC_sendmsg net/socket.c:2092 [inline] SyS_sendmsg+0x2d/0x50 net/socket.c:2088 do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 -> #0 (&ndev->lock){++--}: lock_acquire+0x1d5/0x580 kernel/locking/lockdep.c:3920 __raw_write_lock_bh include/linux/rwlock_api_smp.h:203 [inline] _raw_write_lock_bh+0x31/0x40 kernel/locking/spinlock.c:312 __ipv6_dev_mc_dec+0x45/0x350 net/ipv6/mcast.c:928 ipv6_dev_mc_dec+0x110/0x1f0 net/ipv6/mcast.c:961 pndisc_destructor+0x21a/0x340 net/ipv6/ndisc.c:392 pneigh_ifdown net/core/neighbour.c:695 [inline] neigh_ifdown+0x149/0x250 net/core/neighbour.c:294 rt6_disable_ip+0x537/0x700 net/ipv6/route.c:3874 addrconf_ifdown+0x14b/0x14f0 net/ipv6/addrconf.c:3633 addrconf_notify+0x5f8/0x2310 net/ipv6/addrconf.c:3557 notifier_call_chain+0x136/0x2c0 kernel/notifier.c:93 __raw_notifier_call_chain kernel/notifier.c:394 [inline] raw_notifier_call_chain+0x2d/0x40 kernel/notifier.c:401 call_netdevice_notifiers_info+0x32/0x70 net/core/dev.c:1707 call_netdevice_notifiers net/core/dev.c:1725 [inline] __dev_notify_flags+0x262/0x430 net/core/dev.c:6960 dev_change_flags+0xf5/0x140 net/core/dev.c:6994 devinet_ioctl+0x126a/0x1ac0 net/ipv4/devinet.c:1080 inet_ioctl+0x184/0x310 net/ipv4/af_inet.c:919 packet_ioctl+0x1ff/0x310 net/packet/af_packet.c:4066 sock_do_ioctl+0xef/0x390 net/socket.c:957 sock_ioctl+0x36b/0x610 net/socket.c:1081 vfs_ioctl fs/ioctl.c:46 [inline] do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:686 SYSC_ioctl fs/ioctl.c:701 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:692 
do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 other info that might help us debug this: Chain exists of: &ndev->lock --> rt6_exception_lock --> &tbl->lock Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&tbl->lock); lock(rt6_exception_lock); lock(&tbl->lock); lock(&ndev->lock); *** DEADLOCK *** 2 locks held by syz-executor7/4015: #0: (rtnl_mutex){+.+.}, at: [<00000000a2f16daa>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 #1: (&tbl->lock){++-.}, at: [<00000000b5cb1d65>] neigh_ifdown+0x3d/0x250 net/core/neighbour.c:292 stack backtrace: CPU: 0 PID: 4015 Comm: syz-executor7 Not tainted 4.16.0-rc4+ #277 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x24d lib/dump_stack.c:53 print_circular_bug.isra.38+0x2cd/0x2dc kernel/locking/lockdep.c:1223 check_prev_add kernel/locking/lockdep.c:1863 [inline] check_prevs_add kernel/locking/lockdep.c:1976 [inline] validate_chain kernel/locking/lockdep.c:2417 [inline] __lock_acquire+0x30a8/0x3e00 kernel/locking/lockdep.c:3431 lock_acquire+0x1d5/0x580 kernel/locking/lockdep.c:3920 __raw_write_lock_bh include/linux/rwlock_api_smp.h:203 [inline] _raw_write_lock_bh+0x31/0x40 kernel/locking/spinlock.c:312 __ipv6_dev_mc_dec+0x45/0x350 net/ipv6/mcast.c:928 ipv6_dev_mc_dec+0x110/0x1f0 net/ipv6/mcast.c:961 pndisc_destructor+0x21a/0x340 net/ipv6/ndisc.c:392 pneigh_ifdown net/core/neighbour.c:695 [inline] neigh_ifdown+0x149/0x250 net/core/neighbour.c:294 rt6_disable_ip+0x537/0x700 net/ipv6/route.c:3874 addrconf_ifdown+0x14b/0x14f0 net/ipv6/addrconf.c:3633 addrconf_notify+0x5f8/0x2310 net/ipv6/addrconf.c:3557 notifier_call_chain+0x136/0x2c0 kernel/notifier.c:93 __raw_notifier_call_chain kernel/notifier.c:394 [inline] raw_notifier_call_chain+0x2d/0x40 kernel/notifier.c:401 call_netdevice_notifiers_info+0x32/0x70 net/core/dev.c:1707 call_netdevice_notifiers net/core/dev.c:1725 [inline] __dev_notify_flags+0x262/0x430 net/core/dev.c:6960 dev_change_flags+0xf5/0x140 net/core/dev.c:6994 devinet_ioctl+0x126a/0x1ac0 net/ipv4/devinet.c:1080 inet_ioctl+0x184/0x310 net/ipv4/af_inet.c:919 packet_ioctl+0x1ff/0x310 net/packet/af_packet.c:4066 sock_do_ioctl+0xef/0x390 net/socket.c:957 sock_ioctl+0x36b/0x610 net/socket.c:1081 vfs_ioctl fs/ioctl.c:46 [inline] do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:686 SYSC_ioctl fs/ioctl.c:701 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:692 do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 Fixes: c757faa8bfa2 ("ipv6: prepare fib6_age() for exception table") Signed-off-by: Eric Dumazet Cc: Wei Wang Cc: Martin KaFai Lau Acked-by: Wei Wang Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b0d5c64e1978..b33d057ac5eb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1626,11 +1626,10 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket, struct neighbour *neigh; __u8 neigh_flags = 0; - neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway); - if (neigh) { + neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); + if (neigh) neigh_flags = neigh->flags; - neigh_release(neigh); - } + if (!(neigh_flags & NTF_ROUTER)) { RT6_TRACE("purging route %p via non-router but gateway\n", rt); @@ -1654,7 +1653,8 @@ void rt6_age_exceptions(struct rt6_info *rt, if (!rcu_access_pointer(rt->rt6i_exception_bucket)) return; - spin_lock_bh(&rt6_exception_lock); + rcu_read_lock_bh(); + spin_lock(&rt6_exception_lock); bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, lockdep_is_held(&rt6_exception_lock)); @@ -1668,7 +1668,8 @@ void rt6_age_exceptions(struct rt6_info *rt, bucket++; } } - spin_unlock_bh(&rt6_exception_lock); + spin_unlock(&rt6_exception_lock); + rcu_read_unlock_bh(); } struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, -- cgit v1.2.1 From f8fb3419ead44f9a3136995acd24e35da4525177 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Tue, 20 Mar 2018 03:13:27 +0100 Subject: batman-adv: fix multicast-via-unicast transmission with AP isolation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For multicast frames AP isolation is only supposed to be checked on the receiving nodes and never on the originating one. Furthermore, the isolation or wifi flag bits should only be intepreted as such for unicast and never multicast TT entries. By injecting flags to the multicast TT entry claimed by a single target node it was verified in tests that this multicast address becomes unreachable, leading to packet loss. Omitting the "src" parameter to the batadv_transtable_search() call successfully skipped the AP isolation check and made the target reachable again. 
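A rough stand-alone sketch of why the 'src' argument matters (plain C; the flag name and the isolation rule are simplified stand-ins for the batman-adv translation table code):

```c
#include <stdbool.h>
#include <stdio.h>

#define FLAG_ISOLATED 0x1   /* stand-in for the AP-isolation TT flag */

struct tt_entry { const char *addr; unsigned int flags; };

static bool ap_isolated(const struct tt_entry *src, const struct tt_entry *dst)
{
    /* isolation only applies when both endpoints are known and flagged */
    return src && dst && (src->flags & FLAG_ISOLATED) && (dst->flags & FLAG_ISOLATED);
}

static const struct tt_entry *transtable_search(const struct tt_entry *src,
                                                const struct tt_entry *dst)
{
    if (ap_isolated(src, dst))
        return NULL;        /* destination treated as unreachable */
    return dst;
}

int main(void)
{
    struct tt_entry src = { "wifi client",       FLAG_ISOLATED };
    struct tt_entry mc  = { "33:33:00:01:00:02", FLAG_ISOLATED };  /* injected flag */

    /* passing src on the originating node: the multicast entry is wrongly dropped */
    printf("with src:    %s\n", transtable_search(&src, &mc) ? "found" : "dropped");
    /* fix: the originator passes NULL, so the isolation check is skipped */
    printf("without src: %s\n", transtable_search(NULL, &mc) ? "found" : "dropped");
    return 0;
}
```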
Fixes: 1d8ab8d3c176 ("batman-adv: Modified forwarding behaviour for multicast packets") Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/multicast.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index d70640135e3a..ee56af5c43e0 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -814,8 +814,8 @@ static struct batadv_orig_node * batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv, struct ethhdr *ethhdr) { - return batadv_transtable_search(bat_priv, ethhdr->h_source, - ethhdr->h_dest, BATADV_NO_FLAGS); + return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest, + BATADV_NO_FLAGS); } /** -- cgit v1.2.1 From a752c0a4524889cdc0765925258fd1fd72344100 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Thu, 22 Mar 2018 00:21:32 +0100 Subject: batman-adv: fix packet loss for broadcasted DHCP packets to a server MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DHCP connectivity issues can currently occur if the following conditions are met: 1) A DHCP packet from a client to a server 2) This packet has a multicast destination 3) This destination has a matching entry in the translation table (FF:FF:FF:FF:FF:FF for IPv4, 33:33:00:01:00:02/33:33:00:01:00:03 for IPv6) 4) The orig-node determined by TT for the multicast destination does not match the orig-node determined by best-gateway-selection In this case the DHCP packet will be dropped. The "gateway-out-of-range" check is supposed to only be applied to unicasted DHCP packets to a specific DHCP server. In that case dropping the the unicasted frame forces the client to retry via a broadcasted one, but now directed to the new best gateway. A DHCP packet with broadcast/multicast destination is already ensured to always be delivered to the best gateway. Dropping a multicasted DHCP packet here will only prevent completing DHCP as there is no other fallback. So far, it seems the unicast check was implicitly performed by expecting the batadv_transtable_search() to return NULL for multicast destinations. However, a multicast address could have always ended up in the translation table and in fact is now common. To fix this potential loss of a DHCP client-to-server packet to a multicast address this patch adds an explicit multicast destination check to reliably bail out of the gateway-out-of-range check for such destinations. The issue and fix were tested in the following three node setup: - Line topology, A-B-C - A: gateway client, DHCP client - B: gateway server, hop-penalty increased: 30->60, DHCP server - C: gateway server, code modifications to announce FF:FF:FF:FF:FF:FF Without this patch, A would never transmit its DHCP Discover packet due to an always "out-of-range" condition. With this patch, a full DHCP handshake between A and B was possible again. 
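The added early exit boils down to a one-line test; a stand-alone sketch (plain C, simplified; the second argument stands in for the full best-gateway comparison):

```c
#include <stdbool.h>
#include <stdio.h>

static bool is_multicast_ether_addr(const unsigned char *addr)
{
    return addr[0] & 0x01;  /* group bit of the first octet */
}

static bool gw_out_of_range(const unsigned char *dest, bool dest_is_not_best_gw)
{
    if (is_multicast_ether_addr(dest))
        return false;   /* broadcast/multicast DHCP already goes to the best gateway */

    /* unicast case: drop if the chosen server is no longer the best gateway,
     * forcing the client to rediscover via broadcast
     */
    return dest_is_not_best_gw;
}

int main(void)
{
    const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    const unsigned char ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

    printf("broadcast discover dropped:    %d\n", gw_out_of_range(bcast, true));  /* 0 */
    printf("stale unicast request dropped: %d\n", gw_out_of_range(ucast, true));  /* 1 */
    return 0;
}
```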
Fixes: be7af5cf9cae ("batman-adv: refactoring gateway handling code") Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/gateway_client.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 37fe9a644f22..808d2dd4a839 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -746,7 +746,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, { struct batadv_neigh_node *neigh_curr = NULL; struct batadv_neigh_node *neigh_old = NULL; - struct batadv_orig_node *orig_dst_node; + struct batadv_orig_node *orig_dst_node = NULL; struct batadv_gw_node *gw_node = NULL; struct batadv_gw_node *curr_gw = NULL; struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo; @@ -757,6 +757,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, vid = batadv_get_vid(skb, 0); + if (is_multicast_ether_addr(ethhdr->h_dest)) + goto out; + orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest, vid); if (!orig_dst_node) -- cgit v1.2.1 From 32c1733f0dd4bd11d6e65512bf4dc337c0452c8e Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Thu, 22 Mar 2018 21:12:39 -0600 Subject: netfilter: nf_socket: Fix out of bounds access in nf_sk_lookup_slow_v{4,6} skb_header_pointer will copy data into a buffer if data is non linear, otherwise it will return a pointer in the linear section of the data. nf_sk_lookup_slow_v{4,6} always copies data of size udphdr but later accesses memory within the size of tcphdr (th->doff) in case of TCP packets. This causes a crash when running with KASAN with the following call stack - BUG: KASAN: stack-out-of-bounds in xt_socket_lookup_slow_v4+0x524/0x718 net/netfilter/xt_socket.c:178 Read of size 2 at addr ffffffe3d417a87c by task syz-executor/28971 CPU: 2 PID: 28971 Comm: syz-executor Tainted: G B W O 4.9.65+ #1 Call trace: [] dump_backtrace+0x0/0x428 arch/arm64/kernel/traps.c:76 [] show_stack+0x28/0x38 arch/arm64/kernel/traps.c:226 [] __dump_stack lib/dump_stack.c:15 [inline] [] dump_stack+0xd4/0x124 lib/dump_stack.c:51 [] print_address_description+0x68/0x258 mm/kasan/report.c:248 [] kasan_report_error mm/kasan/report.c:347 [inline] [] kasan_report.part.2+0x228/0x2f0 mm/kasan/report.c:371 [] kasan_report+0x5c/0x70 mm/kasan/report.c:372 [] check_memory_region_inline mm/kasan/kasan.c:308 [inline] [] __asan_load2+0x84/0x98 mm/kasan/kasan.c:739 [] __tcp_hdrlen include/linux/tcp.h:35 [inline] [] xt_socket_lookup_slow_v4+0x524/0x718 net/netfilter/xt_socket.c:178 Fix this by copying data into appropriate size headers based on protocol. 
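The bug class is easy to reproduce in a stand-alone sketch (user-space C; header_pointer() is a simplified stand-in for skb_header_pointer(), and the header structs are ad-hoc, not the uapi definitions):

```c
#include <stdio.h>
#include <string.h>

struct udp_hdr { unsigned short source, dest, len, check; };        /*  8 bytes */
struct tcp_hdr { unsigned short source, dest;
                 unsigned int   seq, ack_seq;
                 unsigned char  doff_res, flags;
                 unsigned short window, check, urg_ptr; };          /* 20 bytes */

/* simplified stand-in for skb_header_pointer(): copy len bytes at off into buf */
static void *header_pointer(const unsigned char *pkt, size_t pktlen,
                            size_t off, size_t len, void *buf)
{
    if (off + len > pktlen)
        return NULL;
    memcpy(buf, pkt + off, len);
    return buf;
}

int main(void)
{
    unsigned char packet[64] = { [12] = 0x50 };  /* fake TCP data-offset byte */
    struct tcp_hdr hdr;                          /* large enough for either case */
    int protocol_is_tcp = 1;

    /* the fix: size the copy by protocol instead of always using a udphdr,
     * so reading hdr.doff_res below never runs past what was copied
     */
    size_t len = protocol_is_tcp ? sizeof(struct tcp_hdr) : sizeof(struct udp_hdr);

    if (header_pointer(packet, sizeof(packet), 0, len, &hdr))
        printf("copied %zu header bytes, data offset nibble = %d\n",
               len, hdr.doff_res >> 4);
    return 0;
}
```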
Fixes: a583636a83ea ("inet: refactor inet[6]_lookup functions to take skb") Signed-off-by: Tejaswi Tanikella Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/nf_socket_ipv4.c | 6 ++++-- net/ipv6/netfilter/nf_socket_ipv6.c | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c index e9293bdebba0..4824b1e183a1 100644 --- a/net/ipv4/netfilter/nf_socket_ipv4.c +++ b/net/ipv4/netfilter/nf_socket_ipv4.c @@ -108,10 +108,12 @@ struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb, int doff = 0; if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) { - struct udphdr _hdr, *hp; + struct tcphdr _hdr; + struct udphdr *hp; hp = skb_header_pointer(skb, ip_hdrlen(skb), - sizeof(_hdr), &_hdr); + iph->protocol == IPPROTO_UDP ? + sizeof(*hp) : sizeof(_hdr), &_hdr); if (hp == NULL) return NULL; diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c index ebb2bf84232a..f14de4b6d639 100644 --- a/net/ipv6/netfilter/nf_socket_ipv6.c +++ b/net/ipv6/netfilter/nf_socket_ipv6.c @@ -116,9 +116,11 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, } if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) { - struct udphdr _hdr, *hp; + struct tcphdr _hdr; + struct udphdr *hp; - hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); + hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ? + sizeof(*hp) : sizeof(_hdr), &_hdr); if (hp == NULL) return NULL; -- cgit v1.2.1 From bc58a1baf2a97838a422ce4e75180c4b680e7a9b Mon Sep 17 00:00:00 2001 From: Hans Wippel Date: Fri, 23 Mar 2018 11:05:45 +0100 Subject: net/ipv4: disable SMC TCP option with SYN Cookies Currently, the SMC experimental TCP option in a SYN packet is lost on the server side when SYN Cookies are active. However, the corresponding SYNACK sent back to the client contains the SMC option. This causes an inconsistent view of the SMC capabilities on the client and server. This patch disables the SMC option in the SYNACK when SYN Cookies are active to avoid this issue. Fixes: 60e2a7780793b ("tcp: TCP experimental option for SMC") Signed-off-by: Hans Wippel Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/ipv4/syncookies.c | 2 ++ net/ipv4/tcp_input.c | 3 +++ net/ipv6/syncookies.c | 2 ++ 3 files changed, 7 insertions(+) (limited to 'net') diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index fda37f2862c9..c3387dfd725b 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -349,6 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) req->ts_recent = tcp_opt.saw_tstamp ? 
tcp_opt.rcv_tsval : 0; treq->snt_synack = 0; treq->tfo_listener = false; + if (IS_ENABLED(CONFIG_SMC)) + ireq->smc_ok = 0; ireq->ir_iif = inet_request_bound_dev_if(sk, skb); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9a1b3c1c1c14..ff6cd98ce8d5 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6256,6 +6256,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); + if (IS_ENABLED(CONFIG_SMC) && want_cookie) + tmp_opt.smc_ok = 0; + tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb, sk); inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index e7a3a6b6cf56..e997141aed8c 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -217,6 +217,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) treq->snt_isn = cookie; treq->ts_off = 0; treq->txhash = net_tx_rndhash(); + if (IS_ENABLED(CONFIG_SMC)) + ireq->smc_ok = 0; /* * We need to lookup the dst_entry to get the correct window size. -- cgit v1.2.1 From 7880287981b60a6808f39f297bb66936e8bdf57a Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Fri, 23 Mar 2018 13:49:02 +0100 Subject: netlink: make sure nladdr has correct size in netlink_connect() KMSAN reports use of uninitialized memory in the case when |alen| is smaller than sizeof(struct sockaddr_nl), and therefore |nladdr| isn't fully copied from the userspace. Signed-off-by: Alexander Potapenko Fixes: 1da177e4c3f41524 ("Linux-2.6.12-rc2") Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/netlink/af_netlink.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 07e8478068f0..70c455341243 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1085,6 +1085,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, if (addr->sa_family != AF_NETLINK) return -EINVAL; + if (alen < sizeof(struct sockaddr_nl)) + return -EINVAL; + if ((nladdr->nl_groups || nladdr->nl_pid) && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) return -EPERM; -- cgit v1.2.1 From 10b8a3de603df7b96004179b1b33b1708c76d144 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 23 Mar 2018 14:47:30 +0100 Subject: ipv6: the entire IPv6 header chain must fit the first fragment While building ipv6 datagram we currently allow arbitrary large extheaders, even beyond pmtu size. The syzbot has found a way to exploit the above to trigger the following splat: kernel BUG at ./include/linux/skbuff.h:2073! 
invalid opcode: 0000 [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 1 PID: 4230 Comm: syzkaller672661 Not tainted 4.16.0-rc2+ #326 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:__skb_pull include/linux/skbuff.h:2073 [inline] RIP: 0010:__ip6_make_skb+0x1ac8/0x2190 net/ipv6/ip6_output.c:1636 RSP: 0018:ffff8801bc18f0f0 EFLAGS: 00010293 RAX: ffff8801b17400c0 RBX: 0000000000000738 RCX: ffffffff84f01828 RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff8801b415ac18 RBP: ffff8801bc18f360 R08: ffff8801b4576844 R09: 0000000000000000 R10: ffff8801bc18f380 R11: ffffed00367aee4e R12: 00000000000000d6 R13: ffff8801b415a740 R14: dffffc0000000000 R15: ffff8801b45767c0 FS: 0000000001535880(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000002000b000 CR3: 00000001b4123001 CR4: 00000000001606e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ip6_finish_skb include/net/ipv6.h:969 [inline] udp_v6_push_pending_frames+0x269/0x3b0 net/ipv6/udp.c:1073 udpv6_sendmsg+0x2a96/0x3400 net/ipv6/udp.c:1343 inet_sendmsg+0x11f/0x5e0 net/ipv4/af_inet.c:764 sock_sendmsg_nosec net/socket.c:630 [inline] sock_sendmsg+0xca/0x110 net/socket.c:640 ___sys_sendmsg+0x320/0x8b0 net/socket.c:2046 __sys_sendmmsg+0x1ee/0x620 net/socket.c:2136 SYSC_sendmmsg net/socket.c:2167 [inline] SyS_sendmmsg+0x35/0x60 net/socket.c:2162 do_syscall_64+0x280/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 RIP: 0033:0x4404c9 RSP: 002b:00007ffdce35f948 EFLAGS: 00000217 ORIG_RAX: 0000000000000133 RAX: ffffffffffffffda RBX: 00000000004002c8 RCX: 00000000004404c9 RDX: 0000000000000003 RSI: 0000000020001f00 RDI: 0000000000000003 RBP: 00000000006cb018 R08: 00000000004002c8 R09: 00000000004002c8 R10: 0000000020000080 R11: 0000000000000217 R12: 0000000000401df0 R13: 0000000000401e80 R14: 0000000000000000 R15: 0000000000000000 Code: ff e8 1d 5e b9 fc e9 15 e9 ff ff e8 13 5e b9 fc e9 44 e8 ff ff e8 29 5e b9 fc e9 c0 e6 ff ff e8 3f f3 80 fc 0f 0b e8 38 f3 80 fc <0f> 0b 49 8d 87 80 00 00 00 4d 8d 87 84 00 00 00 48 89 85 20 fe RIP: __skb_pull include/linux/skbuff.h:2073 [inline] RSP: ffff8801bc18f0f0 RIP: __ip6_make_skb+0x1ac8/0x2190 net/ipv6/ip6_output.c:1636 RSP: ffff8801bc18f0f0 As stated by RFC 7112 section 5: When a host fragments an IPv6 datagram, it MUST include the entire IPv6 Header Chain in the First Fragment. So this patch addresses the issue dropping datagrams with excessive extheader length. It also updates the error path to report to the calling socket nonnegative pmtu values. The issue apparently predates git history. v1 -> v2: cleanup error path, as per Eric's suggestion Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot+91e6f9932ff122fa4410@syzkaller.appspotmail.com Signed-off-by: Paolo Abeni Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv6/ip6_output.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a8a919520090..5cb18c8ba9b2 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1246,7 +1246,7 @@ static int __ip6_append_data(struct sock *sk, const struct sockcm_cookie *sockc) { struct sk_buff *skb, *skb_prev = NULL; - unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu; int exthdrlen = 0; int dst_exthdrlen = 0; int hh_len; @@ -1282,6 +1282,12 @@ static int __ip6_append_data(struct sock *sk, sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len; + /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit + * the first fragment + */ + if (headersize + transhdrlen > mtu) + goto emsgsize; + if (cork->length + length > mtu - headersize && ipc6->dontfrag && (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_RAW)) { @@ -1297,9 +1303,8 @@ static int __ip6_append_data(struct sock *sk, if (cork->length + length > maxnonfragsize - headersize) { emsgsize: - ipv6_local_error(sk, EMSGSIZE, fl6, - mtu - headersize + - sizeof(struct ipv6hdr)); + pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0); + ipv6_local_error(sk, EMSGSIZE, fl6, pmtu); return -EMSGSIZE; } -- cgit v1.2.1 From eb82a994479245a79647d302f9b4eb8e7c9d7ca6 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Sat, 24 Mar 2018 22:25:06 -0700 Subject: net: sched, fix OOO packets with pfifo_fast After the qdisc lock was dropped in pfifo_fast we allow multiple enqueue threads and dequeue threads to run in parallel. On the enqueue side the skb bit ooo_okay is used to ensure all related skbs are enqueued in-order. On the dequeue side though there is no similar logic. What we observe is with fewer queues than CPUs it is possible to re-order packets when two instances of __qdisc_run() are running in parallel. Each thread will dequeue a skb and then whichever thread calls the ndo op first will be sent on the wire. This doesn't typically happen because qdisc_run() is usually triggered by the same core that did the enqueue. However, drivers will trigger __netif_schedule() when queues are transitioning from stopped to awake using the netif_tx_wake_* APIs. When this happens netif_schedule() calls qdisc_run() on the same CPU that did the netif_tx_wake_* which is usually done in the interrupt completion context. This CPU is selected with the irq affinity which is unrelated to the enqueue operations. To resolve this we add a RUNNING bit to the qdisc to ensure only a single dequeue per qdisc is running. Enqueue and dequeue operations can still run in parallel and also on multi queue NICs we can still have a dequeue in-flight per qdisc, which is typically per CPU. Fixes: c5ad119fb6c0 ("net: sched: pfifo_fast use skb_array") Reported-by: Jakob Unterwurzacher Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- net/sched/sch_generic.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 7e3fbe9cc936..39c144b6ff98 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -373,24 +373,33 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, */ static inline bool qdisc_restart(struct Qdisc *q, int *packets) { + bool more, validate, nolock = q->flags & TCQ_F_NOLOCK; spinlock_t *root_lock = NULL; struct netdev_queue *txq; struct net_device *dev; struct sk_buff *skb; - bool validate; /* Dequeue packet */ + if (nolock && test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) + return false; + skb = dequeue_skb(q, &validate, packets); - if (unlikely(!skb)) + if (unlikely(!skb)) { + if (nolock) + clear_bit(__QDISC_STATE_RUNNING, &q->state); return false; + } - if (!(q->flags & TCQ_F_NOLOCK)) + if (!nolock) root_lock = qdisc_lock(q); dev = qdisc_dev(q); txq = skb_get_tx_queue(dev, skb); - return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); + more = sch_direct_xmit(skb, q, dev, txq, root_lock, validate); + if (nolock) + clear_bit(__QDISC_STATE_RUNNING, &q->state); + return more; } void __qdisc_run(struct Qdisc *q) -- cgit v1.2.1 From 1dfe82ebd7d8fd43dba9948fdfb31f145014baa0 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 26 Mar 2018 08:08:07 -0700 Subject: net: fix possible out-of-bound read in skb_network_protocol() skb mac header is not necessarily set at the time skb_network_protocol() is called. Use skb->data instead. BUG: KASAN: slab-out-of-bounds in skb_network_protocol+0x46b/0x4b0 net/core/dev.c:2739 Read of size 2 at addr ffff8801b3097a0b by task syz-executor5/14242 CPU: 1 PID: 14242 Comm: syz-executor5 Not tainted 4.16.0-rc6+ #280 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x24d lib/dump_stack.c:53 print_address_description+0x73/0x250 mm/kasan/report.c:256 kasan_report_error mm/kasan/report.c:354 [inline] kasan_report+0x23c/0x360 mm/kasan/report.c:412 __asan_report_load_n_noabort+0xf/0x20 mm/kasan/report.c:443 skb_network_protocol+0x46b/0x4b0 net/core/dev.c:2739 harmonize_features net/core/dev.c:2924 [inline] netif_skb_features+0x509/0x9b0 net/core/dev.c:3011 validate_xmit_skb+0x81/0xb00 net/core/dev.c:3084 validate_xmit_skb_list+0xbf/0x120 net/core/dev.c:3142 packet_direct_xmit+0x117/0x790 net/packet/af_packet.c:256 packet_snd net/packet/af_packet.c:2944 [inline] packet_sendmsg+0x3aed/0x60b0 net/packet/af_packet.c:2969 sock_sendmsg_nosec net/socket.c:629 [inline] sock_sendmsg+0xca/0x110 net/socket.c:639 ___sys_sendmsg+0x767/0x8b0 net/socket.c:2047 __sys_sendmsg+0xe5/0x210 net/socket.c:2081 Fixes: 19acc327258a ("gso: Handle Trans-Ether-Bridging protocol in skb_network_protocol()") Signed-off-by: Eric Dumazet Cc: Pravin B Shelar Reported-by: syzbot Signed-off-by: David S.
Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index 12be20535714..ef0cc6ea5f8d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2735,7 +2735,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) return 0; - eth = (struct ethhdr *)skb_mac_header(skb); + eth = (struct ethhdr *)skb->data; type = eth->h_proto; } -- cgit v1.2.1 From 734549eb550c0c720bc89e50501f1b1e98cdd841 Mon Sep 17 00:00:00 2001 From: Craig Dillabaugh Date: Mon, 26 Mar 2018 14:58:32 -0400 Subject: net sched actions: fix dumping which requires several messages to user space Fixes a bug in the tcf_dump_walker function that can cause some actions to not be reported when dumping a large number of actions. This issue became more aggravated when the cookies feature was added. In particular, this issue manifests when large cookie values are assigned to the actions and when enough actions are created that the resulting table must be dumped in multiple batches. The number of actions returned in each batch is limited by the total number of actions and the memory buffer size. With small cookies the numeric limit is reached before the buffer size limit, which avoids the code path triggering this bug. When large cookies are used the buffer fills before the numeric limit, and the erroneous code path is hit. For example, after creating 32 csum actions with the cookie aaaabbbbccccdddd: $ tc actions ls action csum total acts 26 action order 0: csum (tcp) action continue index 1 ref 1 bind 0 cookie aaaabbbbccccdddd ..... action order 25: csum (tcp) action continue index 26 ref 1 bind 0 cookie aaaabbbbccccdddd total acts 6 action order 0: csum (tcp) action continue index 28 ref 1 bind 0 cookie aaaabbbbccccdddd ...... action order 5: csum (tcp) action continue index 32 ref 1 bind 0 cookie aaaabbbbccccdddd Note that the action with index 27 is omitted from the report. Fixes: 4b3550ef530c ("[NET_SCHED]: Use nla_nest_start/nla_nest_end") Signed-off-by: Craig Dillabaugh Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_api.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sched/act_api.c b/net/sched/act_api.c index eba6682727dd..efc6bfb9a4e0 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -135,8 +135,10 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, continue; nest = nla_nest_start(skb, n_i); - if (!nest) + if (!nest) { + index--; goto nla_put_failure; + } err = tcf_action_dump_1(skb, p, 0, 0); if (err < 0) { index--; -- cgit v1.2.1 From cd00edc179863848abab5cc5683de5b7b5f70954 Mon Sep 17 00:00:00 2001 From: Dave Watson Date: Mon, 26 Mar 2018 12:31:21 -0700 Subject: strparser: Fix sign of err codes strp_parser_err is called with a negative code everywhere, which then calls abort_parser with a negative code. strp_msg_timeout calls abort_parser directly with a positive code. Negate ETIMEDOUT to match the signedness of the other calls. The default abort_parser callback, strp_abort_strp, sets sk->sk_err to err. Also negate the error here so sk_err always holds a positive value, as the rest of the net code expects. Currently a negative sk_err can result in endless loops, or user code that thinks it actually sent/received err bytes. Found while testing the net/tls_sw recv path. Fixes: 43a0c6751a322847 ("strparser: Stream parser for messages") Signed-off-by: Dave Watson Signed-off-by: David S.
Miller --- net/strparser/strparser.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 1fdab5c4eda8..b9283ce5cd85 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -60,7 +60,7 @@ static void strp_abort_strp(struct strparser *strp, int err) struct sock *sk = strp->sk; /* Report an error on the lower socket */ - sk->sk_err = err; + sk->sk_err = -err; sk->sk_error_report(sk); } } @@ -458,7 +458,7 @@ static void strp_msg_timeout(struct work_struct *w) /* Message assembly timed out */ STRP_STATS_INCR(strp->stats.msg_timeouts); strp->cb.lock(strp); - strp->cb.abort_parser(strp, ETIMEDOUT); + strp->cb.abort_parser(strp, -ETIMEDOUT); strp->cb.unlock(strp); } -- cgit v1.2.1 From b85ab56c3f81c5a24b5a5213374f549df06430da Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 26 Mar 2018 15:08:33 -0700 Subject: llc: properly handle dev_queue_xmit() return value llc_conn_send_pdu() pushes the skb into the write queue and calls llc_conn_send_pdus() to flush them out. However, the status of dev_queue_xmit() is not returned to the caller, in this case, llc_conn_state_process(). llc_conn_state_process() needs to hold the skb no matter success or failure, because it still uses it after that, therefore we should hold the skb before dev_queue_xmit() when that skb is the one being processed by llc_conn_state_process(). The other callers can just pass NULL and ignore the return value as they do now. Reported-by: Noam Rathaus Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/llc/llc_c_ac.c | 15 +++++++++------ net/llc/llc_conn.c | 32 +++++++++++++++++++++++--------- 2 files changed, 32 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index f59648018060..163121192aca 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ -389,7 +389,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { - llc_conn_send_pdu(sk, skb); + rc = llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; @@ -916,7 +916,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { - llc_conn_send_pdu(sk, skb); + rc = llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; @@ -935,14 +935,17 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); + int ret; if (llc->ack_must_be_send) { - llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); + ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); llc->ack_must_be_send = 0 ; llc->ack_pf = 0; - } else - llc_conn_ac_send_i_cmd_p_set_0(sk, skb); - return 0; + } else { + ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb); + } + + return ret; } /** diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 9177dbb16dce..110e32bcb399 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -30,7 +30,7 @@ #endif static int llc_find_offset(int state, int ev_type); -static void llc_conn_send_pdus(struct sock *sk); +static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb); static int llc_conn_service(struct sock *sk, struct sk_buff *skb); static int
llc_exec_conn_trans_actions(struct sock *sk, struct llc_conn_state_trans *trans, @@ -193,11 +193,11 @@ out_skb_put: return rc; } -void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) +int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) { /* queue PDU to send to MAC layer */ skb_queue_tail(&sk->sk_write_queue, skb); - llc_conn_send_pdus(sk); + return llc_conn_send_pdus(sk, skb); } /** @@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit) if (howmany_resend > 0) llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; /* any PDUs to re-send are queued up; start sending to MAC */ - llc_conn_send_pdus(sk); + llc_conn_send_pdus(sk, NULL); out:; } @@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit) if (howmany_resend > 0) llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; /* any PDUs to re-send are queued up; start sending to MAC */ - llc_conn_send_pdus(sk); + llc_conn_send_pdus(sk, NULL); out:; } @@ -340,12 +340,16 @@ out: /** * llc_conn_send_pdus - Sends queued PDUs * @sk: active connection + * @hold_skb: the skb held by caller, or NULL if does not care * - * Sends queued pdus to MAC layer for transmission. + * Sends queued pdus to MAC layer for transmission. When @hold_skb is + * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent + * successfully, or 1 for failure. */ -static void llc_conn_send_pdus(struct sock *sk) +static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb) { struct sk_buff *skb; + int ret = 0; while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); @@ -357,10 +361,20 @@ static void llc_conn_send_pdus(struct sock *sk) skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); if (!skb2) break; - skb = skb2; + dev_queue_xmit(skb2); + } else { + bool is_target = skb == hold_skb; + int rc; + + if (is_target) + skb_get(skb); + rc = dev_queue_xmit(skb); + if (is_target) + ret = rc; } - dev_queue_xmit(skb); } + + return ret; } /** -- cgit v1.2.1 From ab6f6dd18ab801fcbbaa05fb5401e91f48778e04 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Tue, 27 Mar 2018 10:43:50 +0200 Subject: net/smc: use announced length in sock_recvmsg() Not every CLC proposal message needs the maximum buffer length. Due to the MSG_WAITALL flag, it is important to use the peeked real length when receiving the message. Fixes: d63d271ce2b5ce ("smc: switch to sock_recvmsg()") Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_clc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 8ac51583a063..15c213250606 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -133,7 +133,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, /* receive the complete CLC message */ memset(&msg, 0, sizeof(struct msghdr)); - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen); + iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen); krflags = MSG_WAITALL; smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME; len = sock_recvmsg(smc->clcsock, &msg, krflags); -- cgit v1.2.1 From 28913ee8191adf4bbc01cbfb9ee18cca782ab141 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 29 Mar 2018 09:24:28 +0900 Subject: netfilter: nf_nat_snmp_basic: add correct dependency to Makefile nf_nat_snmp_basic_main.c includes a generated header, but the necessary dependency is missing in Makefile. This could cause build error in parallel building. 
Remove a weird line, and add a correct one. Fixes: cc2d58634e0f ("netfilter: nf_nat_snmp_basic: use asn1 decoder library") Reported-by: Stephen Rothwell Signed-off-by: Masahiro Yamada --- net/ipv4/netfilter/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 2dad20eefd26..9bd19cd18849 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -29,7 +29,7 @@ obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o nf_nat_snmp_basic-y := nf_nat_snmp_basic-asn1.o nf_nat_snmp_basic_main.o -nf_nat_snmp_basic-y : nf_nat_snmp_basic-asn1.h nf_nat_snmp_basic-asn1.c +$(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic-asn1.h obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o clean-files := nf_nat_snmp_basic-asn1.c nf_nat_snmp_basic-asn1.h -- cgit v1.2.1 From 5568cdc368c349eee7b5fc48bc956234a0828d71 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2018 11:42:14 -0400 Subject: ip_tunnel: Resolve ipsec merge conflict properly. We want to use dev_set_mtu() regardless of how we calculate the mtu value. Signed-off-by: David S. Miller --- net/ipv4/ip_tunnel.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index f3db1f35a79d..a7fd1c5a2a14 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -1120,14 +1120,14 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], if (tb[IFLA_MTU]) { unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; - dev->mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, - (unsigned int)(max - sizeof(struct iphdr))); - } else { - err = dev_set_mtu(dev, mtu); - if (err) - goto err_dev_set_mtu; + mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, + (unsigned int)(max - sizeof(struct iphdr))); } + err = dev_set_mtu(dev, mtu); + if (err) + goto err_dev_set_mtu; + ip_tunnel_add(itn, nt); return 0; -- cgit v1.2.1 From ae4745730cf8e693d354ccd4dbaf59ea440c09a9 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Thu, 29 Mar 2018 19:05:29 +0900 Subject: net: Fix untag for vlan packets without ethernet header In some situations vlan packets do not have ethernet headers. One example is packets from tun devices. Users can specify the vlan protocol in the tun_pi field instead of the IP protocol, and skb_vlan_untag() attempts to untag such packets. skb_vlan_untag() (more precisely, skb_reorder_vlan_header() called by it) however did not expect packets without ethernet headers, so in such a case the size argument for memmove() underflowed and triggered a crash.
==== BUG: unable to handle kernel paging request at ffff8801cccb8000 IP: __memmove+0x24/0x1a0 arch/x86/lib/memmove_64.S:43 PGD 9cee067 P4D 9cee067 PUD 1d9401063 PMD 1cccb7063 PTE 2810100028101 Oops: 000b [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 1 PID: 17663 Comm: syz-executor2 Not tainted 4.16.0-rc7+ #368 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:__memmove+0x24/0x1a0 arch/x86/lib/memmove_64.S:43 RSP: 0018:ffff8801cc046e28 EFLAGS: 00010287 RAX: ffff8801ccc244c4 RBX: fffffffffffffffe RCX: fffffffffff6c4c2 RDX: fffffffffffffffe RSI: ffff8801cccb7ffc RDI: ffff8801cccb8000 RBP: ffff8801cc046e48 R08: ffff8801ccc244be R09: ffffed0039984899 R10: 0000000000000001 R11: ffffed0039984898 R12: ffff8801ccc244c4 R13: ffff8801ccc244c0 R14: ffff8801d96b7c06 R15: ffff8801d96b7b40 FS: 00007febd562d700(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffff8801cccb8000 CR3: 00000001ccb2f006 CR4: 00000000001606e0 DR0: 0000000020000000 DR1: 0000000020000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000600 Call Trace: memmove include/linux/string.h:360 [inline] skb_reorder_vlan_header net/core/skbuff.c:5031 [inline] skb_vlan_untag+0x470/0xc40 net/core/skbuff.c:5061 __netif_receive_skb_core+0x119c/0x3460 net/core/dev.c:4460 __netif_receive_skb+0x2c/0x1b0 net/core/dev.c:4627 netif_receive_skb_internal+0x10b/0x670 net/core/dev.c:4701 netif_receive_skb+0xae/0x390 net/core/dev.c:4725 tun_rx_batched.isra.50+0x5ee/0x870 drivers/net/tun.c:1555 tun_get_user+0x299e/0x3c20 drivers/net/tun.c:1962 tun_chr_write_iter+0xb9/0x160 drivers/net/tun.c:1990 call_write_iter include/linux/fs.h:1782 [inline] new_sync_write fs/read_write.c:469 [inline] __vfs_write+0x684/0x970 fs/read_write.c:482 vfs_write+0x189/0x510 fs/read_write.c:544 SYSC_write fs/read_write.c:589 [inline] SyS_write+0xef/0x220 fs/read_write.c:581 do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 RIP: 0033:0x454879 RSP: 002b:00007febd562cc68 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 00007febd562d6d4 RCX: 0000000000454879 RDX: 0000000000000157 RSI: 0000000020000180 RDI: 0000000000000014 RBP: 000000000072bea0 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff R13: 00000000000006b0 R14: 00000000006fc120 R15: 0000000000000000 Code: 90 90 90 90 90 90 90 48 89 f8 48 83 fa 20 0f 82 03 01 00 00 48 39 fe 7d 0f 49 89 f0 49 01 d0 49 39 f8 0f 8f 9f 00 00 00 48 89 d1 a4 c3 48 81 fa a8 02 00 00 72 05 40 38 fe 74 3b 48 83 ea 20 RIP: __memmove+0x24/0x1a0 arch/x86/lib/memmove_64.S:43 RSP: ffff8801cc046e28 CR2: ffff8801cccb8000 ==== We don't need to copy headers for packets which do not have preceding headers of vlan headers, so skip memmove() in that case. Fixes: 4bbb3e0e8239 ("net: Fix vlan untag for bridge and vlan_dev with reorder_hdr off") Reported-by: Eric Dumazet Signed-off-by: Toshiaki Makita Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 1e7acdc30732..857e4e6f751a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5028,8 +5028,10 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) } mac_len = skb->data - skb_mac_header(skb); - memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), - mac_len - VLAN_HLEN - ETH_TLEN); + if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { + memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), + mac_len - VLAN_HLEN - ETH_TLEN); + } skb->mac_header += VLAN_HLEN; return skb; } -- cgit v1.2.1 From 5807b22c9164a21cd1077a9bc587f0bba361f72d Mon Sep 17 00:00:00 2001 From: David Lebrun Date: Thu, 29 Mar 2018 17:59:36 +0100 Subject: ipv6: sr: fix seg6 encap performances with TSO enabled Enabling TSO can lead to abysmal performances when using seg6 in encap mode, such as with the ixgbe driver. This patch adds a call to iptunnel_handle_offloads() to remove the encapsulation bit if needed. Before: root@comp4-seg6bpf:~# iperf3 -c fc00::55 Connecting to host fc00::55, port 5201 [ 4] local fc45::4 port 36592 connected to fc00::55 port 5201 [ ID] Interval Transfer Bandwidth Retr Cwnd [ 4] 0.00-1.00 sec 196 KBytes 1.60 Mbits/sec 47 6.66 KBytes [ 4] 1.00-2.00 sec 304 KBytes 2.49 Mbits/sec 100 5.33 KBytes [ 4] 2.00-3.00 sec 284 KBytes 2.32 Mbits/sec 92 5.33 KBytes After: root@comp4-seg6bpf:~# iperf3 -c fc00::55 Connecting to host fc00::55, port 5201 [ 4] local fc45::4 port 43062 connected to fc00::55 port 5201 [ ID] Interval Transfer Bandwidth Retr Cwnd [ 4] 0.00-1.00 sec 1.03 GBytes 8.89 Gbits/sec 0 743 KBytes [ 4] 1.00-2.00 sec 1.03 GBytes 8.87 Gbits/sec 0 743 KBytes [ 4] 2.00-3.00 sec 1.03 GBytes 8.87 Gbits/sec 0 743 KBytes Reported-by: Tom Herbert Fixes: 6c8702c60b88 ("ipv6: sr: add support for SRH encapsulation and injection with lwtunnels") Signed-off-by: David Lebrun Signed-off-by: David S. 
Miller --- net/ipv6/seg6_iptunnel.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 7a78dcfda68a..f343e6f0fc95 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -211,11 +212,6 @@ static int seg6_do_srh(struct sk_buff *skb) tinfo = seg6_encap_lwtunnel(dst->lwtstate); - if (likely(!skb->encapsulation)) { - skb_reset_inner_headers(skb); - skb->encapsulation = 1; - } - switch (tinfo->mode) { case SEG6_IPTUN_MODE_INLINE: if (skb->protocol != htons(ETH_P_IPV6)) @@ -224,10 +220,12 @@ static int seg6_do_srh(struct sk_buff *skb) err = seg6_do_srh_inline(skb, tinfo->srh); if (err) return err; - - skb_reset_inner_headers(skb); break; case SEG6_IPTUN_MODE_ENCAP: + err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6); + if (err) + return err; + if (skb->protocol == htons(ETH_P_IPV6)) proto = IPPROTO_IPV6; else if (skb->protocol == htons(ETH_P_IP)) @@ -239,6 +237,8 @@ static int seg6_do_srh(struct sk_buff *skb) if (err) return err; + skb_set_inner_transport_header(skb, skb_transport_offset(skb)); + skb_set_inner_protocol(skb, skb->protocol); skb->protocol = htons(ETH_P_IPV6); break; case SEG6_IPTUN_MODE_L2ENCAP: @@ -262,8 +262,6 @@ static int seg6_do_srh(struct sk_buff *skb) ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); skb_set_transport_header(skb, sizeof(struct ipv6hdr)); - skb_set_inner_protocol(skb, skb->protocol); - return 0; } -- cgit v1.2.1 From b6cdbc85234b072340b8923e69f49ec293f905dc Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 29 Mar 2018 17:44:57 -0700 Subject: net/ipv6: Fix route leaking between VRFs Donald reported that IPv6 route leaking between VRFs is not working. The root cause is the strict argument in the call to rt6_lookup when validating the nexthop spec. ip6_route_check_nh validates the gateway and device (if given) of a route spec. It in turn could call rt6_lookup (e.g., lookup in a given table did not succeed so it falls back to a full lookup) and if so sets the strict argument to 1. That means if the egress device is given, the route lookup needs to return a result with the same device. This strict requirement does not work with VRFs (IPv4 or IPv6) because the oif in the flow struct is overridden with the index of the VRF device to trigger a match on the l3mdev rule and force the lookup to its table. The right long term solution is to add an l3mdev index to the flow struct such that the oif is not overridden. That solution will not backport well, so this patch aims for a simpler solution to relax the strict argument if the route spec device is an l3mdev slave. As done in other places, use the FLOWI_FLAG_SKIP_NH_OIF to know that the RT6_LOOKUP_F_IFACE flag needs to be removed. Fixes: ca254490c8df ("net: Add VRF support to IPv6 stack") Reported-by: Donald Sharp Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b33d057ac5eb..fc74352fac12 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -919,6 +919,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct rt6_info *rt, *rt_cache; struct fib6_node *fn; + if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) + flags &= ~RT6_LOOKUP_F_IFACE; + rcu_read_lock(); fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); restart: -- cgit v1.2.1
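
Editor's note, for illustration only and not part of any patch above: a minimal C sketch of the lookup-flag relaxation described in the VRF route-leaking fix. It assumes the caller has already rewritten flowi6_oif to the l3mdev (VRF) device and marked the flow with FLOWI_FLAG_SKIP_NH_OIF; the helper name relax_rt6_lookup_flags() is made up for this example and is not a kernel API.

/* Hedged sketch: when the flow's oif was overridden to the l3mdev (VRF)
 * device, drop the strict interface match so the FIB walk can return a
 * route from the leaked table. Relies on declarations from
 * <net/flow.h> (FLOWI_FLAG_SKIP_NH_OIF, struct flowi6) and
 * <net/ip6_route.h> (RT6_LOOKUP_F_IFACE).
 */
static int relax_rt6_lookup_flags(const struct flowi6 *fl6, int flags)
{
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;
	return flags;
}

In the patch itself, ip6_pol_route_lookup() applies the same check inline to its local flags variable before calling fib6_lookup(), which is exactly what the three added lines in the diff above do.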