-rw-r--r--  include/linux/netfilter/x_tables.h         |  2 +-
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h  |  2 +-
-rw-r--r--  net/netfilter/nf_conntrack_core.c          | 10 ++++++++++
-rw-r--r--  net/netfilter/nf_conntrack_sip.c           | 12 ++++--------
4 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c2ee5d8550cf..c00cc0c4d0b7 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -333,7 +333,7 @@ struct xt_target {
 
 	/* Called when user tries to insert an entry of this type:
 	   hook_mask is a bitmask of hooks from which it can be called. */
-	/* Should return true or false, or an error code (-Exxxx). */
+	/* Should return 0 on success or an error code otherwise (-Exxxx). */
 	int (*checkentry)(const struct xt_tgchk_param *);
 
 	/* Called when entry of this type deleted. */
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index dffde8e6920e..3d7524fba194 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -61,7 +61,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
 	int ret = NF_ACCEPT;
 
 	if (ct && ct != &nf_conntrack_untracked) {
-		if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
+		if (!nf_ct_is_confirmed(ct))
 			ret = __nf_conntrack_confirm(skb);
 		if (likely(ret == NF_ACCEPT))
 			nf_ct_deliver_cached_events(ct);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b83c530c5e0a..eeeb8bc73982 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -424,6 +424,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 
 	spin_lock_bh(&nf_conntrack_lock);
 
+	/* We have to check the DYING flag inside the lock to prevent
+	   a race against nf_ct_get_next_corpse() possibly called from
+	   user context, else we insert an already 'dead' hash, blocking
+	   further use of that particular connection -JM */
+
+	if (unlikely(nf_ct_is_dying(ct))) {
+		spin_unlock_bh(&nf_conntrack_lock);
+		return NF_ACCEPT;
+	}
+
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash.  If there is, we lost race. */
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index b20f4275893c..53d892210a04 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1393,10 +1393,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
 
 	nf_ct_refresh(ct, skb, sip_timeout * HZ);
 
-	if (skb_is_nonlinear(skb)) {
-		pr_debug("Copy of skbuff not supported yet.\n");
-		return NF_ACCEPT;
-	}
+	if (unlikely(skb_linearize(skb)))
+		return NF_DROP;
 
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
@@ -1455,10 +1453,8 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
 
 	nf_ct_refresh(ct, skb, sip_timeout * HZ);
 
-	if (skb_is_nonlinear(skb)) {
-		pr_debug("Copy of skbuff not supported yet.\n");
-		return NF_ACCEPT;
-	}
+	if (unlikely(skb_linearize(skb)))
+		return NF_DROP;
 
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
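Note on the SIP-helper hunks: instead of giving up on non-linear skbs, the helpers now call skb_linearize(), which pulls paged data into the linear buffer (returning 0 on success) so the payload can be parsed in place, and drop the packet if linearization fails. Below is a minimal sketch of that pattern; example_help() and its arguments are hypothetical names used for illustration, not code from this patch.

/*
 * Illustrative sketch only -- example_help() is a hypothetical helper
 * showing the pattern the SIP helpers switch to: linearize the skb so
 * the payload is contiguous in skb->data, drop the packet if that fails.
 */
#include <linux/compiler.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/netfilter.h>

static int example_help(struct sk_buff *skb, unsigned int dataoff)
{
	const char *dptr;
	unsigned int datalen;

	/* skb_linearize() returns 0 on success and non-zero on failure
	   (typically an allocation failure), in which case the payload
	   cannot be parsed safely and the packet is dropped. */
	if (unlikely(skb_linearize(skb)))
		return NF_DROP;

	/* The payload is now contiguous and can be parsed in place. */
	dptr = skb->data + dataoff;
	datalen = skb->len - dataoff;

	/* Example check: ignore payloads too short to be interesting. */
	if (datalen < 4 || strncmp(dptr, "SIP/", 4) != 0)
		return NF_ACCEPT;

	/* ... message parsing would go here ... */

	return NF_ACCEPT;
}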