author     David S. Miller <davem@davemloft.net>    2016-01-10 17:54:28 -0500
committer  David S. Miller <davem@davemloft.net>    2016-01-10 17:54:28 -0500
commit     749f7df18689b113dc0d914e40122b37ddaeff41
tree       3b01575dd9d774bcfbeb65f0ca302d606a26f895
parent     4156afafcc4c522adfd59c694126edc30247b7ad
parent     f8ffad69c9f8b8dfb0b633425d4ef4d2493ba61a
Merge branch 'bpf-next'
Daniel Borkmann says:
====================
BPF update
Fixes a csum issue on ingress. As mentioned previously, net-next
seems just fine imho. Later on, I will follow up with a couple of
replacements like ovs_skb_postpush_rcsum() etc.
Thanks!
v1 -> v2:
- Added patch 1 with helper
- Implemented Hannes' idea to just use csum_partial, thanks!
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
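
[Editor's note: for context, the path being fixed is exercised by a tc classifier
program along the following lines. This is only an illustrative sketch, not part of
the series; the section name, target ifindex, patched offset and libbpf-style
scaffolding (SEC(), bpf_helpers.h) are assumptions. The two helper calls are the
ones whose kernel-side implementations change below: bpf_skb_store_bytes() with
BPF_F_RECOMPUTE_CSUM, and bpf_clone_redirect() with BPF_F_INGRESS.]

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical values, for the sketch only. */
#define TARGET_IFINDEX	2
#define PATCH_OFFSET	0

SEC("classifier")
int rewrite_and_redirect(struct __sk_buff *skb)
{
	__u8 patch[4] = { 0xde, 0xad, 0xbe, 0xef };

	/* Rewrite a few bytes and ask the kernel to keep CHECKSUM_COMPLETE
	 * in sync; kernel-side this lands in bpf_skb_store_bytes(), which
	 * the series converts to skb_postpush_rcsum().
	 */
	bpf_skb_store_bytes(skb, PATCH_OFFSET, patch, sizeof(patch),
			    BPF_F_RECOMPUTE_CSUM);

	/* Clone the packet to another device's ingress. When the original
	 * skb is itself at tc ingress, its checksum no longer covers the
	 * pulled MAC header, so it must be added back before
	 * dev_forward_skb() -- the case the series fixes.
	 */
	bpf_clone_redirect(skb, TARGET_IFINDEX, BPF_F_INGRESS);

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

[Attached at tc ingress (e.g. via the ingress qdisc and cls_bpf), such a program
rewrites packet bytes and clones the result to another device's ingress, which is
exactly where a CHECKSUM_COMPLETE skb previously ended up with a stale checksum.]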
-rw-r--r--  include/linux/skbuff.h     | 17
-rw-r--r--  include/net/sch_generic.h  |  9
-rw-r--r--  net/core/filter.c          | 17
-rw-r--r--  net/sched/cls_bpf.c        |  6
4 files changed, 40 insertions, 9 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6b6bd42d6134..07f9ccd28654 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2805,6 +2805,23 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
 
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
 
+static inline void skb_postpush_rcsum(struct sk_buff *skb,
+				      const void *start, unsigned int len)
+{
+	/* For performing the reverse operation to skb_postpull_rcsum(),
+	 * we can instead of ...
+	 *
+	 *   skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+	 *
+	 * ... just use this equivalent version here to save a few
+	 * instructions. Feeding csum of 0 in csum_partial() and later
+	 * on adding skb->csum is equivalent to feed skb->csum in the
+	 * first place.
+	 */
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_partial(start, len, skb->csum);
+}
+
 /**
  *	pskb_trim_rcsum - trim received skb and update checksum
  *	@skb: buffer to trim
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index b2a8e6338576..636a362a0e03 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -407,6 +407,15 @@ bool tcf_destroy(struct tcf_proto *tp, bool force);
 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 
 int skb_do_redirect(struct sk_buff *);
 
+static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
+#else
+	return false;
+#endif
+}
+
 /* Reset all TX qdiscs greater then index of a device.  */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
 {
diff --git a/net/core/filter.c b/net/core/filter.c
index 35e6fed28709..0db92b5e2cbf 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1368,8 +1368,9 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 	/* skb_store_bits cannot return -EFAULT here */
 	skb_store_bits(skb, offset, ptr, len);
 
-	if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
+	if (BPF_RECOMPUTE_CSUM(flags))
+		skb_postpush_rcsum(skb, ptr, len);
+
 	return 0;
 }
 
@@ -1525,8 +1526,12 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 	if (unlikely(!skb2))
 		return -ENOMEM;
 
-	if (BPF_IS_REDIRECT_INGRESS(flags))
+	if (BPF_IS_REDIRECT_INGRESS(flags)) {
+		if (skb_at_tc_ingress(skb2))
+			skb_postpush_rcsum(skb2, skb_mac_header(skb2),
+					   skb2->mac_len);
 		return dev_forward_skb(dev, skb2);
+	}
 
 	skb2->dev = dev;
 	skb_sender_cpu_clear(skb2);
@@ -1569,8 +1574,12 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	if (BPF_IS_REDIRECT_INGRESS(ri->flags))
+	if (BPF_IS_REDIRECT_INGRESS(ri->flags)) {
+		if (skb_at_tc_ingress(skb))
+			skb_postpush_rcsum(skb, skb_mac_header(skb),
+					   skb->mac_len);
 		return dev_forward_skb(dev, skb);
+	}
 
 	skb->dev = dev;
 	skb_sender_cpu_clear(skb);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 5faaa5425f7b..b3c8bb4aeef5 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -79,12 +79,8 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			    struct tcf_result *res)
 {
 	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
+	bool at_ingress = skb_at_tc_ingress(skb);
 	struct cls_bpf_prog *prog;
-#ifdef CONFIG_NET_CLS_ACT
-	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
-#else
-	bool at_ingress = false;
-#endif
 	int ret = -1;
 
 	if (unlikely(!skb_mac_header_was_set(skb)))
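
[Editor's note: the comment added in skb_postpush_rcsum() above asserts that seeding
csum_partial() with the existing skb->csum is equivalent to computing a zero-seeded
partial sum and csum_add()'ing it afterwards. The small user-space model of 16-bit
one's-complement folding below illustrates the identity; add16() and partial() are
made-up stand-ins for this sketch and deliberately ignore the 32-bit accumulation
and byte-order handling of the real csum_add()/csum_partial().]

#include <stdint.h>
#include <stdio.h>

/* One's-complement add with end-around carry (toy stand-in for csum_add()). */
static uint32_t add16(uint32_t a, uint32_t b)
{
	uint32_t s = a + b;

	return (s & 0xffff) + (s >> 16);
}

/* Fold a byte range into a running sum (toy stand-in for csum_partial()). */
static uint32_t partial(const uint8_t *p, int len, uint32_t seed)
{
	uint32_t s = seed;
	int i;

	for (i = 0; i + 1 < len; i += 2)
		s = add16(s, ((uint32_t)p[i] << 8) | p[i + 1]);
	if (len & 1)
		s = add16(s, (uint32_t)p[len - 1] << 8);
	return s;
}

int main(void)
{
	uint8_t pushed[] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };
	uint32_t skb_csum = 0x1234;	/* pretend CHECKSUM_COMPLETE value */

	/* Seed the partial sum with the existing csum ... */
	uint32_t seeded = partial(pushed, sizeof(pushed), skb_csum);
	/* ... or add a zero-seeded partial sum afterwards. */
	uint32_t added = add16(skb_csum, partial(pushed, sizeof(pushed), 0));

	printf("seeded=0x%04x added=0x%04x\n",
	       (unsigned int)seeded, (unsigned int)added);	/* both 0xb0d1 */
	return 0;
}

[Because one's-complement addition with end-around carry is commutative and
associative (up to the 0x0000/0xffff representation of zero), the seed can be
folded in at any point, which is what lets the helper drop the extra csum_add().]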