author     Eric Dumazet <edumazet@google.com>       2012-04-27 00:33:38 +0000
committer  David S. Miller <davem@davemloft.net>    2012-04-30 21:35:11 -0400
commit     d3836f21b0af5513ef55701dd3f50b8c42e44c7a
tree       69a471411b1dbbc2bb0997dd5f9f53fce6c74a7e /net/core
parent     49cbb1c1e6fd8fb069ef9fbfadc97042168f93bf
net: allow skb->head to be a page fragment

skb->head is currently allocated from kmalloc(). This is convenient, but
it has the drawback that the data cannot be converted to a page fragment
when needed.

We have three spots where it hurts:

1) GRO aggregation

When a linear skb must be appended to another skb, GRO falls back to the
frag_list path, which is very inefficient since we keep every struct
sk_buff around. So drivers that enable GRO but deliver linear skbs to the
network stack aren't getting the full benefit of GRO.

2) splice(socket -> pipe)

We must copy the linear part to a page fragment, which largely defeats
the purpose of splice() (its zero-copy claim).

3) TCP coalescing

Recently introduced, this permits grouping several contiguous segments
into a single skb. It shortens queue lengths, saves kernel memory, and
greatly reduces the probability of TCP collapses. This coalescing does
not work on linear skbs (or we would need to copy the data, which would
be too slow).

Given all these issues, this patch introduces the possibility of having
skb->head be a page fragment itself. A new skb flag, skb->head_frag,
carries this information.

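The flag itself is a single bit in struct sk_buff; that hunk lives in
include/linux/skbuff.h and is therefore outside the net/core diffstat
shown below. Its shape is roughly:

	__u8	head_frag:1;	/* skb->head is a page fragment, freed with put_page() */
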
build_skb() is changed to accept a frag_size argument. Drivers willing
to provide a page fragment instead of kmalloc()ed data pass a non-zero
value equal to the fragment size.

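As an illustration only (the driver name, buffer layout and slice size
below are assumptions, not part of this patch), an RX path that carves
its receive buffers out of pages could build its skbs like this:

	#define MYDRV_RX_FRAG_SIZE 2048	/* assumed slice size; must leave room
					 * for struct skb_shared_info at the end */

	static struct sk_buff *mydrv_build_rx_skb(void *slice, unsigned int len)
	{
		struct sk_buff *skb;

		/* A non-zero frag_size makes build_skb() set skb->head_frag,
		 * so the core later frees the head with put_page() instead of
		 * kfree(); the slice must carry its own page reference.
		 * Headroom/alignment handling is omitted from this sketch.
		 */
		skb = build_skb(slice, MYDRV_RX_FRAG_SIZE);
		if (unlikely(!skb))
			return NULL;

		skb_put(skb, len);
		return skb;
	}
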
Then, in situations where we need to convert the skb head into a
fragment, we can check whether skb->head_frag is set and avoid the
copies or the various fallbacks we currently use.

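For instance (a sketch only; the helper name is made up and the truesize
accounting is simplified), a consumer such as GRO or the splice path
could attach a fragment-backed head as a page fragment instead of
copying it:

	static bool head_to_page_frag(struct sk_buff *to, struct sk_buff *from)
	{
		unsigned int len = skb_headlen(from);
		unsigned int offset;
		struct page *page;

		if (!from->head_frag)
			return false;	/* kmalloc()ed head: keep the copy fallback */

		page   = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		get_page(page);		/* 'to' now holds its own reference */
		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, len);
		to->len      += len;
		to->data_len += len;
		to->truesize += len;	/* simplified accounting */
		return true;
	}
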
This means drivers currently using frags could be updated to avoid the
separate skb->head allocation and reduce their memory footprint (i.e.
the skb truesize); that is 512 or 1024 bytes saved per skb. It also
makes bpf/netfilter faster, since the 'first frag' becomes part of the
skb linear area and no data copy is needed.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Cc: Ben Hutchings <bhutchings@solarflare.com>
Cc: Matt Carlson <mcarlson@broadcom.com>
Cc: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Diffstat (limited to 'net/core')

 -rw-r--r--  net/core/skbuff.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2342a7250391..effa75d0e318 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -245,6 +245,7 @@ EXPORT_SYMBOL(__alloc_skb);
 /**
  * build_skb - build a network buffer
  * @data: data buffer provided by caller
+ * @frag_size: size of fragment, or 0 if head was kmalloced
  *
  * Allocate a new &sk_buff. Caller provides space holding head and
  * skb_shared_info. @data must have been allocated by kmalloc()
@@ -258,20 +259,21 @@ EXPORT_SYMBOL(__alloc_skb);
  * before giving packet to stack.
  * RX rings only contains data buffers, not full skbs.
  */
-struct sk_buff *build_skb(void *data)
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
 {
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
-	unsigned int size;
+	unsigned int size = frag_size ? : ksize(data);
 
 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
 	if (!skb)
 		return NULL;
 
-	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->truesize = SKB_TRUESIZE(size);
+	skb->head_frag = frag_size != 0;
 	atomic_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
@@ -376,6 +378,14 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 		skb_get(list);
 }
 
+static void skb_free_head(struct sk_buff *skb)
+{
+	if (skb->head_frag)
+		put_page(virt_to_head_page(skb->head));
+	else
+		kfree(skb->head);
+}
+
 static void skb_release_data(struct sk_buff *skb)
 {
 	if (!skb->cloned ||
@@ -402,7 +412,7 @@ static void skb_release_data(struct sk_buff *skb)
 		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
 
-		kfree(skb->head);
+		skb_free_head(skb);
 	}
 }
 
@@ -644,6 +654,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(tail);
 	C(end);
 	C(head);
+	C(head_frag);
 	C(data);
 	C(truesize);
 	atomic_set(&n->users, 1);
@@ -940,7 +951,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
 	}
 
-	if (fastpath &&
+	if (fastpath && !skb->head_frag &&
 	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
 		memmove(skb->head + size, skb_shinfo(skb),
 			offsetof(struct skb_shared_info,
@@ -967,7 +978,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	       offsetof(struct skb_shared_info,
 			frags[skb_shinfo(skb)->nr_frags]));
 	if (fastpath) {
-		kfree(skb->head);
+		skb_free_head(skb);
 	} else {
 		/* copy this zero copy skb frags */
 		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
@@ -985,6 +996,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 
 	off = (data + nhead) - skb->head;
 	skb->head     = data;
+	skb->head_frag = 0;
 adjust_others:
 	skb->data    += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
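
One detail of the build_skb() change worth spelling out: the initializer
uses the GCC extension "a ? : b", which means "a ? a : b" with a evaluated
once. Inside build_skb() it is therefore equivalent to:

	/* equivalent spelling of the shorthand used above */
	unsigned int size = frag_size ? frag_size : ksize(data);

so a zero frag_size (kmalloc()ed head) keeps the old ksize() behaviour,
while a non-zero frag_size is used as-is.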