| author | Herbert Xu <herbert@gondor.apana.org.au> | 2005-09-01 17:48:23 -0700 |
| --- | --- | --- |
| committer | David S. Miller <davem@davemloft.net> | 2005-09-01 17:48:23 -0700 |
| commit | d80d99d643090c3cf2b1f9fb3fadd1256f7e384f (patch) | |
| tree | 5e8bd46fa6c73cace5efb77c43e863cd36edb0c9 /include/net/sock.h | |
| parent | 2dac4b96b9362954a0638317b90e3e7bcb112e83 (diff) | |
[NET]: Add sk_stream_wmem_schedule
This patch introduces sk_stream_wmem_schedule as a shorthand for
the sk_forward_alloc check on egress.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/sock.h')
-rw-r--r-- | include/net/sock.h | 12 |
1 file changed, 8 insertions, 4 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 312cb25cbd18..e51e626e9af1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,6 +709,12 @@ static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 		sk_stream_mem_schedule(sk, skb->truesize, 1);
 }
 
+static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+{
+	return size <= sk->sk_forward_alloc ||
+	       sk_stream_mem_schedule(sk, size, 0);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
@@ -1203,8 +1209,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 		skb = alloc_skb_fclone(size + hdr_len, gfp);
 	if (skb) {
 		skb->truesize += mem;
-		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
 			skb_reserve(skb, hdr_len);
 			return skb;
 		}
@@ -1227,8 +1232,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
 	struct page *page = NULL;
 
-	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
 		page = alloc_pages(sk->sk_allocation, 0);
 	else {
 		sk->sk_prot->enter_memory_pressure();
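
For illustration, here is a minimal user-space sketch of the short-circuit this helper encapsulates: use the quota already reserved in sk_forward_alloc when it covers the request, otherwise fall back to the slower scheduling path. The struct sock and sk_stream_mem_schedule below are simplified stand-ins, not the kernel implementations; only the check in sk_stream_wmem_schedule mirrors the patch.

```c
/* Simplified stand-in for the kernel's per-socket accounting state. */
#include <stdio.h>
#include <stdbool.h>

struct sock {
	int sk_forward_alloc;	/* bytes already charged to this socket */
};

/* Stand-in for the slower charge path; the real function may fail
 * under memory pressure, here it always grants the request. */
static bool sk_stream_mem_schedule(struct sock *sk, int size, int kind)
{
	(void)kind;			/* 0 means send buffer in the kernel API */
	sk->sk_forward_alloc += size;	/* pretend the charge was granted */
	return true;
}

/* Mirrors the new helper: fast path via sk_forward_alloc, otherwise
 * fall back to sk_stream_mem_schedule(). */
static bool sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, size, 0);
}

int main(void)
{
	struct sock sk = { .sk_forward_alloc = 4096 };

	/* 2048 fits the pre-charged quota: fast path, no scheduling call. */
	printf("2048 bytes: %s\n",
	       sk_stream_wmem_schedule(&sk, 2048) ? "ok" : "blocked");
	/* 8192 exceeds the quota: falls back to the scheduling path. */
	printf("8192 bytes: %s\n",
	       sk_stream_wmem_schedule(&sk, 8192) ? "ok" : "blocked");
	return 0;
}
```

As the diff shows, sk_stream_alloc_pskb and sk_stream_alloc_page previously open-coded this two-step check; the helper replaces both copies with a single call.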