author    David S. Miller <davem@davemloft.net>    2005-07-05 15:20:27 -0700
committer David S. Miller <davem@davemloft.net>    2005-07-05 15:20:27 -0700
commit    b4e26f5ea0dbdd1e813c5571fb467022d8eb948a (patch)
tree      6d1d36cafebb126ff3946443e6d03fec60776f5e
parent    aa93466bdfd901b926e033801f0b82b3eaa67be2 (diff)
[TCP]: Fix send-side cpu utilization regression.
Only put user data purely to pages when doing TSO.

The extra page allocations cause two problems:

1) Add the overhead of the page allocations themselves.
2) Make us do small user copies when we get to the end of the TCP socket cache page.

It is still beneficial to purely use pages for TSO, so we will do it for that case.

Signed-off-by: David S. Miller <davem@davemloft.net>
 net/ipv4/tcp.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index be354155b2f9..2ba73bf3a8f9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -756,8 +756,17 @@ static inline int select_size(struct sock *sk, struct tcp_sock *tp)
 {
 	int tmp = tp->mss_cache_std;
 
-	if (sk->sk_route_caps & NETIF_F_SG)
-		tmp = 0;
+	if (sk->sk_route_caps & NETIF_F_SG) {
+		if (sk->sk_route_caps & NETIF_F_TSO)
+			tmp = 0;
+		else {
+			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
+
+			if (tmp >= pgbreak &&
+			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
+				tmp = pgbreak;
+		}
+	}
 
 	return tmp;
 }
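
For readers outside the kernel tree, here is a minimal user-space sketch of the heuristic the hunk above adds. The constants and the SKB_MAX_HEAD() stand-in are illustrative assumptions, not the real kernel definitions; the sketch only models the arithmetic, not the skb allocation itself.

/*
 * Sketch of the new select_size() behaviour with assumed values:
 * PAGE_SIZE = 4096, MAX_TCP_HEADER = 320, MAX_SKB_FRAGS = 18, and
 * SKB_MAX_HEAD() approximated as one page minus the reserved header
 * space. These are stand-ins for the kernel macros.
 */
#include <stdio.h>

#define PAGE_SIZE       4096                    /* assumed */
#define MAX_TCP_HEADER  320                     /* assumed */
#define MAX_SKB_FRAGS   18                      /* assumed */
#define SKB_MAX_HEAD(x) (PAGE_SIZE - (x))       /* simplified stand-in */

/*
 * Model of the patched logic: with SG but no TSO, keep the payload in
 * the skb head instead of extra pages; if the MSS would straddle the
 * head/page-frag boundary, round the head size down to pgbreak.
 */
static int select_size(int mss_cache_std, int has_sg, int has_tso)
{
	int tmp = mss_cache_std;

	if (has_sg) {
		if (has_tso) {
			tmp = 0;        /* TSO: data goes purely to pages */
		} else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}
	return tmp;
}

int main(void)
{
	/* A jumbo-frame MSS falls inside the clamp window and is rounded
	 * down to pgbreak; a standard 1460-byte MSS is left untouched. */
	printf("mss 9000, SG only : %d\n", select_size(9000, 1, 0));
	printf("mss 1460, SG only : %d\n", select_size(1460, 1, 0));
	printf("mss 9000, SG + TSO: %d\n", select_size(9000, 1, 1));
	return 0;
}

Under these assumed constants, pgbreak is 4096 - 320 = 3776, so the jumbo-MSS case prints 3776 while the 1460-byte case stays at 1460 and the TSO case stays at 0, which is the behaviour the commit message describes.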