author    | David S. Miller <davem@davemloft.net> | 2015-10-03 04:32:52 -0700
committer | David S. Miller <davem@davemloft.net> | 2015-10-03 04:32:52 -0700
commit    | c3fc7ac9a0b978ee8538058743d21feef25f7b33 (patch)
tree      | 0caf05649d27830ba0f9548704abbb1ec4b5bb91 /include/net/tcp.h
parent    | f6d3125fa3c2f55ddf7cf69365c41089de6cfae6 (diff)
parent    | e994b2f0fb9229aeff5eea9541320bd7b2ca8714 (diff)
Merge branch 'tcp-lockless-listener'
Eric Dumazet says:
====================
tcp/dccp: lockless listener
TCP listener refactoring: this is becoming interesting!
This patch series takes the steps needed to use the normal TCP/DCCP ehash
table to store SYN_RECV requests, instead of the private per-listener
hash table we had until now (see the sketch after the sign-off below).
SYNACK skbs are now attached to their syn_recv request socket,
so that we no longer heavily modify the listener's sk_wmem_alloc (see the sketch after the diff below).
The listener lock is no longer held in the fast path, including
in SYNCOOKIE mode.
During my tests, my server was able to process 3,500,000
SYN packets per second on one listener and still had CPU cycles
available.
That is about 2 to 3 orders of magnitude more than what we had with older kernels.
This effort started two years ago, and I am pleased that it has met expectations.
We'll probably extend SO_REUSEPORT to add proper CPU/NUMA affinities,
so that heavy-duty TCP servers can get proper siloing thanks to multi-queue
NICs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
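
To make the structural change in the cover letter concrete, here is a minimal, self-contained C sketch of the idea: pending SYN_RECV requests live in the same hash table as established connections, keyed by the 4-tuple, so an incoming ACK or retransmitted SYN can find its request with a plain hash lookup instead of walking a per-listener table under the listener lock. All names and structures below are hypothetical illustrations, not the kernel's actual ehash or request-sock code.

/*
 * Toy "one shared hash table for requests and established connections"
 * sketch.  Purely illustrative; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EHASH_BUCKETS 256

enum conn_state { CONN_SYN_RECV, CONN_ESTABLISHED };

struct conn {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	enum conn_state state;
	struct conn *next;		/* hash chain */
};

static struct conn *ehash[EHASH_BUCKETS];

/* Toy 4-tuple hash; the kernel uses a stronger keyed hash. */
static unsigned int hash4(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	uint32_t h = s ^ d ^ ((uint32_t)sp << 16 | dp);

	h ^= h >> 16;
	return h % EHASH_BUCKETS;
}

static void ehash_insert(struct conn *c)
{
	unsigned int b = hash4(c->saddr, c->daddr, c->sport, c->dport);

	c->next = ehash[b];
	ehash[b] = c;
}

/* Lookup by 4-tuple finds SYN_RECV requests and established entries alike,
 * so this path never needs a per-listener request table or its lock. */
static struct conn *ehash_lookup(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	unsigned int b = hash4(s, d, sp, dp);

	for (struct conn *c = ehash[b]; c; c = c->next)
		if (c->saddr == s && c->daddr == d &&
		    c->sport == sp && c->dport == dp)
			return c;
	return NULL;
}

int main(void)
{
	struct conn *req = calloc(1, sizeof(*req));

	req->saddr = 0x0a000001; req->daddr = 0x0a000002;
	req->sport = 40000;      req->dport = 80;
	req->state = CONN_SYN_RECV;
	ehash_insert(req);

	struct conn *hit = ehash_lookup(0x0a000001, 0x0a000002, 40000, 80);

	printf("found: %s\n",
	       hit && hit->state == CONN_SYN_RECV ? "SYN_RECV request" : "none");
	free(req);
	return 0;
}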
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r-- | include/net/tcp.h | 10
1 file changed, 4 insertions, 6 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2c7dfe52f473..a6be56d5f0e3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -462,7 +462,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int tcp_connect(struct sock *sk);
 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
-				struct tcp_fastopen_cookie *foc);
+				struct tcp_fastopen_cookie *foc,
+				bool attach_req);
 int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -1618,7 +1619,6 @@ static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
 
 /* /proc */
 enum tcp_seq_states {
 	TCP_SEQ_STATE_LISTENING,
-	TCP_SEQ_STATE_OPENREQ,
 	TCP_SEQ_STATE_ESTABLISHED,
 };
@@ -1637,7 +1637,6 @@ struct tcp_iter_state {
 	enum tcp_seq_states	state;
 	struct sock		*syn_wait_sk;
 	int			bucket, offset, sbucket, num;
-	kuid_t			uid;
 	loff_t			last_pos;
 };
 
@@ -1717,9 +1716,8 @@ struct tcp_request_sock_ops {
 	__u32 (*init_seq)(const struct sk_buff *skb);
 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
 			   struct flowi *fl, struct request_sock *req,
-			   u16 queue_mapping, struct tcp_fastopen_cookie *foc);
-	void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
-			       const unsigned long timeout);
+			   u16 queue_mapping, struct tcp_fastopen_cookie *foc,
+			   bool attach_req);
 };
 
 #ifdef CONFIG_SYN_COOKIES
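
The new bool attach_req parameter visible in the header diff corresponds to the cover letter's point about SYNACK skbs being attached to their request socket. The following hypothetical userspace sketch (not kernel code; every name in it is invented for illustration) contrasts charging every outgoing SYNACK to a single shared listener counter, where every CPU handling SYNs writes the same atomic, with attaching the buffer to its own request so the fast path touches only per-request state.

/*
 * Illustration only: shared-counter accounting vs. per-request attachment.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Old scheme: every SYNACK buffer charged to the shared listener counter. */
struct listener {
	atomic_long wmem_alloc;		/* written by every CPU sending a SYNACK */
};

/* New scheme: the SYNACK buffer hangs off its own request, so only that
 * request's private fields are written on the SYN fast path. */
struct request {
	struct listener *lsk;
	void *synack_skb;		/* attached reply, freed with the request */
	size_t synack_len;
};

static void send_synack_old(struct listener *lsk, size_t len)
{
	/* Contended cache line shared by all CPUs serving this listener. */
	atomic_fetch_add(&lsk->wmem_alloc, (long)len);
}

static void send_synack_new(struct request *req, void *skb, size_t len)
{
	/* Private to this request; no shared write on the fast path. */
	req->synack_skb = skb;
	req->synack_len = len;
}

int main(void)
{
	struct listener lsk = { .wmem_alloc = 0 };
	struct request req = { .lsk = &lsk };
	char skb[64];

	send_synack_old(&lsk, sizeof(skb));
	send_synack_new(&req, skb, sizeof(skb));
	printf("listener counter after old-style send: %ld\n",
	       atomic_load(&lsk.wmem_alloc));
	return 0;
}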