| author | Yuchung Cheng <ycheng@google.com> | 2016-02-02 10:33:09 -0800 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2016-02-07 14:09:51 -0500 |
| commit | d452e6caf8367cc70cf940c24a6a6cc2d521d3c1 | |
| tree | a82946530d5c82a944d0492e5c65168bb5b24d6b /net/ipv4/tcp_input.c | |
| parent | 2d14a4def4fc87cb2d2712f7841b45189d75e301 | |
tcp: tcp_cong_control helper
Refactor and consolidate cwnd and rate updates into a new function
tcp_cong_control().
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
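
The consolidated decision flow is easiest to see in isolation. Below is a minimal, self-contained sketch of the same branch structure, using toy stand-ins rather than kernel code: `toy_sock`, `toy_cong_control`, and the stubbed helpers are hypothetical names, and the cwnd arithmetic is a crude placeholder for the real PRR and congestion-avoidance logic. The point it illustrates is the shape of the helper: reduce cwnd while a reduction episode is in progress, otherwise advance it when allowed, and refresh the pacing rate on every ACK either way.

```c
/* Toy model of the control flow in the new tcp_cong_control() helper.
 * All names and types are simplified stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
	bool in_cwnd_reduction;  /* models tcp_in_cwnd_reduction(sk) */
	bool may_raise_cwnd;     /* models tcp_may_raise_cwnd(sk, flag) */
	unsigned int cwnd;       /* models tp->snd_cwnd */
};

static void toy_cwnd_reduction(struct toy_sock *sk, unsigned int acked_sacked)
{
	sk->cwnd -= acked_sacked;  /* crude: real PRR is more involved */
	printf("reduce cwnd -> %u\n", sk->cwnd);
}

static void toy_cong_avoid(struct toy_sock *sk, unsigned int acked_sacked)
{
	sk->cwnd += acked_sacked;  /* crude: real CC modules decide this */
	printf("raise cwnd  -> %u\n", sk->cwnd);
}

static void toy_update_pacing_rate(const struct toy_sock *sk)
{
	printf("update pacing rate (cwnd=%u)\n", sk->cwnd);
}

/* Mirrors the helper's branch structure: reduce if state mandates,
 * otherwise maybe advance, and always refresh the pacing rate last. */
static void toy_cong_control(struct toy_sock *sk, unsigned int acked_sacked)
{
	if (sk->in_cwnd_reduction)
		toy_cwnd_reduction(sk, acked_sacked);
	else if (sk->may_raise_cwnd)
		toy_cong_avoid(sk, acked_sacked);
	toy_update_pacing_rate(sk);
}

int main(void)
{
	struct toy_sock reducing = { true,  false, 10 };
	struct toy_sock raising  = { false, true,  10 };
	struct toy_sock neither  = { false, false, 10 };

	toy_cong_control(&reducing, 2); /* loss-recovery path */
	toy_cong_control(&raising, 2);  /* congestion-avoidance path */
	toy_cong_control(&neither, 2);  /* cwnd untouched, pacing still updated */
	return 0;
}
```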
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 31 |
1 file changed, 19 insertions, 12 deletions
```diff
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d598ff408cb9..596c1cb6759a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3323,6 +3323,24 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	return flag & FLAG_DATA_ACKED;
 }
 
+/* The "ultimate" congestion control function that aims to replace the rigid
+ * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction).
+ * It's called toward the end of processing an ACK with precise rate
+ * information. All transmission or retransmission are delayed afterwards.
+ */
+static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
+			     int flag)
+{
+	if (tcp_in_cwnd_reduction(sk)) {
+		/* Reduce cwnd if state mandates */
+		tcp_cwnd_reduction(sk, acked_sacked, flag);
+	} else if (tcp_may_raise_cwnd(sk, flag)) {
+		/* Advance cwnd if state allows */
+		tcp_cong_avoid(sk, ack, acked_sacked);
+	}
+	tcp_update_pacing_rate(sk);
+}
+
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
@@ -3553,7 +3571,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int prior_packets = tp->packets_out;
 	u32 prior_delivered = tp->delivered;
 	int acked = 0; /* Number of packets newly acked */
-	u32 acked_sacked; /* Number of packets newly acked or sacked */
 	int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
 
 	sack_state.first_sackt.v64 = 0;
@@ -3653,16 +3670,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (tp->tlp_high_seq)
 		tcp_process_tlp_ack(sk, ack, flag);
 
-	acked_sacked = tp->delivered - prior_delivered;
-	/* Advance cwnd if state allows */
-	if (tcp_in_cwnd_reduction(sk)) {
-		/* Reduce cwnd if state mandates */
-		tcp_cwnd_reduction(sk, acked_sacked, flag);
-	} else if (tcp_may_raise_cwnd(sk, flag)) {
-		/* Advance cwnd if state allows */
-		tcp_cong_avoid(sk, ack, acked_sacked);
-	}
-
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
 		struct dst_entry *dst = __sk_dst_get(sk);
 
 		if (dst)
@@ -3671,7 +3678,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
 		tcp_schedule_loss_probe(sk);
 
-	tcp_update_pacing_rate(sk);
+	tcp_cong_control(sk, ack, tp->delivered - prior_delivered, flag);
 	tcp_xmit_recovery(sk, rexmit);
 	return 1;
```
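
Two details worth noting from the diff itself. First, per the new helper's comment, it runs toward the end of ACK processing and all transmission or retransmission is delayed until afterwards; at the call site this means tcp_xmit_recovery() runs only after tcp_cong_control() returns. Second, tcp_update_pacing_rate() now runs inside the helper on every ACK path, so the pacing rate is refreshed together with whichever cwnd decision was taken, rather than as a separate step at the end of tcp_ack().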