author     David S. Miller <davem@davemloft.net>  2013-02-05 14:12:20 -0500
committer  David S. Miller <davem@davemloft.net>  2013-02-05 14:12:20 -0500
commit     188d1f76d0dd3715ceeadfa31376867c3395eb41 (patch)
tree       b8976427ec21d3c346f2a993160b368c620c249a /net/ipv4
parent     577ae39ddb037242964f5fe87fd50b0b89e3263b (diff)
parent     bf414b369f158bb527f9f29174ada815f961b44c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
        drivers/net/ethernet/intel/e1000e/ethtool.c
        drivers/net/vmxnet3/vmxnet3_drv.c
        drivers/net/wireless/iwlwifi/dvm/tx.c
        net/ipv6/route.c
The ipv6 route.c conflict is simple: just ignore the 'net' side change,
as we fixed the same problem in 'net-next' by eliminating cached
neighbours from ipv6 routes.
The e1000e conflict is a trivial addition of a new statistic in the
ethtool code.
The vmxnet3 conflict is about one change in 'net' removing a guarding
conditional, whilst in 'net-next' we had a netdev_info() conversion.
The iwlwifi conflict is dealing with a WARN_ON() conversion in
'net-next' vs. a revert happening in 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp_cong.c   14
-rw-r--r--  net/ipv4/tcp_input.c   6
-rw-r--r--  net/ipv4/tcp_ipv4.c    6
3 files changed, 18 insertions, 8 deletions
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 291f2ed7cc31..cdf2e707bb10 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
        int cnt; /* increase in packets */
        unsigned int delta = 0;
+       u32 snd_cwnd = tp->snd_cwnd;
+
+       if (unlikely(!snd_cwnd)) {
+               pr_err_once("snd_cwnd is nul, please report this bug.\n");
+               snd_cwnd = 1U;
+       }

        /* RFC3465: ABC Slow start
         * Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
        if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
                cnt = sysctl_tcp_max_ssthresh >> 1;     /* limited slow start */
        else
-               cnt = tp->snd_cwnd;                     /* exponential increase */
+               cnt = snd_cwnd;                         /* exponential increase */

        /* RFC3465: ABC
         * We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
                tp->bytes_acked = 0;

        tp->snd_cwnd_cnt += cnt;
-       while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-               tp->snd_cwnd_cnt -= tp->snd_cwnd;
+       while (tp->snd_cwnd_cnt >= snd_cwnd) {
+               tp->snd_cwnd_cnt -= snd_cwnd;
                delta++;
        }
-       tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+       tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 492c7cfe1453..e376aa9591bc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3482,7 +3482,8 @@ static bool tcp_process_frto(struct sock *sk, int flag)
            ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
                tp->undo_marker = 0;

-       if (!before(tp->snd_una, tp->frto_highmark)) {
+       if (!before(tp->snd_una, tp->frto_highmark) ||
+           !tcp_packets_in_flight(tp)) {
                tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
                return true;
        }
@@ -5647,8 +5648,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
         * the remote receives only the retransmitted (regular) SYNs: either
         * the original SYN-data or the corresponding SYN-ACK is lost.
         */
-       syn_drop = (cookie->len <= 0 && data &&
-                   inet_csk(sk)->icsk_retransmits);
+       syn_drop = (cookie->len <= 0 && data && tp->total_retrans);

        tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a1cfc692df0..0eaf685bddc9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;

        case TCP_SYN_SENT:
@@ -1501,8 +1502,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }

        req = inet_reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
@@ -1667,6 +1670,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
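The tcp_cong.c hunks guard tcp_slow_start() against a zero snd_cwnd. A
minimal user-space sketch of the same loop shows why (toy_tcp_sock and
toy_slow_start are illustrative stand-ins, not the kernel's definitions):
with snd_cwnd == 0, the while condition snd_cwnd_cnt >= snd_cwnd is always
true under unsigned arithmetic and the loop never terminates, which is
exactly the bug the local copy closes.

#include <stdio.h>

/* Illustrative stand-in for the kernel tcp_sock fields used here. */
struct toy_tcp_sock {
        unsigned int snd_cwnd;          /* congestion window, in packets */
        unsigned int snd_cwnd_cnt;      /* acked packets since last cwnd bump */
        unsigned int snd_cwnd_clamp;    /* upper bound on snd_cwnd */
};

static unsigned int toy_min(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static void toy_slow_start(struct toy_tcp_sock *tp)
{
        unsigned int delta = 0;
        unsigned int snd_cwnd = tp->snd_cwnd;

        /* The guard added by the patch: a zero cwnd would make the loop
         * below spin forever, so force the local copy up to 1. */
        if (snd_cwnd == 0) {
                fprintf(stderr, "snd_cwnd is 0, clamping to 1\n");
                snd_cwnd = 1;
        }

        /* Exponential increase: grow cwnd by one packet for every
         * cwnd's worth of newly acknowledged packets. */
        tp->snd_cwnd_cnt += snd_cwnd;
        while (tp->snd_cwnd_cnt >= snd_cwnd) {
                tp->snd_cwnd_cnt -= snd_cwnd;
                delta++;
        }
        tp->snd_cwnd = toy_min(snd_cwnd + delta, tp->snd_cwnd_clamp);
}

int main(void)
{
        struct toy_tcp_sock tp = { 0, 0, 65535 };

        toy_slow_start(&tp);    /* would never return without the guard */
        printf("cwnd after one slow-start round: %u\n", tp.snd_cwnd);
        return 0;
}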
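The tcp_input.c change widens the FR-TO bail-out test, which rests on the
wrap-safe sequence comparison before(). As a self-contained aside, the
sketch below mirrors the kernel's before() helper under the name
seq_before() (the sample values are made up) to show why a plain '<'
cannot compare 32-bit TCP sequence numbers across wraparound.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's before(): seq1 precedes seq2 iff the signed
 * 32-bit difference is negative, which stays correct across the
 * sequence-number wrap. */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        /* Values straddling the wrap point: 0xfffffff0 logically
         * precedes 0x00000010 in TCP sequence space. */
        uint32_t a = 0xfffffff0u, b = 0x00000010u;

        printf("plain a < b  : %d\n", a < b);            /* 0, wrong */
        printf("seq_before() : %d\n", seq_before(a, b)); /* 1, right */
        return 0;
}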
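The tcp_ipv4.c hunks make listener drops visible through the SNMP
counters LINUX_MIB_LISTENDROPS and LINUX_MIB_LISTENOVERFLOWS, which show
up in /proc/net/netstat as the TcpExt fields ListenDrops and
ListenOverflows. A rough user-space reader, assuming the usual two-line
header/value TcpExt layout, might look like:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char header[4096], values[4096];
        FILE *f = fopen("/proc/net/netstat", "r");

        if (!f) {
                perror("fopen /proc/net/netstat");
                return 1;
        }

        /* TcpExt appears as a name line followed by a value line. */
        while (fgets(header, sizeof(header), f)) {
                char *hsave, *vsave, *h, *v;

                if (strncmp(header, "TcpExt:", 7) != 0)
                        continue;
                if (!fgets(values, sizeof(values), f))
                        break;

                /* Walk both lines in lockstep: counter name from the
                 * header line, counter value from the value line. */
                h = strtok_r(header, " \n", &hsave);
                v = strtok_r(values, " \n", &vsave);
                while (h && v) {
                        if (!strcmp(h, "ListenOverflows") ||
                            !strcmp(h, "ListenDrops"))
                                printf("%s = %s\n", h, v);
                        h = strtok_r(NULL, " \n", &hsave);
                        v = strtok_r(NULL, " \n", &vsave);
                }
                break;
        }
        fclose(f);
        return 0;
}

Where iproute2 is available, the same counters can be read directly with
nstat -az TcpExtListenDrops TcpExtListenOverflows.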