Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/fib_trie.c       2
-rw-r--r--   net/ipv4/tcp.c           12
-rw-r--r--   net/ipv4/tcp_ipv4.c      36
-rw-r--r--   net/ipv4/tcp_offload.c    8
4 files changed, 37 insertions, 21 deletions
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5afeb5aa4c7c..e9cb2588e416 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -940,7 +940,7 @@ static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
 			last = li;
 		}
 		if (last)
-			hlist_add_after_rcu(&last->hlist, &new->hlist);
+			hlist_add_behind_rcu(&new->hlist, &last->hlist);
 		else
 			hlist_add_before_rcu(&new->hlist, &li->hlist);
 	}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 744af67a5989..181b70ebd964 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -426,13 +426,15 @@ void tcp_init_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_init_sock);
 
-void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
+static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	if (sk->sk_tsflags) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-	sock_tx_timestamp(sk, &shinfo->tx_flags);
-	if (shinfo->tx_flags & SKBTX_ANY_SW_TSTAMP)
-		shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
+		sock_tx_timestamp(sk, &shinfo->tx_flags);
+		if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
+			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
+	}
 }
 
 /*
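
The tcp.c hunk makes tcp_tx_timestamp() static, skips the skb_shared_info work entirely when the socket has no timestamping flags set (sk->sk_tsflags == 0), and widens the mask from SKBTX_ANY_SW_TSTAMP to SKBTX_ANY_TSTAMP so hardware timestamp requests also get a tskey. The tskey itself is the sequence number of the last byte carried by the skb. A trivial standalone illustration of that computation (the seq and len values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t seq = 1000;	/* TCP_SKB_CB(skb)->seq: sequence of the first byte  */
	uint32_t len = 1448;	/* skb->len: payload bytes carried by this skb       */
	uint32_t tskey = seq + len - 1;	/* sequence number of the last byte  */

	printf("tskey = %u\n", tskey);	/* prints 2447 */
	return 0;
}
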
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 992a1f926009..dceff5fe8e66 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1167,7 +1167,8 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
+				       const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1220,6 +1221,17 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 	return false;
 }
 
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = __tcp_v4_inbound_md5_hash(sk, skb);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 #endif
 
 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
@@ -1432,16 +1444,6 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sock *rsk;
-#ifdef CONFIG_TCP_MD5SIG
-	/*
-	 * We really want to reject the packet as early as possible
-	 * if:
-	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
-	 *  o There is an MD5 option and we're not expecting one
-	 */
-	if (tcp_v4_inbound_md5_hash(sk, skb))
-		goto discard;
-#endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		struct dst_entry *dst = sk->sk_rx_dst;
@@ -1644,6 +1646,18 @@ process:
 
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
+
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * We really want to reject the packet as early as possible
+	 * if:
+	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
+	 *  o There is an MD5 option and we're not expecting one
+	 */
+	if (tcp_v4_inbound_md5_hash(sk, skb))
+		goto discard_and_relse;
+#endif
+
 	nf_reset(skb);
 
 	if (sk_filter(sk, skb))
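
The tcp_ipv4.c hunks split the MD5 check into a lockless __tcp_v4_inbound_md5_hash() worker plus a wrapper that brackets it with rcu_read_lock()/rcu_read_unlock(), and move the call from tcp_v4_do_rcv() up into tcp_v4_rcv(), so segments with bad or unexpected MD5 signatures are dropped before the socket is locked or the socket filter runs. A userspace analogy of the worker-plus-wrapper pattern only (RCU is kernel infrastructure; a pthread rwlock stands in for the read-side critical section, and all names here are hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Caller must hold the read lock, like __tcp_v4_inbound_md5_hash(). */
static bool __lookup_says_drop(int key)
{
	return key == 42;	/* placeholder check */
}

/* Thin wrapper that brackets the lookup with the read-side lock. */
static bool lookup_says_drop(int key)
{
	bool ret;

	pthread_rwlock_rdlock(&table_lock);
	ret = __lookup_says_drop(key);
	pthread_rwlock_unlock(&table_lock);

	return ret;
}

int main(void)
{
	printf("%d %d\n", lookup_says_drop(1), lookup_says_drop(42));	/* 0 1 */
	return 0;
}
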
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index f597119fc4e7..bc1b83cb8309 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -14,12 +14,12 @@
 #include <net/tcp.h>
 #include <net/protocol.h>
 
-void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, unsigned int seq,
-		    unsigned int mss)
+static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
+			   unsigned int seq, unsigned int mss)
 {
 	while (skb) {
-		if (ts_seq < (__u64) seq + mss) {
-			skb_shinfo(skb)->tx_flags = SKBTX_SW_TSTAMP;
+		if (before(ts_seq, seq + mss)) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
 			skb_shinfo(skb)->tskey = ts_seq;
 			return;
 		}
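
The tcp_offload.c hunk makes tcp_gso_tstamp() static, uses |= so tx_flags bits already set on the segment are preserved, and replaces the widened 64-bit comparison with before(), which compares 32-bit TCP sequence numbers by signed difference and therefore keeps working when the sequence space wraps. A standalone sketch of why that matters (before() here mirrors the helper of the same name in include/net/tcp.h; the values are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if seq1 comes earlier than seq2 in 32-bit TCP sequence space. */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t ts_seq = 0x00000010;	/* byte to timestamp, already wrapped */
	uint32_t seq    = 0xffff0000;	/* first byte of the current segment  */
	uint32_t mss    = 1448;		/* segment payload size               */

	/* The 64-bit sum never wraps, so a wrapped ts_seq looks like it falls
	 * in this early segment; the signed 32-bit difference used by before()
	 * correctly places it after the wrap. */
	printf("old check: %d, before(): %d\n",
	       ts_seq < (uint64_t)seq + mss,	/* 1 (wrong segment) */
	       before(ts_seq, seq + mss));	/* 0 (comes later)   */
	return 0;
}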