summaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-03-31 23:25:40 -0400
committerDavid S. Miller <davem@davemloft.net>2018-03-31 23:25:40 -0400
commit70ae7222c61d4f19c844c8fe75f053f8976b9552 (patch)
treefd2adc5d5bf77f50ac9f50e126e5b39695bd4427 /include/linux
parent5749d6af4925a9da362b37df920c2a1dc54689a1 (diff)
parentf2d1c724fca176ddbd32a9397df49221afbcb227 (diff)
downloadblackbird-op-linux-70ae7222c61d4f19c844c8fe75f053f8976b9552.tar.gz
blackbird-op-linux-70ae7222c61d4f19c844c8fe75f053f8976b9552.zip
Merge branch 'inet-frags-bring-rhashtables-to-IP-defrag'
Eric Dumazet says: ==================== inet: frags: bring rhashtables to IP defrag IP defrag processing is one of the remaining problematic layers in linux. It uses static hash tables of 1024 buckets, and up to 128 items per bucket. A work queue is supposed to garbage collect items when host is under memory pressure, and doing a hash rebuild, changing seed used in hash computations. This work queue blocks softirqs for up to 25 ms when doing a hash rebuild, occurring every 5 seconds if host is under fire. Then there is the problem of sharing this hash table for all netns. It is time to switch to rhashtables, and allocate one of them per netns to speedup netns dismantle, since this is a critical metric these days. Lookup is now using RCU, and 64bit hosts can now provision whatever amount of memory needed to handle the expected workloads. v2: Addressed Herbert and Kirill feedbacks (Use rhashtable_free_and_destroy(), and split the big patch into small units) v3: Removed the extra add_frag_mem_limit(...) from inet_frag_create() Removed the refcount_inc_not_zero() call from inet_frags_free_cb(), as we can exploit del_timer() return value. v4: kbuild robot feedback about one missing static (squashed) Additional patches : inet: frags: do not clone skb in ip_expire() ipv6: frags: rewrite ip6_expire_frag_queue() rhashtable: reorganize struct rhashtable layout inet: frags: reorganize struct netns_frags inet: frags: get rid of ipfrag_skb_cb/FRAG_CB ipv6: frags: get rid of ip6frag_skb_cb/FRAG6_CB inet: frags: get rid of nf_ct_frag6_skb_cb/NFCT_FRAG6_CB ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/rhashtable.h8
-rw-r--r--include/linux/skbuff.h1
2 files changed, 5 insertions, 4 deletions
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 668a21f04b09..1f8ad121eb43 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -152,25 +152,25 @@ struct rhashtable_params {
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
- * @nelems: Number of elements in table
* @key_len: Key length for hashfn
- * @p: Configuration parameters
* @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
* @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- atomic_t nelems;
unsigned int key_len;
- struct rhashtable_params p;
unsigned int max_elems;
+ struct rhashtable_params p;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
+ atomic_t nelems;
};
/**
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 47082f54ec1f..9065477ed255 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -672,6 +672,7 @@ struct sk_buff {
* UDP receive path is one user.
*/
unsigned long dev_scratch;
+ int ip_defrag_offset;
};
};
struct rb_node rbnode; /* used in netem & tcp stack */
OpenPOWER on IntegriCloud