author     David S. Miller <davem@davemloft.net>  2016-09-06 12:45:26 -0700
committer  David S. Miller <davem@davemloft.net>  2016-09-06 12:45:26 -0700
commit     60175ccdf46ac5076725cb3e66f6bc2e2766ad2b (patch)
tree       a5433388291a0151a00c54db596d2baf8ce649ab /include/linux/rhashtable.h
parent     2f5281ba2a8feaf6f0aee93356f350855bb530fc (diff)
parent     779994fa3636d46848edb402fe7517968e036e6f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for your net-next
tree. Most relevant updates are the removal of per-conntrack timers to
use a workqueue/garbage collection approach instead from Florian
Westphal, the hash and numgen expression for nf_tables from Laura
Garcia, updates on nf_tables hash set to honor the NLM_F_EXCL flag,
removal of ip_conntrack sysctl and many other incremental updates on
our Netfilter codebase. More specifically, they are:

1) Retrieve only 4 bytes to fetch ports in case of non-linear skb
   transport area in dccp, sctp, tcp, udp and udplite protocol
   conntrackers, from Gao Feng.

2) Missing whitespace on error message in physdev match, from Hangbin
   Liu.

3) Skip redundant IPv4 checksum calculation in nf_dup_ipv4, from
   Liping Zhang.

4) Add nf_ct_expires() helper function and use it, from Florian
   Westphal.

5) Replace opencoded nf_ct_kill() call in IPVS conntrack support, also
   from Florian.

6) Rename nf_tables set implementation to nft_set_{name}.c

7) Introduce the hash expression to allow arbitrary hashing of selector
   concatenations, from Laura Garcia Liebana.

8) Remove ip_conntrack sysctl backward compatibility code, this code
   has been around for long time already, and we have two interfaces
   to do this already: nf_conntrack sysctl and ctnetlink.

9) Use nf_conntrack_get_ht() helper function whenever possible, instead
   of opencoding fetch of hashtable pointer and size, patch from Liping
   Zhang.

10) Add quota expression for nf_tables.

11) Add number generator expression for nf_tables, this supports
    incremental and random generators that can be combined with maps,
    very useful for load balancing purpose, again from Laura Garcia
    Liebana.

12) Fix a typo in a debug message in FTP conntrack helper, from Colin
    Ian King.

13) Introduce a nft_chain_parse_hook() helper function to parse chain
    hook configuration, this is used by a follow up patch to perform
    better chain update validation.

14) Add rhashtable_lookup_get_insert_key() to rhashtable and use it
    from the nft_set_hash implementation to honor the NLM_F_EXCL flag.

15) Missing nulls check in nf_conntrack from nf_conntrack_tuple_taken(),
    patch from Florian Westphal.

16) Don't use the DYING bit to know if the conntrack event has been
    already delivered, instead a state variable to track event
    re-delivery states, also from Florian.

17) Remove the per-conntrack timer, use the workqueue approach that was
    discussed during the NFWS, from Florian Westphal.

18) Use the netlink conntrack table dump path to kill stale entries,
    again from Florian.

19) Add a garbage collector to get rid of stale conntracks, from
    Florian.

20) Reschedule garbage collector if eviction rate is high.

21) Get rid of the __nf_ct_kill_acct() helper.

22) Use ARPHRD_ETHER instead of hardcoded 1 from ARP logger.

23) Make nf_log_set() interface assertive on unsupported families.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
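Item 14 is the change the rhashtable.h diff below implements. As a rough
usage sketch of the new return convention (struct my_elem, my_insert()
and the error handling are hypothetical illustrations, not code from the
nft_set_hash patch):

#include <linux/err.h>
#include <linux/rhashtable.h>
#include <linux/types.h>

/* Hypothetical element type; only the embedded rhash_head matters here. */
struct my_elem {
	struct rhash_head	node;
	u32			key;
};

/*
 * Insert @new unless an element with the same key already exists.
 * Requires a table set up with an obj_hashfn (see the BUG_ON in the
 * diff below).
 */
static int my_insert(struct rhashtable *ht, struct my_elem *new,
		     const struct rhashtable_params params, bool excl)
{
	void *prev;

	prev = rhashtable_lookup_get_insert_key(ht, &new->key, &new->node,
						params);
	if (IS_ERR(prev))		/* e.g. -ENOMEM or -EBUSY */
		return PTR_ERR(prev);
	if (prev)			/* clash: the existing object */
		return excl ? -EEXIST : 0;
	return 0;			/* NULL: insertion succeeded */
}

The three outcomes map directly onto the kerneldoc added below: an
existing object on a clash, NULL on successful insertion, and an
ERR_PTR() on failure.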
Diffstat (limited to 'include/linux/rhashtable.h')
-rw-r--r--  include/linux/rhashtable.h  70
1 file changed, 57 insertions(+), 13 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 8b72ee710f95..fd82584acd48 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -343,7 +343,8 @@ int rhashtable_init(struct rhashtable *ht,
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
const void *key,
struct rhash_head *obj,
- struct bucket_table *old_tbl);
+ struct bucket_table *old_tbl,
+ void **data);
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
void rhashtable_walk_enter(struct rhashtable *ht,
@@ -563,8 +564,11 @@ restart:
return NULL;
}
-/* Internal function, please use rhashtable_insert_fast() instead */
-static inline int __rhashtable_insert_fast(
+/* Internal function, please use rhashtable_insert_fast() instead. This
+ * function returns the existing element already in hashes if there is a clash,
+ * otherwise it returns an error via ERR_PTR().
+ */
+static inline void *__rhashtable_insert_fast(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -577,6 +581,7 @@ static inline int __rhashtable_insert_fast(
spinlock_t *lock;
unsigned int elasticity;
unsigned int hash;
+ void *data = NULL;
int err;
restart:
@@ -601,11 +606,14 @@ restart:
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(new_tbl)) {
- tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data);
if (!IS_ERR_OR_NULL(tbl))
goto slow_path;
err = PTR_ERR(tbl);
+ if (err == -EEXIST)
+ err = 0;
+
goto out;
}
@@ -619,25 +627,25 @@ slow_path:
err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
if (err)
- return err;
+ return ERR_PTR(err);
goto restart;
}
- err = -EEXIST;
+ err = 0;
elasticity = ht->elasticity;
rht_for_each(head, tbl, hash) {
if (key &&
unlikely(!(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head)))))
+ rhashtable_compare(&arg, rht_obj(ht, head))))) {
+ data = rht_obj(ht, head);
goto out;
+ }
if (!--elasticity)
goto slow_path;
}
- err = 0;
-
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
@@ -652,7 +660,7 @@ out:
spin_unlock_bh(lock);
rcu_read_unlock();
- return err;
+ return err ? ERR_PTR(err) : data;
}
/**
@@ -675,7 +683,13 @@ static inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
- return __rhashtable_insert_fast(ht, NULL, obj, params);
+ void *ret;
+
+ ret = __rhashtable_insert_fast(ht, NULL, obj, params);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
}
/**
@@ -704,11 +718,15 @@ static inline int rhashtable_lookup_insert_fast(
const struct rhashtable_params params)
{
const char *key = rht_obj(ht, obj);
+ void *ret;
BUG_ON(ht->p.obj_hashfn);
- return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
- params);
+ ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
}
/**
@@ -737,6 +755,32 @@ static inline int rhashtable_lookup_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
+ void *ret;
+
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ ret = __rhashtable_insert_fast(ht, key, obj, params);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ * @key: pointer to key
+ *
+ * Just like rhashtable_lookup_insert_key(), but this function returns the
+ * object if it exists, NULL if it does not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
BUG_ON(!ht->p.obj_hashfn || !key);
return __rhashtable_insert_fast(ht, key, obj, params);
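The key-based insert paths above require a table whose parameters set
obj_hashfn (note the BUG_ON(!ht->p.obj_hashfn || !key)). A minimal,
hypothetical parameter block to go with struct my_elem from the earlier
sketch might look as follows; the jhash-based hash functions and the
names are assumptions, not part of this patch:

#include <linux/jhash.h>
#include <linux/rhashtable.h>
#include <linux/stddef.h>
#include <linux/string.h>

/* Hash the standalone lookup key (a u32 here). */
static u32 my_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash(data, len, seed);
}

/* Hash the key embedded in a struct my_elem (from the earlier sketch). */
static u32 my_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct my_elem *e = data;

	return jhash(&e->key, sizeof(e->key), seed);
}

/* Return 0 when the lookup key matches the object's key. */
static int my_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct my_elem *e = obj;

	return memcmp(arg->key, &e->key, sizeof(e->key));
}

static const struct rhashtable_params my_params = {
	.head_offset	= offsetof(struct my_elem, node),
	.key_len	= sizeof(u32),
	.hashfn		= my_key_hashfn,
	.obj_hashfn	= my_obj_hashfn,
	.obj_cmpfn	= my_obj_cmpfn,
};

A table initialized with rhashtable_init(&ht, &my_params) can then be
driven through rhashtable_lookup_get_insert_key() as in the earlier
sketch.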