Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile      2
-rw-r--r--  lib/hash.c       39
-rw-r--r--  lib/rhashtable.c 53
-rw-r--r--  lib/test_bpf.c   43
4 files changed, 71 insertions, 66 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 7512dc978f18..04e53dd16070 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o hash.o rhashtable.o
+ percpu-refcount.o percpu_ida.o rhashtable.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
diff --git a/lib/hash.c b/lib/hash.c
deleted file mode 100644
index fea973f4bd57..000000000000
--- a/lib/hash.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* General purpose hashing library
- *
- * That's a start of a kernel hashing library, which can be extended
- * with further algorithms in future. arch_fast_hash{2,}() will
- * eventually resolve to an architecture optimized implementation.
- *
- * Copyright 2013 Francesco Fusco <ffusco@redhat.com>
- * Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
- * Copyright 2013 Thomas Graf <tgraf@redhat.com>
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
- */
-
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/cache.h>
-
-static struct fast_hash_ops arch_hash_ops __read_mostly = {
- .hash = jhash,
- .hash2 = jhash2,
-};
-
-u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
- return arch_hash_ops.hash(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash);
-
-u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
- return arch_hash_ops.hash2(data, len, seed);
-}
-EXPORT_SYMBOL_GPL(arch_fast_hash2);
-
-static int __init hashlib_init(void)
-{
- setup_arch_fast_hash(&arch_hash_ops);
- return 0;
-}
-early_initcall(hashlib_init);
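With lib/hash.c removed, arch_fast_hash() and arch_fast_hash2() are gone. As the
deleted code shows, they fell back to jhash()/jhash2() unless an architecture
overrode them, so remaining call sites can call the jhash helpers directly. A
minimal sketch of such a call site, assuming an illustrative flow_key structure
that is not part of this patch:

    #include <linux/jhash.h>

    /* Illustrative fixed-size key; any blob previously fed to
     * arch_fast_hash() can be handed to jhash() the same way.
     */
    struct flow_key {
            __be32 src;
            __be32 dst;
            __be16 sport;
            __be16 dport;
    };

    static u32 flow_hash(const struct flow_key *key, u32 seed)
    {
            return jhash(key, sizeof(*key), seed);
    }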
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 624a0b7c05ef..e5f5e69c7a7b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -32,7 +32,7 @@
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
- return ht->p.mutex_is_held();
+ return ht->p.mutex_is_held(ht->p.parent);
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif
@@ -107,13 +107,13 @@ static u32 head_hashfn(const struct rhashtable *ht,
return obj_hashfn(ht, rht_obj(ht, he), hsize);
}
-static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+static struct bucket_table *bucket_table_alloc(size_t nbuckets)
{
struct bucket_table *tbl;
size_t size;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
- tbl = kzalloc(size, flags);
+ tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (tbl == NULL)
tbl = vzalloc(size);
@@ -200,7 +200,6 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
/**
* rhashtable_expand - Expand hash table while allowing concurrent lookups
* @ht: the hash table to expand
- * @flags: allocation flags
*
* A secondary bucket array is allocated and the hash entries are migrated
* while keeping them on both lists until the end of the RCU grace period.
@@ -211,7 +210,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
* The caller must ensure that no concurrent table mutations take place.
* It is however valid to have concurrent lookups if they are RCU protected.
*/
-int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+int rhashtable_expand(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
struct rhash_head *he;
@@ -223,7 +222,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
return 0;
- new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+ new_tbl = bucket_table_alloc(old_tbl->size * 2);
if (new_tbl == NULL)
return -ENOMEM;
@@ -281,7 +280,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
/**
* rhashtable_shrink - Shrink hash table while allowing concurrent lookups
* @ht: the hash table to shrink
- * @flags: allocation flags
*
* This function may only be called in a context where it is safe to call
* synchronize_rcu(), e.g. not within a rcu_read_lock() section.
@@ -289,7 +287,7 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
* The caller must ensure that no concurrent table mutations take place.
* It is however valid to have concurrent lookups if they are RCU protected.
*/
-int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+int rhashtable_shrink(struct rhashtable *ht)
{
struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
struct rhash_head __rcu **pprev;
@@ -300,7 +298,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
if (ht->shift <= ht->p.min_shift)
return 0;
- ntbl = bucket_table_alloc(tbl->size / 2, flags);
+ ntbl = bucket_table_alloc(tbl->size / 2);
if (ntbl == NULL)
return -ENOMEM;
@@ -341,7 +339,6 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
* rhashtable_insert - insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
- * @flags: allocation flags (table expansion)
*
* Will automatically grow the table via rhashtable_expand() if the
* grow_decision function specified at rhashtable_init() returns true.
@@ -349,8 +346,7 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
* The caller must ensure that no concurrent table mutations occur. It is
* however valid to have concurrent lookups if they are RCU protected.
*/
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
- gfp_t flags)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
u32 hash;
@@ -363,7 +359,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
ht->nelems++;
if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
- rhashtable_expand(ht, flags);
+ rhashtable_expand(ht);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
@@ -372,14 +368,13 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
* @ht: hash table
* @obj: pointer to hash head inside object
* @pprev: pointer to previous element
- * @flags: allocation flags (table expansion)
*
* Identical to rhashtable_remove() but caller is already aware of the element
* in front of the element to be deleted. This is in particular useful for
* deletion when combined with walking or lookup.
*/
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
- struct rhash_head __rcu **pprev, gfp_t flags)
+ struct rhash_head __rcu **pprev)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
@@ -390,7 +385,7 @@ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
if (ht->p.shrink_decision &&
ht->p.shrink_decision(ht, tbl->size))
- rhashtable_shrink(ht, flags);
+ rhashtable_shrink(ht);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
@@ -398,7 +393,6 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
* rhashtable_remove - remove object from hash table
* @ht: hash table
* @obj: pointer to hash head inside object
- * @flags: allocation flags (table expansion)
*
* Since the hash chain is single linked, the removal operation needs to
* walk the bucket chain upon removal. The removal operation is thus
@@ -410,8 +404,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
* The caller must ensure that no concurrent table mutations occur. It is
* however valid to have concurrent lookups if they are RCU protected.
*/
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
- gfp_t flags)
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
struct rhash_head __rcu **pprev;
@@ -429,7 +422,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
continue;
}
- rhashtable_remove_pprev(ht, he, pprev, flags);
+ rhashtable_remove_pprev(ht, he, pprev);
return true;
}
@@ -532,7 +525,9 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
* .key_offset = offsetof(struct test_obj, key),
* .key_len = sizeof(int),
* .hashfn = arch_fast_hash,
+ * #ifdef CONFIG_PROVE_LOCKING
* .mutex_is_held = &my_mutex_is_held,
+ * #endif
* };
*
* Configuration Example 2: Variable length keys
@@ -552,7 +547,9 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params)
* .head_offset = offsetof(struct test_obj, node),
* .hashfn = arch_fast_hash,
* .obj_hashfn = my_hash_fn,
+ * #ifdef CONFIG_PROVE_LOCKING
* .mutex_is_held = &my_mutex_is_held,
+ * #endif
* };
*/
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
@@ -572,7 +569,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
if (params->nelem_hint)
size = rounded_hashtable_size(params);
- tbl = bucket_table_alloc(size, GFP_KERNEL);
+ tbl = bucket_table_alloc(size);
if (tbl == NULL)
return -ENOMEM;
@@ -613,10 +610,12 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy);
#define TEST_PTR ((void *) 0xdeadbeef)
#define TEST_NEXPANDS 4
-static int test_mutex_is_held(void)
+#ifdef CONFIG_PROVE_LOCKING
+static int test_mutex_is_held(void *parent)
{
return 1;
}
+#endif
struct test_obj {
void *ptr;
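The test_mutex_is_held() stub above reflects the new callback signature:
mutex_is_held() now receives the parent pointer that the earlier hunk passes as
ht->p.parent. A real user would typically point lockdep at the lock guarding
its table; a hedged sketch, where my_table and its lock field are illustrative
names not taken from this patch:

    #ifdef CONFIG_PROVE_LOCKING
    static int my_mutex_is_held(void *parent)
    {
            struct my_table *t = parent;    /* whatever was set up as .parent */

            /* lock is assumed to carry lockdep information (e.g. a mutex) */
            return lockdep_is_held(&t->lock);
    }
    #endif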
@@ -707,7 +706,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
obj->ptr = TEST_PTR;
obj->value = i * 2;
- rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+ rhashtable_insert(ht, &obj->node);
}
rcu_read_lock();
@@ -718,7 +717,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
for (i = 0; i < TEST_NEXPANDS; i++) {
pr_info(" Table expansion iteration %u...\n", i);
- rhashtable_expand(ht, GFP_KERNEL);
+ rhashtable_expand(ht);
rcu_read_lock();
pr_info(" Verifying lookups...\n");
@@ -728,7 +727,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
for (i = 0; i < TEST_NEXPANDS; i++) {
pr_info(" Table shrinkage iteration %u...\n", i);
- rhashtable_shrink(ht, GFP_KERNEL);
+ rhashtable_shrink(ht);
rcu_read_lock();
pr_info(" Verifying lookups...\n");
@@ -743,7 +742,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
obj = rhashtable_lookup(ht, &key);
BUG_ON(!obj);
- rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+ rhashtable_remove(ht, &obj->node);
kfree(obj);
}
@@ -767,7 +766,9 @@ static int __init test_rht_init(void)
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(int),
.hashfn = arch_fast_hash,
+#ifdef CONFIG_PROVE_LOCKING
.mutex_is_held = &test_mutex_is_held,
+#endif
.grow_decision = rht_grow_above_75,
.shrink_decision = rht_shrink_below_30,
};
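Taken together, the rhashtable changes drop the gfp_t argument from the whole
mutating API; allocation policy is now fixed inside bucket_table_alloc()
(GFP_KERNEL with a vzalloc() fallback). Callers outside lib/ are converted the
same way as the test code in this diff, roughly:

    /* before this commit */
    rhashtable_insert(ht, &obj->node, GFP_KERNEL);
    rhashtable_remove(ht, &obj->node, GFP_KERNEL);
    rhashtable_expand(ht, GFP_KERNEL);
    rhashtable_shrink(ht, GFP_KERNEL);

    /* after */
    rhashtable_insert(ht, &obj->node);
    rhashtable_remove(ht, &obj->node);
    rhashtable_expand(ht);
    rhashtable_shrink(ht);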
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 23e070bcf72d..3f167d2eeb94 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1756,6 +1756,49 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } }
},
+ {
+ "nmap reduced",
+ .u.insns_int = {
+ BPF_MOV64_REG(R6, R1),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26),
+ BPF_MOV32_IMM(R0, 18),
+ BPF_STX_MEM(BPF_W, R10, R0, -64),
+ BPF_LDX_MEM(BPF_W, R7, R10, -64),
+ BPF_LD_IND(BPF_W, R7, 14),
+ BPF_STX_MEM(BPF_W, R10, R0, -60),
+ BPF_MOV32_IMM(R0, 280971478),
+ BPF_STX_MEM(BPF_W, R10, R0, -56),
+ BPF_LDX_MEM(BPF_W, R7, R10, -56),
+ BPF_LDX_MEM(BPF_W, R0, R10, -60),
+ BPF_ALU32_REG(BPF_SUB, R0, R7),
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 15),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13),
+ BPF_MOV32_IMM(R0, 22),
+ BPF_STX_MEM(BPF_W, R10, R0, -56),
+ BPF_LDX_MEM(BPF_W, R7, R10, -56),
+ BPF_LD_IND(BPF_H, R7, 14),
+ BPF_STX_MEM(BPF_W, R10, R0, -52),
+ BPF_MOV32_IMM(R0, 17366),
+ BPF_STX_MEM(BPF_W, R10, R0, -48),
+ BPF_LDX_MEM(BPF_W, R7, R10, -48),
+ BPF_LDX_MEM(BPF_W, R0, R10, -52),
+ BPF_ALU32_REG(BPF_SUB, R0, R7),
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 2),
+ BPF_MOV32_IMM(R0, 256),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
+ { { 38, 256 } }
+ },
};
static struct net_device dev;
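For readers decoding the new "nmap reduced" case: the 38-byte test frame is laid
out so that every comparison in the program succeeds. A reading of the data
(annotation only, not part of the patch):

    /*
     * [12..13]  08 06        EtherType 0x0806 (ARP), checked by the three
     *                        BPF_LD_ABS(BPF_H, 12) loads
     * [32..35]  10 bf 48 d6  word read via BPF_LD_IND(BPF_W, R7, 14) with
     *                        R7 = 18, i.e. 0x10bf48d6 = 280971478
     * [36..37]  43 d6        halfword read via BPF_LD_IND(BPF_H, R7, 14) with
     *                        R7 = 22, i.e. 0x43d6 = 17366
     *
     * With all branches falling through, the program returns 256, which lines
     * up with the expected { 38, 256 } entry (38-byte input, result 256).
     */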