Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |   3
-rw-r--r--  lib/Kconfig.debug                |   3
-rw-r--r--  lib/Makefile                     |   1
-rw-r--r--  lib/bitmap.c                     |   5
-rw-r--r--  lib/bucket_locks.c               |   5
-rw-r--r--  lib/idr.c                        |  10
-rw-r--r--  lib/iov_iter.c                   |  61
-rw-r--r--  lib/kobject_uevent.c             | 178
-rw-r--r--  lib/mpi/mpi-internal.h           |  75
-rw-r--r--  lib/percpu_ida.c                 |  63
-rw-r--r--  lib/reed_solomon/decode_rs.c     |  34
-rw-r--r--  lib/reed_solomon/encode_rs.c     |  15
-rw-r--r--  lib/reed_solomon/reed_solomon.c  | 240
-rw-r--r--  lib/rhashtable.c                 |  51
-rw-r--r--  lib/test_bpf.c                   | 595
-rw-r--r--  lib/test_overflow.c              | 417
-rw-r--r--  lib/test_printf.c                |   2
-rw-r--r--  lib/ucs2_string.c                |   2
-rw-r--r--  lib/vsprintf.c                   | 133
19 files changed, 1307 insertions(+), 586 deletions(-)
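The largest functional addition below is lib/test_overflow.c, a selftest for the check_add_overflow(), check_sub_overflow() and check_mul_overflow() helpers from <linux/overflow.h>. As a hedged illustration of the calling convention the test exercises (this sketch is not part of the patch, and alloc_elems() is a hypothetical caller): each helper stores the possibly-wrapped result through its third argument and returns true when the operation overflowed.

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical caller: refuse the allocation instead of wrapping. */
static void *alloc_elems(size_t nmemb, size_t elem_size, gfp_t gfp)
{
	size_t bytes;

	/* bytes = nmemb * elem_size; returns true if the multiply wrapped */
	if (check_mul_overflow(nmemb, elem_size, &bytes))
		return NULL;

	return kmalloc(bytes, gfp);
}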
diff --git a/lib/Kconfig b/lib/Kconfig index 7a913937888b..abc111eb5054 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -621,6 +621,9 @@ config ARCH_HAS_PMEM_API config ARCH_HAS_UACCESS_FLUSHCACHE bool +config ARCH_HAS_UACCESS_MCSAFE + bool + config STACKDEPOT bool select STACKTRACE diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 76555479ae36..eb885942eb0f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1802,6 +1802,9 @@ config TEST_BITMAP config TEST_UUID tristate "Test functions located in the uuid module at runtime" +config TEST_OVERFLOW + tristate "Test check_*_overflow() functions at runtime" + config TEST_RHASHTABLE tristate "Perform selftest on resizable hash table" default n diff --git a/lib/Makefile b/lib/Makefile index 9f18c8152281..84c6dcb31fbb 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -60,6 +60,7 @@ UBSAN_SANITIZE_test_ubsan.o := y obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o obj-$(CONFIG_TEST_LKM) += test_module.o +obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o obj-$(CONFIG_TEST_SORT) += test_sort.o obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o diff --git a/lib/bitmap.c b/lib/bitmap.c index a42eff7e8c48..58f9750e49c6 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -64,12 +64,9 @@ EXPORT_SYMBOL(__bitmap_equal); void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits) { - unsigned int k, lim = bits/BITS_PER_LONG; + unsigned int k, lim = BITS_TO_LONGS(bits); for (k = 0; k < lim; ++k) dst[k] = ~src[k]; - - if (bits % BITS_PER_LONG) - dst[k] = ~src[k]; } EXPORT_SYMBOL(__bitmap_complement); diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c index 266a97c5708b..ade3ce6c4af6 100644 --- a/lib/bucket_locks.c +++ b/lib/bucket_locks.c @@ -30,10 +30,7 @@ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask, } if (sizeof(spinlock_t) != 0) { - if (gfpflags_allow_blocking(gfp)) - tlocks = kvmalloc(size * sizeof(spinlock_t), gfp); - else - tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp); + tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp); if (!tlocks) return -ENOMEM; for (i = 0; i < size; i++) diff --git a/lib/idr.c b/lib/idr.c index 823b813f08f8..ed9c169c12bd 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -4,9 +4,9 @@ #include <linux/idr.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/xarray.h> DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap); -static DEFINE_SPINLOCK(simple_ida_lock); /** * idr_alloc_u32() - Allocate an ID. 
@@ -581,7 +581,7 @@ again: if (!ida_pre_get(ida, gfp_mask)) return -ENOMEM; - spin_lock_irqsave(&simple_ida_lock, flags); + xa_lock_irqsave(&ida->ida_rt, flags); ret = ida_get_new_above(ida, start, &id); if (!ret) { if (id > max) { @@ -591,7 +591,7 @@ again: ret = id; } } - spin_unlock_irqrestore(&simple_ida_lock, flags); + xa_unlock_irqrestore(&ida->ida_rt, flags); if (unlikely(ret == -EAGAIN)) goto again; @@ -615,8 +615,8 @@ void ida_simple_remove(struct ida *ida, unsigned int id) unsigned long flags; BUG_ON((int)id < 0); - spin_lock_irqsave(&simple_ida_lock, flags); + xa_lock_irqsave(&ida->ida_rt, flags); ida_remove(ida, id); - spin_unlock_irqrestore(&simple_ida_lock, flags); + xa_unlock_irqrestore(&ida->ida_rt, flags); } EXPORT_SYMBOL(ida_simple_remove); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index fdae394172fa..7e43cd54c84c 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -573,6 +573,67 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) } EXPORT_SYMBOL(_copy_to_iter); +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +static int copyout_mcsafe(void __user *to, const void *from, size_t n) +{ + if (access_ok(VERIFY_WRITE, to, n)) { + kasan_check_read(from, n); + n = copy_to_user_mcsafe((__force void *) to, from, n); + } + return n; +} + +static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset, + const char *from, size_t len) +{ + unsigned long ret; + char *to; + + to = kmap_atomic(page); + ret = memcpy_mcsafe(to + offset, from, len); + kunmap_atomic(to); + + return ret; +} + +size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i) +{ + const char *from = addr; + unsigned long rem, curr_addr, s_addr = (unsigned long) addr; + + if (unlikely(i->type & ITER_PIPE)) { + WARN_ON(1); + return 0; + } + if (iter_is_iovec(i)) + might_fault(); + iterate_and_advance(i, bytes, v, + copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len), + ({ + rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset, + (from += v.bv_len) - v.bv_len, v.bv_len); + if (rem) { + curr_addr = (unsigned long) from; + bytes = curr_addr - s_addr - rem; + return bytes; + } + }), + ({ + rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, + v.iov_len); + if (rem) { + curr_addr = (unsigned long) from; + bytes = curr_addr - s_addr - rem; + return bytes; + } + }) + ) + + return bytes; +} +EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe); +#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */ + size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 15ea216a67ce..63d0816ab23b 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -22,6 +22,7 @@ #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/netlink.h> +#include <linux/uidgid.h> #include <linux/uuid.h> #include <linux/ctype.h> #include <net/sock.h> @@ -231,30 +232,6 @@ out: return r; } -#ifdef CONFIG_NET -static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data) -{ - struct kobject *kobj = data, *ksobj; - const struct kobj_ns_type_operations *ops; - - ops = kobj_ns_ops(kobj); - if (!ops && kobj->kset) { - ksobj = &kobj->kset->kobj; - if (ksobj->parent != NULL) - ops = kobj_ns_ops(ksobj->parent); - } - - if (ops && ops->netlink_ns && kobj->ktype->namespace) { - const void *sock_ns, *ns; - ns = kobj->ktype->namespace(kobj); - sock_ns = ops->netlink_ns(dsk); - return sock_ns != ns; - } - - return 0; -} -#endif - #ifdef CONFIG_UEVENT_HELPER static int 
kobj_usermode_filter(struct kobject *kobj) { @@ -296,15 +273,44 @@ static void cleanup_uevent_env(struct subprocess_info *info) } #endif -static int kobject_uevent_net_broadcast(struct kobject *kobj, - struct kobj_uevent_env *env, +#ifdef CONFIG_NET +static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env, const char *action_string, const char *devpath) { - int retval = 0; -#if defined(CONFIG_NET) + struct netlink_skb_parms *parms; + struct sk_buff *skb = NULL; + char *scratch; + size_t len; + + /* allocate message with maximum possible size */ + len = strlen(action_string) + strlen(devpath) + 2; + skb = alloc_skb(len + env->buflen, GFP_KERNEL); + if (!skb) + return NULL; + + /* add header */ + scratch = skb_put(skb, len); + sprintf(scratch, "%s@%s", action_string, devpath); + + skb_put_data(skb, env->buf, env->buflen); + + parms = &NETLINK_CB(skb); + parms->creds.uid = GLOBAL_ROOT_UID; + parms->creds.gid = GLOBAL_ROOT_GID; + parms->dst_group = 1; + parms->portid = 0; + + return skb; +} + +static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env, + const char *action_string, + const char *devpath) +{ struct sk_buff *skb = NULL; struct uevent_sock *ue_sk; + int retval = 0; /* send netlink message */ list_for_each_entry(ue_sk, &uevent_sock_list, list) { @@ -314,37 +320,99 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj, continue; if (!skb) { - /* allocate message with the maximum possible size */ - size_t len = strlen(action_string) + strlen(devpath) + 2; - char *scratch; - retval = -ENOMEM; - skb = alloc_skb(len + env->buflen, GFP_KERNEL); + skb = alloc_uevent_skb(env, action_string, devpath); if (!skb) continue; - - /* add header */ - scratch = skb_put(skb, len); - sprintf(scratch, "%s@%s", action_string, devpath); - - skb_put_data(skb, env->buf, env->buflen); - - NETLINK_CB(skb).dst_group = 1; } - retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb), - 0, 1, GFP_KERNEL, - kobj_bcast_filter, - kobj); + retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1, + GFP_KERNEL); /* ENOBUFS should be handled in userspace */ if (retval == -ENOBUFS || retval == -ESRCH) retval = 0; } consume_skb(skb); -#endif + return retval; } +static int uevent_net_broadcast_tagged(struct sock *usk, + struct kobj_uevent_env *env, + const char *action_string, + const char *devpath) +{ + struct user_namespace *owning_user_ns = sock_net(usk)->user_ns; + struct sk_buff *skb = NULL; + int ret = 0; + + skb = alloc_uevent_skb(env, action_string, devpath); + if (!skb) + return -ENOMEM; + + /* fix credentials */ + if (owning_user_ns != &init_user_ns) { + struct netlink_skb_parms *parms = &NETLINK_CB(skb); + kuid_t root_uid; + kgid_t root_gid; + + /* fix uid */ + root_uid = make_kuid(owning_user_ns, 0); + if (uid_valid(root_uid)) + parms->creds.uid = root_uid; + + /* fix gid */ + root_gid = make_kgid(owning_user_ns, 0); + if (gid_valid(root_gid)) + parms->creds.gid = root_gid; + } + + ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL); + /* ENOBUFS should be handled in userspace */ + if (ret == -ENOBUFS || ret == -ESRCH) + ret = 0; + + return ret; +} +#endif + +static int kobject_uevent_net_broadcast(struct kobject *kobj, + struct kobj_uevent_env *env, + const char *action_string, + const char *devpath) +{ + int ret = 0; + +#ifdef CONFIG_NET + const struct kobj_ns_type_operations *ops; + const struct net *net = NULL; + + ops = kobj_ns_ops(kobj); + if (!ops && kobj->kset) { + struct kobject *ksobj = &kobj->kset->kobj; + if (ksobj->parent != NULL) + ops = 
kobj_ns_ops(ksobj->parent); + } + + /* kobjects currently only carry network namespace tags and they + * are the only tag relevant here since we want to decide which + * network namespaces to broadcast the uevent into. + */ + if (ops && ops->netlink_ns && kobj->ktype->namespace) + if (ops->type == KOBJ_NS_TYPE_NET) + net = kobj->ktype->namespace(kobj); + + if (!net) + ret = uevent_net_broadcast_untagged(env, action_string, + devpath); + else + ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env, + action_string, devpath); +#endif + + return ret; +} + static void zap_modalias_env(struct kobj_uevent_env *env) { static const char modalias_prefix[] = "MODALIAS="; @@ -703,9 +771,13 @@ static int uevent_net_init(struct net *net) net->uevent_sock = ue_sk; - mutex_lock(&uevent_sock_mutex); - list_add_tail(&ue_sk->list, &uevent_sock_list); - mutex_unlock(&uevent_sock_mutex); + /* Restrict uevents to initial user namespace. */ + if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) { + mutex_lock(&uevent_sock_mutex); + list_add_tail(&ue_sk->list, &uevent_sock_list); + mutex_unlock(&uevent_sock_mutex); + } + return 0; } @@ -713,9 +785,11 @@ static void uevent_net_exit(struct net *net) { struct uevent_sock *ue_sk = net->uevent_sock; - mutex_lock(&uevent_sock_mutex); - list_del(&ue_sk->list); - mutex_unlock(&uevent_sock_mutex); + if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) { + mutex_lock(&uevent_sock_mutex); + list_del(&ue_sk->list); + mutex_unlock(&uevent_sock_mutex); + } netlink_kernel_release(ue_sk->sk); kfree(ue_sk); diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h index 7eceeddb3fb8..c2d6f4efcfbc 100644 --- a/lib/mpi/mpi-internal.h +++ b/lib/mpi/mpi-internal.h @@ -65,13 +65,6 @@ typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */ typedef int mpi_size_t; /* (must be a signed type) */ -static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) -{ - if (a->alloced < b) - return mpi_resize(a, b); - return 0; -} - /* Copy N limbs from S to D. */ #define MPN_COPY(d, s, n) \ do { \ @@ -80,13 +73,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) (d)[_i] = (s)[_i]; \ } while (0) -#define MPN_COPY_INCR(d, s, n) \ - do { \ - mpi_size_t _i; \ - for (_i = 0; _i < (n); _i++) \ - (d)[_i] = (s)[_i]; \ - } while (0) - #define MPN_COPY_DECR(d, s, n) \ do { \ mpi_size_t _i; \ @@ -111,15 +97,6 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) } \ } while (0) -#define MPN_NORMALIZE_NOT_ZERO(d, n) \ - do { \ - for (;;) { \ - if ((d)[(n)-1]) \ - break; \ - (n)--; \ - } \ - } while (0) - #define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \ do { \ if ((size) < KARATSUBA_THRESHOLD) \ @@ -128,46 +105,11 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) mul_n(prodp, up, vp, size, tspace); \ } while (0); -/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest - * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB). - * If this would yield overflow, DI should be the largest possible number - * (i.e., only ones). For correct operation, the most significant bit of D - * has to be set. Put the quotient in Q and the remainder in R. 
- */ -#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \ - do { \ - mpi_limb_t _q, _ql, _r; \ - mpi_limb_t _xh, _xl; \ - umul_ppmm(_q, _ql, (nh), (di)); \ - _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \ - umul_ppmm(_xh, _xl, _q, (d)); \ - sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \ - if (_xh) { \ - sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \ - _q++; \ - if (_xh) { \ - sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \ - _q++; \ - } \ - } \ - if (_r >= (d)) { \ - _r -= (d); \ - _q++; \ - } \ - (r) = _r; \ - (q) = _q; \ - } while (0) - /*-- mpiutil.c --*/ mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs); void mpi_free_limb_space(mpi_ptr_t a); void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs); -/*-- mpi-bit.c --*/ -void mpi_rshift_limbs(MPI a, unsigned int count); -int mpi_lshift_limbs(MPI a, unsigned int count); - -/*-- mpihelp-add.c --*/ static inline mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb); mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, @@ -175,7 +117,6 @@ mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, static inline mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_ptr_t s2_ptr, mpi_size_t s2_size); -/*-- mpihelp-sub.c --*/ static inline mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb); mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, @@ -183,10 +124,10 @@ mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, static inline mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_ptr_t s2_ptr, mpi_size_t s2_size); -/*-- mpihelp-cmp.c --*/ +/*-- mpih-cmp.c --*/ int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size); -/*-- mpihelp-mul.c --*/ +/*-- mpih-mul.c --*/ struct karatsuba_ctx { struct karatsuba_ctx *next; @@ -202,7 +143,6 @@ mpi_limb_t mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb); mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb); -int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size); int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result); void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size); @@ -214,21 +154,16 @@ int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp, mpi_ptr_t vp, mpi_size_t vsize, struct karatsuba_ctx *ctx); -/*-- mpihelp-mul_1.c (or xxx/cpu/ *.S) --*/ +/*-- generic_mpih-mul1.c --*/ mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb); -/*-- mpihelp-div.c --*/ -mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, - mpi_limb_t divisor_limb); +/*-- mpih-div.c --*/ mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs, mpi_ptr_t np, mpi_size_t nsize, mpi_ptr_t dp, mpi_size_t dsize); -mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr, - mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, - mpi_limb_t divisor_limb); -/*-- mpihelp-shift.c --*/ +/*-- generic_mpih-[lr]shift.c --*/ mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt); mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 6016f1deb1f5..9bbd9c5d375a 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -112,18 +112,6 @@ static inline void alloc_global_tags(struct percpu_ida 
*pool,
 	  min(pool->nr_free, pool->percpu_batch_size));
 }

-static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
-{
-	int tag = -ENOSPC;
-
-	spin_lock(&tags->lock);
-	if (tags->nr_free)
-		tag = tags->freelist[--tags->nr_free];
-	spin_unlock(&tags->lock);
-
-	return tag;
-}
-
 /**
  * percpu_ida_alloc - allocate a tag
  * @pool: pool to allocate from
@@ -147,20 +135,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
 	DEFINE_WAIT(wait);
 	struct percpu_ida_cpu *tags;
 	unsigned long flags;
-	int tag;
+	int tag = -ENOSPC;

-	local_irq_save(flags);
-	tags = this_cpu_ptr(pool->tag_cpu);
+	tags = raw_cpu_ptr(pool->tag_cpu);
+	spin_lock_irqsave(&tags->lock, flags);

 	/* Fastpath */
-	tag = alloc_local_tag(tags);
-	if (likely(tag >= 0)) {
-		local_irq_restore(flags);
+	if (likely(tags->nr_free)) {
+		tag = tags->freelist[--tags->nr_free];
+		spin_unlock_irqrestore(&tags->lock, flags);
 		return tag;
 	}
+	spin_unlock_irqrestore(&tags->lock, flags);

 	while (1) {
-		spin_lock(&pool->lock);
+		spin_lock_irqsave(&pool->lock, flags);
+		tags = this_cpu_ptr(pool->tag_cpu);

 		/*
 		 * prepare_to_wait() must come before steal_tags(), in case
@@ -184,8 +174,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
 				&pool->cpus_have_tags);
 		}

-		spin_unlock(&pool->lock);
-		local_irq_restore(flags);
+		spin_unlock_irqrestore(&pool->lock, flags);

 		if (tag >= 0 || state == TASK_RUNNING)
 			break;
@@ -196,9 +185,6 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
 		}

 		schedule();
-
-		local_irq_save(flags);
-		tags = this_cpu_ptr(pool->tag_cpu);
 	}
 	if (state != TASK_RUNNING)
 		finish_wait(&pool->wait, &wait);
@@ -222,28 +208,24 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)

 	BUG_ON(tag >= pool->nr_tags);

-	local_irq_save(flags);
-	tags = this_cpu_ptr(pool->tag_cpu);
+	tags = raw_cpu_ptr(pool->tag_cpu);

-	spin_lock(&tags->lock);
+	spin_lock_irqsave(&tags->lock, flags);
 	tags->freelist[tags->nr_free++] = tag;

 	nr_free = tags->nr_free;
-	spin_unlock(&tags->lock);

 	if (nr_free == 1) {
 		cpumask_set_cpu(smp_processor_id(),
 				&pool->cpus_have_tags);
 		wake_up(&pool->wait);
 	}
+	spin_unlock_irqrestore(&tags->lock, flags);

 	if (nr_free == pool->percpu_max_size) {
-		spin_lock(&pool->lock);
+		spin_lock_irqsave(&pool->lock, flags);
+		spin_lock(&tags->lock);

-		/*
-		 * Global lock held and irqs disabled, don't need percpu
-		 * lock
-		 */
 		if (tags->nr_free == pool->percpu_max_size) {
 			move_tags(pool->freelist, &pool->nr_free,
 				  tags->freelist, &tags->nr_free,
@@ -251,10 +233,9 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)

 			wake_up(&pool->wait);
 		}
-		spin_unlock(&pool->lock);
+		spin_unlock(&tags->lock);
+		spin_unlock_irqrestore(&pool->lock, flags);
 	}
-
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ida_free);

@@ -346,29 +327,27 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
 	struct percpu_ida_cpu *remote;
 	unsigned cpu, i, err = 0;

-	local_irq_save(flags);
 	for_each_possible_cpu(cpu) {
 		remote = per_cpu_ptr(pool->tag_cpu, cpu);
-		spin_lock(&remote->lock);
+		spin_lock_irqsave(&remote->lock, flags);
 		for (i = 0; i < remote->nr_free; i++) {
 			err = fn(remote->freelist[i], data);
 			if (err)
 				break;
 		}
-		spin_unlock(&remote->lock);
+		spin_unlock_irqrestore(&remote->lock, flags);
 		if (err)
 			goto out;
 	}

-	spin_lock(&pool->lock);
+	spin_lock_irqsave(&pool->lock, flags);
 	for (i = 0; i < pool->nr_free; i++) {
 		err = fn(pool->freelist[i], data);
 		if (err)
 			break;
 	}
-	spin_unlock(&pool->lock);
+	spin_unlock_irqrestore(&pool->lock, flags);
 out:
-	local_irq_restore(flags);
 	return err;
 }
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c index 0ec3f257ffdf..1db74eb098d0 100644 --- a/lib/reed_solomon/decode_rs.c +++ b/lib/reed_solomon/decode_rs.c @@ -1,22 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * lib/reed_solomon/decode_rs.c - * - * Overview: - * Generic Reed Solomon encoder / decoder library + * Generic Reed Solomon encoder / decoder library * * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de) * - * $Id: decode_rs.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $ - * - */ - -/* Generic data width independent code which is included by the - * wrappers. + * Generic data width independent code which is included by the wrappers. */ { + struct rs_codec *rs = rsc->codec; int deg_lambda, el, deg_omega; int i, j, r, k, pad; int nn = rs->nn; @@ -27,16 +21,22 @@ uint16_t *alpha_to = rs->alpha_to; uint16_t *index_of = rs->index_of; uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; - /* Err+Eras Locator poly and syndrome poly The maximum value - * of nroots is 8. So the necessary stack size will be about - * 220 bytes max. - */ - uint16_t lambda[nroots + 1], syn[nroots]; - uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1]; - uint16_t root[nroots], reg[nroots + 1], loc[nroots]; int count = 0; uint16_t msk = (uint16_t) rs->nn; + /* + * The decoder buffers are in the rs control struct. They are + * arrays sized [nroots + 1] + */ + uint16_t *lambda = rsc->buffers + RS_DECODE_LAMBDA * (nroots + 1); + uint16_t *syn = rsc->buffers + RS_DECODE_SYN * (nroots + 1); + uint16_t *b = rsc->buffers + RS_DECODE_B * (nroots + 1); + uint16_t *t = rsc->buffers + RS_DECODE_T * (nroots + 1); + uint16_t *omega = rsc->buffers + RS_DECODE_OMEGA * (nroots + 1); + uint16_t *root = rsc->buffers + RS_DECODE_ROOT * (nroots + 1); + uint16_t *reg = rsc->buffers + RS_DECODE_REG * (nroots + 1); + uint16_t *loc = rsc->buffers + RS_DECODE_LOC * (nroots + 1); + /* Check length parameter for validity */ pad = nn - nroots - len; BUG_ON(pad < 0 || pad >= nn); diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c index 0b5b1a6728ec..9112d46e869e 100644 --- a/lib/reed_solomon/encode_rs.c +++ b/lib/reed_solomon/encode_rs.c @@ -1,23 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * lib/reed_solomon/encode_rs.c - * - * Overview: - * Generic Reed Solomon encoder / decoder library + * Generic Reed Solomon encoder / decoder library * * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de) * - * $Id: encode_rs.c,v 1.5 2005/11/07 11:14:59 gleixner Exp $ - * - */ - -/* Generic data width independent code which is included by the - * wrappers. - * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par) + * Generic data width independent code which is included by the wrappers. 
*/ { + struct rs_codec *rs = rsc->codec; int i, j, pad; int nn = rs->nn; int nroots = rs->nroots; diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c index 06d04cfa9339..dfcf54242fb9 100644 --- a/lib/reed_solomon/reed_solomon.c +++ b/lib/reed_solomon/reed_solomon.c @@ -1,43 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * lib/reed_solomon/reed_solomon.c - * - * Overview: - * Generic Reed Solomon encoder / decoder library + * Generic Reed Solomon encoder / decoder library * * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) * * Reed Solomon code lifted from reed solomon library written by Phil Karn * Copyright 2002 Phil Karn, KA9Q * - * $Id: rslib.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Description: * * The generic Reed Solomon library provides runtime configurable * encoding / decoding of RS codes. - * Each user must call init_rs to get a pointer to a rs_control - * structure for the given rs parameters. This structure is either - * generated or a already available matching control structure is used. - * If a structure is generated then the polynomial arrays for - * fast encoding / decoding are built. This can take some time so - * make sure not to call this function from a time critical path. - * Usually a module / driver should initialize the necessary - * rs_control structure on module / driver init and release it - * on exit. - * The encoding puts the calculated syndrome into a given syndrome - * buffer. - * The decoding is a two step process. The first step calculates - * the syndrome over the received (data + syndrome) and calls the - * second stage, which does the decoding / error correction itself. - * Many hw encoders provide a syndrome calculation over the received - * data + syndrome and can call the second stage directly. * + * Each user must call init_rs to get a pointer to a rs_control structure + * for the given rs parameters. The control struct is unique per instance. + * It points to a codec which can be shared by multiple control structures. + * If a codec is newly allocated then the polynomial arrays for fast + * encoding / decoding are built. This can take some time so make sure not + * to call this function from a time critical path. Usually a module / + * driver should initialize the necessary rs_control structure on module / + * driver init and release it on exit. + * + * The encoding puts the calculated syndrome into a given syndrome buffer. + * + * The decoding is a two step process. The first step calculates the + * syndrome over the received (data + syndrome) and calls the second stage, + * which does the decoding / error correction itself. Many hw encoders + * provide a syndrome calculation over the received data + syndrome and can + * call the second stage directly. 
 */
-
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -46,32 +37,44 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>

-/* This list holds all currently allocated rs control structures */
-static LIST_HEAD (rslist);
+enum {
+	RS_DECODE_LAMBDA,
+	RS_DECODE_SYN,
+	RS_DECODE_B,
+	RS_DECODE_T,
+	RS_DECODE_OMEGA,
+	RS_DECODE_ROOT,
+	RS_DECODE_REG,
+	RS_DECODE_LOC,
+	RS_DECODE_NUM_BUFFERS
+};
+
+/* This list holds all currently allocated rs codec structures */
+static LIST_HEAD(codec_list);
 /* Protection for the list */
 static DEFINE_MUTEX(rslistlock);

 /**
- * rs_init - Initialize a Reed-Solomon codec
+ * codec_init - Initialize a Reed-Solomon codec
  * @symsize:	symbol size, bits (1-8)
  * @gfpoly:	Field generator polynomial coefficients
  * @gffunc:	Field generator function
  * @fcr:	first root of RS code generator polynomial, index form
  * @prim:	primitive element to generate polynomial roots
  * @nroots:	RS code generator polynomial degree (number of roots)
+ * @gfp:	GFP_ flags for allocations
  *
- * Allocate a control structure and the polynom arrays for faster
+ * Allocate a codec structure and the polynomial arrays for faster
  * en/decoding. Fill the arrays according to the given parameters.
  */
-static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
-				  int fcr, int prim, int nroots)
+static struct rs_codec *codec_init(int symsize, int gfpoly, int (*gffunc)(int),
+				   int fcr, int prim, int nroots, gfp_t gfp)
 {
-	struct rs_control *rs;
 	int i, j, sr, root, iprim;
+	struct rs_codec *rs;

-	/* Allocate the control structure */
-	rs = kmalloc(sizeof (struct rs_control), GFP_KERNEL);
-	if (rs == NULL)
+	rs = kzalloc(sizeof(*rs), gfp);
+	if (!rs)
 		return NULL;

 	INIT_LIST_HEAD(&rs->list);
@@ -85,17 +88,17 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
 	rs->gffunc = gffunc;

 	/* Allocate the arrays */
-	rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+	rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), gfp);
 	if (rs->alpha_to == NULL)
-		goto errrs;
+		goto err;

-	rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+	rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), gfp);
 	if (rs->index_of == NULL)
-		goto erralp;
+		goto err;

-	rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), GFP_KERNEL);
+	rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), gfp);
 	if(rs->genpoly == NULL)
-		goto erridx;
+		goto err;

 	/* Generate Galois field lookup tables */
 	rs->index_of[0] = rs->nn;	/* log(zero) = -inf */
@@ -120,7 +123,7 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
 	}
 	/* If it's not primitive, exit */
 	if(sr != rs->alpha_to[0])
-		goto errpol;
+		goto err;

 	/* Find prim-th root of 1, used in decoding */
 	for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn);
@@ -148,42 +151,52 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int (*gffunc)(int),
 	/* convert rs->genpoly[] to index form for quicker encoding */
 	for (i = 0; i <= nroots; i++)
 		rs->genpoly[i] = rs->index_of[rs->genpoly[i]];
+
+	rs->users = 1;
+	list_add(&rs->list, &codec_list);
 	return rs;

-	/* Error exit */
-errpol:
+err:
 	kfree(rs->genpoly);
-erridx:
 	kfree(rs->index_of);
-erralp:
 	kfree(rs->alpha_to);
-errrs:
 	kfree(rs);
 	return NULL;
 }

 /**
- * free_rs - Free the rs control structure, if it is no longer used
- * @rs:	the control structure which is not longer used by the
+ * free_rs - Free the rs control structure
+ * @rs:	The control structure which is no longer used by the
  *	caller
+ *
+ * Free
the control structure. If @rs is the last user of the associated + * codec, free the codec as well. */ void free_rs(struct rs_control *rs) { + struct rs_codec *cd; + + if (!rs) + return; + + cd = rs->codec; mutex_lock(&rslistlock); - rs->users--; - if(!rs->users) { - list_del(&rs->list); - kfree(rs->alpha_to); - kfree(rs->index_of); - kfree(rs->genpoly); - kfree(rs); + cd->users--; + if(!cd->users) { + list_del(&cd->list); + kfree(cd->alpha_to); + kfree(cd->index_of); + kfree(cd->genpoly); + kfree(cd); } mutex_unlock(&rslistlock); + kfree(rs); } +EXPORT_SYMBOL_GPL(free_rs); /** - * init_rs_internal - Find a matching or allocate a new rs control structure + * init_rs_internal - Allocate rs control, find a matching codec or allocate a new one * @symsize: the symbol size (number of bits) * @gfpoly: the extended Galois field generator polynomial coefficients, * with the 0th coefficient in the low order bit. The polynomial @@ -191,55 +204,69 @@ void free_rs(struct rs_control *rs) * @gffunc: pointer to function to generate the next field element, * or the multiplicative identity element if given 0. Used * instead of gfpoly if gfpoly is 0 - * @fcr: the first consecutive root of the rs code generator polynomial + * @fcr: the first consecutive root of the rs code generator polynomial * in index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) + * @gfp: GFP_ flags for allocations */ static struct rs_control *init_rs_internal(int symsize, int gfpoly, - int (*gffunc)(int), int fcr, - int prim, int nroots) + int (*gffunc)(int), int fcr, + int prim, int nroots, gfp_t gfp) { - struct list_head *tmp; - struct rs_control *rs; + struct list_head *tmp; + struct rs_control *rs; + unsigned int bsize; /* Sanity checks */ if (symsize < 1) return NULL; if (fcr < 0 || fcr >= (1<<symsize)) - return NULL; + return NULL; if (prim <= 0 || prim >= (1<<symsize)) - return NULL; + return NULL; if (nroots < 0 || nroots >= (1<<symsize)) return NULL; + /* + * The decoder needs buffers in each control struct instance to + * avoid variable size or large fixed size allocations on + * stack. Size the buffers to arrays of [nroots + 1]. 
+ */ + bsize = sizeof(uint16_t) * RS_DECODE_NUM_BUFFERS * (nroots + 1); + rs = kzalloc(sizeof(*rs) + bsize, gfp); + if (!rs) + return NULL; + mutex_lock(&rslistlock); /* Walk through the list and look for a matching entry */ - list_for_each(tmp, &rslist) { - rs = list_entry(tmp, struct rs_control, list); - if (symsize != rs->mm) + list_for_each(tmp, &codec_list) { + struct rs_codec *cd = list_entry(tmp, struct rs_codec, list); + + if (symsize != cd->mm) continue; - if (gfpoly != rs->gfpoly) + if (gfpoly != cd->gfpoly) continue; - if (gffunc != rs->gffunc) + if (gffunc != cd->gffunc) continue; - if (fcr != rs->fcr) + if (fcr != cd->fcr) continue; - if (prim != rs->prim) + if (prim != cd->prim) continue; - if (nroots != rs->nroots) + if (nroots != cd->nroots) continue; /* We have a matching one already */ - rs->users++; + cd->users++; + rs->codec = cd; goto out; } /* Create a new one */ - rs = rs_init(symsize, gfpoly, gffunc, fcr, prim, nroots); - if (rs) { - rs->users = 1; - list_add(&rs->list, &rslist); + rs->codec = codec_init(symsize, gfpoly, gffunc, fcr, prim, nroots, gfp); + if (!rs->codec) { + kfree(rs); + rs = NULL; } out: mutex_unlock(&rslistlock); @@ -247,45 +274,48 @@ out: } /** - * init_rs - Find a matching or allocate a new rs control structure + * init_rs_gfp - Create a RS control struct and initialize it * @symsize: the symbol size (number of bits) * @gfpoly: the extended Galois field generator polynomial coefficients, * with the 0th coefficient in the low order bit. The polynomial * must be primitive; - * @fcr: the first consecutive root of the rs code generator polynomial + * @fcr: the first consecutive root of the rs code generator polynomial * in index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) + * @gfp: GFP_ flags for allocations */ -struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, - int nroots) +struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim, + int nroots, gfp_t gfp) { - return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots); + return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots, gfp); } +EXPORT_SYMBOL_GPL(init_rs_gfp); /** - * init_rs_non_canonical - Find a matching or allocate a new rs control - * structure, for fields with non-canonical - * representation + * init_rs_non_canonical - Allocate rs control struct for fields with + * non-canonical representation * @symsize: the symbol size (number of bits) * @gffunc: pointer to function to generate the next field element, * or the multiplicative identity element if given 0. 
Used * instead of gfpoly if gfpoly is 0 - * @fcr: the first consecutive root of the rs code generator polynomial + * @fcr: the first consecutive root of the rs code generator polynomial * in index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) */ struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int), - int fcr, int prim, int nroots) + int fcr, int prim, int nroots) { - return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots); + return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots, + GFP_KERNEL); } +EXPORT_SYMBOL_GPL(init_rs_non_canonical); #ifdef CONFIG_REED_SOLOMON_ENC8 /** * encode_rs8 - Calculate the parity for data values (8bit data width) - * @rs: the rs control structure + * @rsc: the rs control structure * @data: data field of a given type * @len: data length * @par: parity data, must be initialized by caller (usually all 0) @@ -295,7 +325,7 @@ struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int), * symbol size > 8. The calling code must take care of encoding of the * syndrome result for storage itself. */ -int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par, +int encode_rs8(struct rs_control *rsc, uint8_t *data, int len, uint16_t *par, uint16_t invmsk) { #include "encode_rs.c" @@ -306,7 +336,7 @@ EXPORT_SYMBOL_GPL(encode_rs8); #ifdef CONFIG_REED_SOLOMON_DEC8 /** * decode_rs8 - Decode codeword (8bit data width) - * @rs: the rs control structure + * @rsc: the rs control structure * @data: data field of a given type * @par: received parity data field * @len: data length @@ -319,9 +349,14 @@ EXPORT_SYMBOL_GPL(encode_rs8); * The syndrome and parity uses a uint16_t data type to enable * symbol size > 8. The calling code must take care of decoding of the * syndrome result and the received parity before calling this code. + * + * Note: The rs_control struct @rsc contains buffers which are used for + * decoding, so the caller has to ensure that decoder invocations are + * serialized. + * * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. */ -int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len, +int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len, uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, uint16_t *corr) { @@ -333,7 +368,7 @@ EXPORT_SYMBOL_GPL(decode_rs8); #ifdef CONFIG_REED_SOLOMON_ENC16 /** * encode_rs16 - Calculate the parity for data values (16bit data width) - * @rs: the rs control structure + * @rsc: the rs control structure * @data: data field of a given type * @len: data length * @par: parity data, must be initialized by caller (usually all 0) @@ -341,7 +376,7 @@ EXPORT_SYMBOL_GPL(decode_rs8); * * Each field in the data array contains up to symbol size bits of valid data. 
 */
-int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par,
+int encode_rs16(struct rs_control *rsc, uint16_t *data, int len, uint16_t *par,
 	uint16_t invmsk)
 {
 #include "encode_rs.c"
@@ -352,7 +387,7 @@ EXPORT_SYMBOL_GPL(encode_rs16);
 #endif

 #ifdef CONFIG_REED_SOLOMON_DEC16
 /**
  * decode_rs16 - Decode codeword (16bit data width)
- * @rs:	the rs control structure
+ * @rsc:	the rs control structure
  * @data:	data field of a given type
  * @par:	received parity data field
  * @len:	data length
@@ -363,9 +398,14 @@ EXPORT_SYMBOL_GPL(encode_rs16);
  * @corr:	buffer to store correction bitmask on eras_pos
  *
  * Each field in the data array contains up to symbol size bits of valid data.
+ *
+ * Note: The rs_control struct @rsc contains buffers which are used for
+ * decoding, so the caller has to ensure that decoder invocations are
+ * serialized.
+ *
  * Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
  */
-int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
+int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len,
 		uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
 		uint16_t *corr)
 {
@@ -374,10 +414,6 @@ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
 EXPORT_SYMBOL_GPL(decode_rs16);
 #endif

-EXPORT_SYMBOL_GPL(init_rs);
-EXPORT_SYMBOL_GPL(init_rs_non_canonical);
-EXPORT_SYMBOL_GPL(free_rs);
-
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Reed Solomon encoder/decoder");
 MODULE_AUTHOR("Phil Karn, Thomas Gleixner");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 2b2b79974b61..9427b5766134 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -668,8 +668,9 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
  * For a completely stable walk you should construct your own data
  * structure outside the hash table.
  *
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
  *
  * You must call rhashtable_walk_exit after this function returns.
  */
@@ -726,6 +727,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
+	bool rhlist = ht->rhlist;

 	rcu_read_lock();

@@ -734,11 +736,52 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 		list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);

-	if (!iter->walker.tbl && !iter->end_of_table) {
+	if (iter->end_of_table)
+		return 0;
+	if (!iter->walker.tbl) {
 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
+		iter->slot = 0;
+		iter->skip = 0;
 		return -EAGAIN;
 	}

+	if (iter->p && !rhlist) {
+		/*
+		 * We need to validate that 'p' is still in the table, and
+		 * if so, update 'skip'
+		 */
+		struct rhash_head *p;
+		int skip = 0;
+		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+			skip++;
+			if (p == iter->p) {
+				iter->skip = skip;
+				goto found;
+			}
+		}
+		iter->p = NULL;
+	} else if (iter->p && rhlist) {
+		/* Need to validate that 'list' is still in the table, and
+		 * if so, update 'skip' and 'p'.
+		 */
+		struct rhash_head *p;
+		struct rhlist_head *list;
+		int skip = 0;
+		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+			for (list = container_of(p, struct rhlist_head, rhead);
+			     list;
+			     list = rcu_dereference(list->next)) {
+				skip++;
+				if (list == iter->list) {
+					iter->p = p;
+					iter->skip = skip;
+					goto found;
+				}
+			}
+		}
+		iter->p = NULL;
+	}
+found:
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
@@ -914,8 +957,6 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 		iter->walker.tbl = NULL;
 	spin_unlock(&ht->lock);

-	iter->p = NULL;
-
 out:
 	rcu_read_unlock();
 }
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 8e157806df7a..60aedc879361 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -356,29 +356,22 @@ static int bpf_fill_maxinsns11(struct bpf_test *self)
 	return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
 }

-static int bpf_fill_ja(struct bpf_test *self)
-{
-	/* Hits exactly 11 passes on x86_64 JIT. */
-	return __bpf_fill_ja(self, 12, 9);
-}
-
-static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+static int bpf_fill_maxinsns12(struct bpf_test *self)
 {
 	unsigned int len = BPF_MAXINSNS;
 	struct sock_filter *insn;
-	int i;
+	int i = 0;

 	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
 	if (!insn)
 		return -ENOMEM;

-	for (i = 0; i < len - 1; i += 2) {
-		insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
-		insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
-					 SKF_AD_OFF + SKF_AD_CPU);
-	}
+	insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);

-	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+	for (i = 1; i < len - 1; i++)
+		insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);

 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
@@ -386,50 +379,22 @@ static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
 	return 0;
 }

-#define PUSH_CNT 68
-/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
-static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+static int bpf_fill_maxinsns13(struct bpf_test *self)
 {
 	unsigned int len = BPF_MAXINSNS;
-	struct bpf_insn *insn;
-	int i = 0, j, k = 0;
+	struct sock_filter *insn;
+	int i = 0;

 	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
 	if (!insn)
 		return -ENOMEM;

-	insn[i++] = BPF_MOV64_REG(R6, R1);
-loop:
-	for (j = 0; j < PUSH_CNT; j++) {
-		insn[i++] = BPF_LD_ABS(BPF_B, 0);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
-		i++;
-		insn[i++] = BPF_MOV64_REG(R1, R6);
-		insn[i++] = BPF_MOV64_IMM(R2, 1);
-		insn[i++] = BPF_MOV64_IMM(R3, 2);
-		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-					 bpf_skb_vlan_push_proto.func - __bpf_call_base);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
-		i++;
-	}
-
-	for (j = 0; j < PUSH_CNT; j++) {
-		insn[i++] = BPF_LD_ABS(BPF_B, 0);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
-		i++;
-		insn[i++] = BPF_MOV64_REG(R1, R6);
-		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-					 bpf_skb_vlan_pop_proto.func - __bpf_call_base);
-		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
-		i++;
-	}
-	if (++k < 5)
-		goto loop;
+	for (i = 0; i < len - 3; i++)
+		insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);

-	for (; i < len - 1; i++)
-		insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
-
-	insn[len - 1] = BPF_EXIT_INSN();
+	insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
+	insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
@@ -437,58 +402,29 @@ loop:
 	return
0; } -static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self) +static int bpf_fill_ja(struct bpf_test *self) { - struct bpf_insn *insn; - - insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL); - if (!insn) - return -ENOMEM; - - /* Due to func address being non-const, we need to - * assemble this here. - */ - insn[0] = BPF_MOV64_REG(R6, R1); - insn[1] = BPF_LD_ABS(BPF_B, 0); - insn[2] = BPF_LD_ABS(BPF_H, 0); - insn[3] = BPF_LD_ABS(BPF_W, 0); - insn[4] = BPF_MOV64_REG(R7, R6); - insn[5] = BPF_MOV64_IMM(R6, 0); - insn[6] = BPF_MOV64_REG(R1, R7); - insn[7] = BPF_MOV64_IMM(R2, 1); - insn[8] = BPF_MOV64_IMM(R3, 2); - insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - bpf_skb_vlan_push_proto.func - __bpf_call_base); - insn[10] = BPF_MOV64_REG(R6, R7); - insn[11] = BPF_LD_ABS(BPF_B, 0); - insn[12] = BPF_LD_ABS(BPF_H, 0); - insn[13] = BPF_LD_ABS(BPF_W, 0); - insn[14] = BPF_MOV64_IMM(R0, 42); - insn[15] = BPF_EXIT_INSN(); - - self->u.ptr.insns = insn; - self->u.ptr.len = 16; - - return 0; + /* Hits exactly 11 passes on x86_64 JIT. */ + return __bpf_fill_ja(self, 12, 9); } -static int bpf_fill_jump_around_ld_abs(struct bpf_test *self) +static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self) { unsigned int len = BPF_MAXINSNS; - struct bpf_insn *insn; - int i = 0; + struct sock_filter *insn; + int i; insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); if (!insn) return -ENOMEM; - insn[i++] = BPF_MOV64_REG(R6, R1); - insn[i++] = BPF_LD_ABS(BPF_B, 0); - insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2); - i++; - while (i < len - 1) - insn[i++] = BPF_LD_ABS(BPF_B, 1); - insn[i] = BPF_EXIT_INSN(); + for (i = 0; i < len - 1; i += 2) { + insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0); + insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, + SKF_AD_OFF + SKF_AD_CPU); + } + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee); self->u.ptr.insns = insn; self->u.ptr.len = len; @@ -1988,40 +1924,6 @@ static struct bpf_test tests[] = { { { 0, -1 } } }, { - "INT: DIV + ABS", - .u.insns_int = { - BPF_ALU64_REG(BPF_MOV, R6, R1), - BPF_LD_ABS(BPF_B, 3), - BPF_ALU64_IMM(BPF_MOV, R2, 2), - BPF_ALU32_REG(BPF_DIV, R0, R2), - BPF_ALU64_REG(BPF_MOV, R8, R0), - BPF_LD_ABS(BPF_B, 4), - BPF_ALU64_REG(BPF_ADD, R8, R0), - BPF_LD_IND(BPF_B, R8, -70), - BPF_EXIT_INSN(), - }, - INTERNAL, - { 10, 20, 30, 40, 50 }, - { { 4, 0 }, { 5, 10 } } - }, - { - /* This one doesn't go through verifier, but is just raw insn - * as opposed to cBPF tests from here. Thus div by 0 tests are - * done in test_verifier in BPF kselftests. 
- */ - "INT: DIV by -1", - .u.insns_int = { - BPF_ALU64_REG(BPF_MOV, R6, R1), - BPF_ALU64_IMM(BPF_MOV, R7, -1), - BPF_LD_ABS(BPF_B, 3), - BPF_ALU32_REG(BPF_DIV, R0, R7), - BPF_EXIT_INSN(), - }, - INTERNAL, - { 10, 20, 30, 40, 50 }, - { { 3, 0 }, { 4, 0 } } - }, - { "check: missing ret", .u.insns = { BPF_STMT(BPF_LD | BPF_IMM, 1), @@ -2383,50 +2285,6 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } } }, - { - "nmap reduced", - .u.insns_int = { - BPF_MOV64_REG(R6, R1), - BPF_LD_ABS(BPF_H, 12), - BPF_JMP_IMM(BPF_JNE, R0, 0x806, 28), - BPF_LD_ABS(BPF_H, 12), - BPF_JMP_IMM(BPF_JNE, R0, 0x806, 26), - BPF_MOV32_IMM(R0, 18), - BPF_STX_MEM(BPF_W, R10, R0, -64), - BPF_LDX_MEM(BPF_W, R7, R10, -64), - BPF_LD_IND(BPF_W, R7, 14), - BPF_STX_MEM(BPF_W, R10, R0, -60), - BPF_MOV32_IMM(R0, 280971478), - BPF_STX_MEM(BPF_W, R10, R0, -56), - BPF_LDX_MEM(BPF_W, R7, R10, -56), - BPF_LDX_MEM(BPF_W, R0, R10, -60), - BPF_ALU32_REG(BPF_SUB, R0, R7), - BPF_JMP_IMM(BPF_JNE, R0, 0, 15), - BPF_LD_ABS(BPF_H, 12), - BPF_JMP_IMM(BPF_JNE, R0, 0x806, 13), - BPF_MOV32_IMM(R0, 22), - BPF_STX_MEM(BPF_W, R10, R0, -56), - BPF_LDX_MEM(BPF_W, R7, R10, -56), - BPF_LD_IND(BPF_H, R7, 14), - BPF_STX_MEM(BPF_W, R10, R0, -52), - BPF_MOV32_IMM(R0, 17366), - BPF_STX_MEM(BPF_W, R10, R0, -48), - BPF_LDX_MEM(BPF_W, R7, R10, -48), - BPF_LDX_MEM(BPF_W, R0, R10, -52), - BPF_ALU32_REG(BPF_SUB, R0, R7), - BPF_JMP_IMM(BPF_JNE, R0, 0, 2), - BPF_MOV32_IMM(R0, 256), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(R0, 0), - BPF_EXIT_INSN(), - }, - INTERNAL, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, - { { 38, 256 } }, - .stack_depth = 64, - }, /* BPF_ALU | BPF_MOV | BPF_X */ { "ALU_MOV_X: dst = 2", @@ -5478,28 +5336,29 @@ static struct bpf_test tests[] = { .expected_errcode = -ENOTSUPP, }, { - "BPF_MAXINSNS: ld_abs+get_processor_id", - { }, - CLASSIC, + "BPF_MAXINSNS: jump over MSH", { }, - { { 1, 0xbee } }, - .fill_helper = bpf_fill_ld_abs_get_processor_id, + CLASSIC | FLAG_EXPECTED_FAIL, + { 0xfa, 0xfb, 0xfc, 0xfd, }, + { { 4, 0xabababab } }, + .fill_helper = bpf_fill_maxinsns12, + .expected_errcode = -EINVAL, }, { - "BPF_MAXINSNS: ld_abs+vlan_push/pop", + "BPF_MAXINSNS: exec all MSH", { }, - INTERNAL, - { 0x34 }, - { { ETH_HLEN, 0xbef } }, - .fill_helper = bpf_fill_ld_abs_vlan_push_pop, + CLASSIC, + { 0xfa, 0xfb, 0xfc, 0xfd, }, + { { 4, 0xababab83 } }, + .fill_helper = bpf_fill_maxinsns13, }, { - "BPF_MAXINSNS: jump around ld_abs", + "BPF_MAXINSNS: ld_abs+get_processor_id", { }, - INTERNAL, - { 10, 11 }, - { { 2, 10 } }, - .fill_helper = bpf_fill_jump_around_ld_abs, + CLASSIC, + { }, + { { 1, 0xbee } }, + .fill_helper = bpf_fill_ld_abs_get_processor_id, }, /* * LD_IND / LD_ABS on fragmented SKBs @@ -5683,6 +5542,53 @@ static struct bpf_test tests[] = { { {0x40, 0x05 } }, }, { + "LD_IND byte positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xff } }, + }, + { + "LD_IND byte positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { + "LD_IND byte negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | 
BPF_IND | BPF_B, -0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 } }, + }, + { + "LD_IND byte negative offset, multiple calls", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3b), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3), + BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x82 }, }, + }, + { "LD_IND halfword positive offset", .u.insns = { BPF_STMT(BPF_LDX | BPF_IMM, 0x20), @@ -5731,6 +5637,39 @@ static struct bpf_test tests[] = { { {0x40, 0x66cc } }, }, { + "LD_IND halfword positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3d), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xffff } }, + }, + { + "LD_IND halfword positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { + "LD_IND halfword negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 } }, + }, + { "LD_IND word positive offset", .u.insns = { BPF_STMT(BPF_LDX | BPF_IMM, 0x20), @@ -5821,6 +5760,39 @@ static struct bpf_test tests[] = { { {0x40, 0x66cc77dd } }, }, { + "LD_IND word positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3b), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xffffffff } }, + }, + { + "LD_IND word positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { + "LD_IND word negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_IMM, 0x3e), + BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 } }, + }, + { "LD_ABS byte", .u.insns = { BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20), @@ -5838,6 +5810,68 @@ static struct bpf_test tests[] = { { {0x40, 0xcc } }, }, { + "LD_ABS byte positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xff } }, + }, + { + "LD_ABS byte positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { + "LD_ABS byte negative offset, out of bounds load", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_EXPECTED_FAIL, + .expected_errcode = 
-EINVAL, + }, + { + "LD_ABS byte negative offset, in bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x82 }, }, + }, + { + "LD_ABS byte negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { + "LD_ABS byte negative offset, multiple calls", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c), + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d), + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e), + BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x82 }, }, + }, + { "LD_ABS halfword", .u.insns = { BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22), @@ -5872,6 +5906,55 @@ static struct bpf_test tests[] = { { {0x40, 0x99ff } }, }, { + "LD_ABS halfword positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xffff } }, + }, + { + "LD_ABS halfword positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { + "LD_ABS halfword negative offset, out of bounds load", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_EXPECTED_FAIL, + .expected_errcode = -EINVAL, + }, + { + "LD_ABS halfword negative offset, in bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x1982 }, }, + }, + { + "LD_ABS halfword negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { "LD_ABS word", .u.insns = { BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c), @@ -5939,6 +6022,140 @@ static struct bpf_test tests[] = { }, { {0x40, 0x88ee99ff } }, }, + { + "LD_ABS word positive offset, all ff", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff }, + { {0x40, 0xffffffff } }, + }, + { + "LD_ABS word positive offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { + "LD_ABS word negative offset, out of bounds load", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC | FLAG_EXPECTED_FAIL, + .expected_errcode = -EINVAL, + }, + { + "LD_ABS word negative offset, in bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x25051982 
}, }, + }, + { + "LD_ABS word negative offset, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x3f, 0 }, }, + }, + { + "LDX_MSH standalone, preserved A", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0xffeebbaa }, }, + }, + { + "LDX_MSH standalone, preserved A 2", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x175e9d63 }, }, + }, + { + "LDX_MSH standalone, test result 1", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x14 }, }, + }, + { + "LDX_MSH standalone, test result 2", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x24 }, }, + }, + { + "LDX_MSH standalone, negative offset", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0 }, }, + }, + { + "LDX_MSH standalone, negative offset 2", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0x24 }, }, + }, + { + "LDX_MSH standalone, out of bounds", + .u.insns = { + BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa), + BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40), + BPF_STMT(BPF_MISC | BPF_TXA, 0), + BPF_STMT(BPF_RET | BPF_A, 0x0), + }, + CLASSIC, + { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 }, + { {0x40, 0 }, }, + }, /* * verify that the interpreter or JIT correctly sets A and X * to 0. @@ -6127,14 +6344,6 @@ static struct bpf_test tests[] = { {}, { {0x1, 0x42 } }, }, - { - "LD_ABS with helper changing skb data", - { }, - INTERNAL, - { 0x34 }, - { { ETH_HLEN, 42 } }, - .fill_helper = bpf_fill_ld_abs_vlan_push_pop2, - }, /* Checking interpreter vs JIT wrt signed extended imms. */ { "JNE signed compare, test 1", diff --git a/lib/test_overflow.c b/lib/test_overflow.c new file mode 100644 index 000000000000..aecbbb217305 --- /dev/null +++ b/lib/test_overflow.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Test cases for arithmetic overflow checks. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/overflow.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/vmalloc.h> + +#define DEFINE_TEST_ARRAY(t) \ + static const struct test_ ## t { \ + t a, b; \ + t sum, diff, prod; \ + bool s_of, d_of, p_of; \ + } t ## _tests[] __initconst + +DEFINE_TEST_ARRAY(u8) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U8_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U8_MAX, U8_MAX, 1, 0, false, true, false}, + {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false}, + {1, U8_MAX, 0, 2, U8_MAX, true, true, false}, + {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false}, + {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true}, + + {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true}, + {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true}, + + {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false}, + {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true}, + {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false}, + {1U << 7, 1U << 7, 0, 0, 0, true, false, true}, + + {48, 32, 80, 16, 0, false, false, true}, + {128, 128, 0, 0, 0, true, false, true}, + {123, 234, 101, 145, 110, true, true, true}, +}; +DEFINE_TEST_ARRAY(u16) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U16_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U16_MAX, U16_MAX, 1, 0, false, true, false}, + {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false}, + {1, U16_MAX, 0, 2, U16_MAX, true, true, false}, + {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false}, + {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true}, + + {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true}, + {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true}, + + {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false}, + {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true}, + {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false}, + {1U << 15, 1U << 15, 0, 0, 0, true, false, true}, + + {123, 234, 357, 65425, 28782, false, true, false}, + {1234, 2345, 3579, 64425, 10146, false, true, true}, +}; +DEFINE_TEST_ARRAY(u32) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U32_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U32_MAX, U32_MAX, 1, 0, false, true, false}, + {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false}, + {1, U32_MAX, 0, 2, U32_MAX, true, true, false}, + {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false}, + {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true}, + + {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true}, + {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true}, + + {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false}, + {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true}, + {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false}, + {1U << 31, 1U << 31, 0, 0, 0, true, false, true}, + + {-2U, 1U, -1U, -3U, -2U, false, false, false}, + {-4U, 5U, 1U, -9U, -20U, true, false, true}, +}; + +DEFINE_TEST_ARRAY(u64) = { + {0, 0, 0, 0, 0, false, false, false}, + {1, 1, 2, 0, 1, false, false, false}, + {0, 1, 1, U64_MAX, 0, false, true, false}, + {1, 0, 1, 1, 0, false, false, false}, + {0, U64_MAX, U64_MAX, 1, 
0, false, true, false}, + {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false}, + {1, U64_MAX, 0, 2, U64_MAX, true, true, false}, + {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false}, + {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true}, + + {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true}, + {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true}, + + {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false}, + {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true}, + {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false}, + {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true}, + {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */, + 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL, + false, true, false}, + {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, +}; + +DEFINE_TEST_ARRAY(s8) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false}, + {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false}, + {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false}, + {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false}, + + {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true}, + {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true}, + {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false}, + {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false}, + {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false}, + {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false}, + + {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false}, + {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false}, + {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false}, + {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false}, + + {S8_MIN, S8_MIN, 0, 0, 0, true, false, true}, + {S8_MAX, S8_MAX, -2, 0, 1, true, false, true}, + + {-4, -32, -36, 28, -128, false, false, true}, + {-4, 32, 28, -36, -128, false, false, false}, +}; + +DEFINE_TEST_ARRAY(s16) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false}, + {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false}, + {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false}, + {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false}, + + {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true}, + {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true}, + {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false}, + {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false}, + {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false}, + {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false}, + + {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false}, + {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false}, + {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false}, + {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false}, + + {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, + {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, +}; +DEFINE_TEST_ARRAY(s32) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false}, + {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false}, + {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false}, + {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false}, + + {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true}, + {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true}, + {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, 
false, false}, + {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false}, + {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false}, + {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false}, + + {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false}, + {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false}, + {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false}, + {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false}, + + {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, + {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, +}; +DEFINE_TEST_ARRAY(s64) = { + {0, 0, 0, 0, 0, false, false, false}, + + {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false}, + {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false}, + {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false}, + {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false}, + + {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true}, + {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true}, + {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false}, + {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false}, + {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false}, + {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false}, + + {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false}, + {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false}, + {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false}, + {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false}, + + {S64_MIN, S64_MIN, 0, 0, 0, true, false, true}, + {S64_MAX, S64_MAX, -2, 0, 1, true, false, true}, + + {-1, -1, -2, 0, 1, false, false, false}, + {-1, -128, -129, 127, 128, false, false, false}, + {-128, -1, -129, -127, 128, false, false, false}, + {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, +}; + +#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ + t _r; \ + bool _of; \ + \ + _of = check_ ## op ## _overflow(a, b, &_r); \ + if (_of != of) { \ + pr_warn("expected "fmt" "sym" "fmt \ + " to%s overflow (type %s)\n", \ + a, b, of ? 
"" : " not", #t); \ + err = 1; \ + } \ + if (_r != r) { \ + pr_warn("expected "fmt" "sym" "fmt" == " \ + fmt", got "fmt" (type %s)\n", \ + a, b, r, _r, #t); \ + err = 1; \ + } \ +} while (0) + +#define DEFINE_TEST_FUNC(t, fmt) \ +static int __init do_test_ ## t(const struct test_ ## t *p) \ +{ \ + int err = 0; \ + \ + check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ + check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ + check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ + check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ + check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ + \ + return err; \ +} \ + \ +static int __init test_ ## t ## _overflow(void) { \ + int err = 0; \ + unsigned i; \ + \ + pr_info("%-3s: %zu tests\n", #t, ARRAY_SIZE(t ## _tests)); \ + for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ + err |= do_test_ ## t(&t ## _tests[i]); \ + return err; \ +} + +DEFINE_TEST_FUNC(u8, "%d"); +DEFINE_TEST_FUNC(s8, "%d"); +DEFINE_TEST_FUNC(u16, "%d"); +DEFINE_TEST_FUNC(s16, "%d"); +DEFINE_TEST_FUNC(u32, "%u"); +DEFINE_TEST_FUNC(s32, "%d"); +#if BITS_PER_LONG == 64 +DEFINE_TEST_FUNC(u64, "%llu"); +DEFINE_TEST_FUNC(s64, "%lld"); +#endif + +static int __init test_overflow_calculation(void) +{ + int err = 0; + + err |= test_u8_overflow(); + err |= test_s8_overflow(); + err |= test_u16_overflow(); + err |= test_s16_overflow(); + err |= test_u32_overflow(); + err |= test_s32_overflow(); +#if BITS_PER_LONG == 64 + err |= test_u64_overflow(); + err |= test_s64_overflow(); +#endif + + return err; +} + +/* + * Deal with the various forms of allocator arguments. See comments above + * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". + */ +#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL) +#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE) +#define alloc000(alloc, arg, sz) alloc(sz) +#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) +#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL) +#define free0(free, arg, ptr) free(ptr) +#define free1(free, arg, ptr) free(arg, ptr) + +/* Wrap around to 8K */ +#define TEST_SIZE (9 << PAGE_SHIFT) + +#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ +static int __init test_ ## func (void *arg) \ +{ \ + volatile size_t a = TEST_SIZE; \ + volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \ + void *ptr; \ + \ + /* Tiny allocation test. */ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\ + if (!ptr) { \ + pr_warn(#func " failed regular allocation?!\n"); \ + return 1; \ + } \ + free ## want_arg (free_func, arg, ptr); \ + \ + /* Wrapped allocation test. */ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ + a * b); \ + if (!ptr) { \ + pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \ + return 1; \ + } \ + free ## want_arg (free_func, arg, ptr); \ + \ + /* Saturated allocation test. */ \ + ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ + array_size(a, b)); \ + if (ptr) { \ + pr_warn(#func " missed saturation!\n"); \ + free ## want_arg (free_func, arg, ptr); \ + return 1; \ + } \ + pr_info(#func " detected saturation\n"); \ + return 0; \ +} + +/* + * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node()) + * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc()) + * Allocator uses a special leading argument + | | (e.g. 
devm_kmalloc()) + * | | | + */ +DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1); +DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1); +DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0); +DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1); +DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0); +DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1); +DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1); +DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0); +DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1); +DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0); +DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); + +static int __init test_overflow_allocation(void) +{ + const char device_name[] = "overflow-test"; + struct device *dev; + int err = 0; + + /* Create dummy device for devm_kmalloc()-family tests. */ + dev = root_device_register(device_name); + if (IS_ERR(dev)) { + pr_warn("Cannot register test device\n"); + return 1; + } + + err |= test_kmalloc(NULL); + err |= test_kmalloc_node(NULL); + err |= test_kzalloc(NULL); + err |= test_kzalloc_node(NULL); + err |= test_kvmalloc(NULL); + err |= test_kvmalloc_node(NULL); + err |= test_kvzalloc(NULL); + err |= test_kvzalloc_node(NULL); + err |= test_vmalloc(NULL); + err |= test_vmalloc_node(NULL); + err |= test_vzalloc(NULL); + err |= test_vzalloc_node(NULL); + err |= test_devm_kmalloc(dev); + err |= test_devm_kzalloc(dev); + + device_unregister(dev); + + return err; +} + +static int __init test_module_init(void) +{ + int err = 0; + + err |= test_overflow_calculation(); + err |= test_overflow_allocation(); + + if (err) { + pr_warn("FAIL!\n"); + err = -EINVAL; + } else { + pr_info("all tests passed\n"); + } + + return err; +} + +static void __exit test_module_exit(void) +{ } + +module_init(test_module_init); +module_exit(test_module_exit); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/lib/test_printf.c b/lib/test_printf.c index 71ebfa43ad05..cea592f402ed 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -204,7 +204,7 @@ test_string(void) #if BITS_PER_LONG == 64 #define PTR_WIDTH 16 -#define PTR ((void *)0xffff0123456789ab) +#define PTR ((void *)0xffff0123456789abUL) #define PTR_STR "ffff0123456789ab" #define ZEROS "00000000" /* hex 32 zero bits */ diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c index d7e06b28de38..0a559a42359b 100644 --- a/lib/ucs2_string.c +++ b/lib/ucs2_string.c @@ -112,3 +112,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) return j; } EXPORT_SYMBOL(ucs2_as_utf8); + +MODULE_LICENSE("GPL v2"); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 23920c5ff728..a48aaa79d352 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -703,6 +703,22 @@ char *symbol_string(char *buf, char *end, void *ptr, #endif } +static const struct printf_spec default_str_spec = { + .field_width = -1, + .precision = -1, +}; + +static const struct printf_spec default_flag_spec = { + .base = 16, + .precision = -1, + .flags = SPECIAL | SMALL, +}; + +static const struct printf_spec default_dec_spec = { + .base = 10, + .precision = -1, +}; + static noinline_for_stack char *resource_string(char *buf, char *end, struct resource *res, struct printf_spec spec, const char *fmt) @@ -732,21 +748,11 @@ char *resource_string(char *buf, char *end, struct resource *res, .precision = -1, .flags = SMALL | ZEROPAD, }; - static const struct printf_spec dec_spec = { - .base = 10, - .precision = -1, - .flags = 0, - }; static const struct 
printf_spec str_spec = { .field_width = -1, .precision = 10, .flags = LEFT, }; - static const struct printf_spec flag_spec = { - .base = 16, - .precision = -1, - .flags = SPECIAL | SMALL, - }; /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8) * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ @@ -770,10 +776,10 @@ char *resource_string(char *buf, char *end, struct resource *res, specp = &mem_spec; } else if (res->flags & IORESOURCE_IRQ) { p = string(p, pend, "irq ", str_spec); - specp = &dec_spec; + specp = &default_dec_spec; } else if (res->flags & IORESOURCE_DMA) { p = string(p, pend, "dma ", str_spec); - specp = &dec_spec; + specp = &default_dec_spec; } else if (res->flags & IORESOURCE_BUS) { p = string(p, pend, "bus ", str_spec); specp = &bus_spec; @@ -803,7 +809,7 @@ char *resource_string(char *buf, char *end, struct resource *res, p = string(p, pend, " disabled", str_spec); } else { p = string(p, pend, " flags ", str_spec); - p = number(p, pend, res->flags, flag_spec); + p = number(p, pend, res->flags, default_flag_spec); } *p++ = ']'; *p = '\0'; @@ -913,9 +919,6 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap, int cur, rbot, rtop; bool first = true; - /* reused to print numbers */ - spec = (struct printf_spec){ .base = 10 }; - rbot = cur = find_first_bit(bitmap, nr_bits); while (cur < nr_bits) { rtop = cur; @@ -930,13 +933,13 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap, } first = false; - buf = number(buf, end, rbot, spec); + buf = number(buf, end, rbot, default_dec_spec); if (rbot < rtop) { if (buf < end) *buf = '-'; buf++; - buf = number(buf, end, rtop, spec); + buf = number(buf, end, rtop, default_dec_spec); } rbot = cur; @@ -1354,11 +1357,9 @@ char *uuid_string(char *buf, char *end, const u8 *addr, return string(buf, end, uuid, spec); } -int kptr_restrict __read_mostly; - static noinline_for_stack -char *restricted_pointer(char *buf, char *end, const void *ptr, - struct printf_spec spec) +char *pointer_string(char *buf, char *end, const void *ptr, + struct printf_spec spec) { spec.base = 16; spec.flags |= SMALL; @@ -1367,6 +1368,15 @@ char *restricted_pointer(char *buf, char *end, const void *ptr, spec.flags |= ZEROPAD; } + return number(buf, end, (unsigned long int)ptr, spec); +} + +int kptr_restrict __read_mostly; + +static noinline_for_stack +char *restricted_pointer(char *buf, char *end, const void *ptr, + struct printf_spec spec) +{ switch (kptr_restrict) { case 0: /* Always print %pK values */ @@ -1378,8 +1388,11 @@ char *restricted_pointer(char *buf, char *end, const void *ptr, * kptr_restrict==1 cannot be used in IRQ context * because its test for CAP_SYSLOG would be meaningless. 
*/ - if (in_irq() || in_serving_softirq() || in_nmi()) + if (in_irq() || in_serving_softirq() || in_nmi()) { + if (spec.field_width == -1) + spec.field_width = 2 * sizeof(ptr); return string(buf, end, "pK-error", spec); + } /* * Only print the real pointer value if the current @@ -1404,7 +1417,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr, break; } - return number(buf, end, (unsigned long)ptr, spec); + return pointer_string(buf, end, ptr, spec); } static noinline_for_stack @@ -1456,9 +1469,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec, return string(buf, end, NULL, spec); switch (fmt[1]) { - case 'r': - return number(buf, end, clk_get_rate(clk), spec); - case 'n': default: #ifdef CONFIG_COMMON_CLK @@ -1474,23 +1484,13 @@ char *format_flags(char *buf, char *end, unsigned long flags, const struct trace_print_flags *names) { unsigned long mask; - const struct printf_spec strspec = { - .field_width = -1, - .precision = -1, - }; - const struct printf_spec numspec = { - .flags = SPECIAL|SMALL, - .field_width = -1, - .precision = -1, - .base = 16, - }; for ( ; flags && names->name; names++) { mask = names->mask; if ((flags & mask) != mask) continue; - buf = string(buf, end, names->name, strspec); + buf = string(buf, end, names->name, default_str_spec); flags &= ~mask; if (flags) { @@ -1501,7 +1501,7 @@ char *format_flags(char *buf, char *end, unsigned long flags, } if (flags) - buf = number(buf, end, flags, numspec); + buf = number(buf, end, flags, default_flag_spec); return buf; } @@ -1548,22 +1548,18 @@ char *device_node_gen_full_name(const struct device_node *np, char *buf, char *e { int depth; const struct device_node *parent = np->parent; - static const struct printf_spec strspec = { - .field_width = -1, - .precision = -1, - }; /* special case for root node */ if (!parent) - return string(buf, end, "/", strspec); + return string(buf, end, "/", default_str_spec); for (depth = 0; parent->parent; depth++) parent = parent->parent; for ( ; depth >= 0; depth--) { - buf = string(buf, end, "/", strspec); + buf = string(buf, end, "/", default_str_spec); buf = string(buf, end, device_node_name_for_depth(np, depth), - strspec); + default_str_spec); } return buf; } @@ -1655,20 +1651,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn, return widen_string(buf, buf - buf_start, end, spec); } -static noinline_for_stack -char *pointer_string(char *buf, char *end, const void *ptr, - struct printf_spec spec) -{ - spec.base = 16; - spec.flags |= SMALL; - if (spec.field_width == -1) { - spec.field_width = 2 * sizeof(ptr); - spec.flags |= ZEROPAD; - } - - return number(buf, end, (unsigned long int)ptr, spec); -} - static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); static siphash_key_t ptr_key __read_mostly; @@ -1710,13 +1692,13 @@ early_initcall(initialize_ptr_random); /* Maps a pointer to a 32 bit unique identifier. */ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) { + const char *str = sizeof(ptr) == 8 ? 
"(____ptrval____)" : "(ptrval)"; unsigned long hashval; - const int default_width = 2 * sizeof(ptr); if (static_branch_unlikely(¬_filled_random_ptr_key)) { - spec.field_width = default_width; + spec.field_width = 2 * sizeof(ptr); /* string length must be less than default_width */ - return string(buf, end, "(ptrval)", spec); + return string(buf, end, str, spec); } #ifdef CONFIG_64BIT @@ -1729,15 +1711,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) #else hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key); #endif - - spec.flags |= SMALL; - if (spec.field_width == -1) { - spec.field_width = default_width; - spec.flags |= ZEROPAD; - } - spec.base = 16; - - return number(buf, end, hashval, spec); + return pointer_string(buf, end, (const void *)hashval, spec); } /* @@ -1750,10 +1724,10 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) * * Right now we handle: * - * - 'F' For symbolic function descriptor pointers with offset - * - 'f' For simple symbolic function names without offset - * - 'S' For symbolic direct pointers with offset - * - 's' For symbolic direct pointers without offset + * - 'S' For symbolic direct pointers (or function descriptors) with offset + * - 's' For symbolic direct pointers (or function descriptors) without offset + * - 'F' Same as 'S' + * - 'f' Same as 's' * - '[FfSs]R' as above with __builtin_extract_return_addr() translation * - 'B' For backtraced symbolic direct pointers with offset * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] @@ -1850,10 +1824,6 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) * ** When making changes please also update: * Documentation/core-api/printk-formats.rst * - * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 - * function pointers are really function descriptors, which contain a - * pointer to the real address. - * * Note: The default behaviour (unadorned %p) is to hash the address, * rendering it useful as a unique identifier. */ @@ -2129,6 +2099,7 @@ qualifier: case 'x': spec->flags |= SMALL; + /* fall through */ case 'X': spec->base = 16; @@ -3087,8 +3058,10 @@ int vsscanf(const char *buf, const char *fmt, va_list args) break; case 'i': base = 0; + /* fall through */ case 'd': is_sign = true; + /* fall through */ case 'u': break; case '%': |