Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug        |  15
-rw-r--r--   lib/Kconfig.ubsan        |   4
-rw-r--r--   lib/atomic64_test.c      |   2
-rw-r--r--   lib/cpumask.c            |   1
-rw-r--r--   lib/dump_stack.c         |   7
-rw-r--r--   lib/klist.c              |   6
-rw-r--r--   lib/list_debug.c         |   9
-rw-r--r--   lib/scatterlist.c        |   6
-rw-r--r--   lib/test_static_keys.c   |  62
-rw-r--r--   lib/ucs2_string.c        |  62
-rw-r--r--   lib/vsprintf.c           |  26
11 files changed, 147 insertions, 53 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ecb9e75614bf..8bfd1aca7a3d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1400,6 +1400,21 @@ config RCU_EQS_DEBUG
 
 endmenu # "RCU Debugging"
 
+config DEBUG_WQ_FORCE_RR_CPU
+	bool "Force round-robin CPU selection for unbound work items"
+	depends on DEBUG_KERNEL
+	default n
+	help
+	  Workqueue used to implicitly guarantee that work items queued
+	  without explicit CPU specified are put on the local CPU.  This
+	  guarantee is no longer true and while local CPU is still
+	  preferred work items may be put on foreign CPUs.  Kernel
+	  parameter "workqueue.debug_force_rr_cpu" is added to force
+	  round-robin CPU selection to flush out usages which depend on the
+	  now broken guarantee.  This config option enables the debug
+	  feature by default.  When enabled, memory and cache locality will
+	  be impacted.
+
 config DEBUG_BLOCK_EXT_DEVT
 	bool "Force extended block device numbers and spread them"
 	depends on DEBUG_KERNEL
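For reference only, not part of the patch: the help text above names the "workqueue.debug_force_rr_cpu" kernel parameter, which selects the same behaviour at boot time regardless of the Kconfig default, assuming the usual boolean module-parameter syntax on the kernel command line:

	workqueue.debug_force_rr_cpu=1    # force round-robin placement of unbound work items
	workqueue.debug_force_rr_cpu=0    # keep the local-CPU preference even with this option enabled
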
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 49518fb48cab..e07c1ba9ba13 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -18,6 +18,8 @@ config UBSAN_SANITIZE_ALL
 	  This option activates instrumentation for the entire kernel.
 	  If you don't enable this option, you have to explicitly specify
 	  UBSAN_SANITIZE := y for the files/directories you want to check for UB.
+	  Enabling this option will get kernel image size increased
+	  significantly.
 
 config UBSAN_ALIGNMENT
 	bool "Enable checking of pointers alignment"
@@ -25,5 +27,5 @@ config UBSAN_ALIGNMENT
 	default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
 	help
 	  This option enables detection of unaligned memory accesses.
-	  Enabling this option on architectures that support unalligned
+	  Enabling this option on architectures that support unaligned
 	  accesses may produce a lot of false positives.
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index d62de8bf022d..123481814320 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -17,7 +17,7 @@
 #include <linux/atomic.h>
 
 #ifdef CONFIG_X86
-#include <asm/processor.h>	/* for boot_cpu_has below */
+#include <asm/cpufeature.h>	/* for boot_cpu_has below */
 #endif
 
 #define TEST(bit, op, c_op, val)				\
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5a70f6196f57..81dedaab36cc 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -41,6 +41,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 			break;
 	return i;
 }
+EXPORT_SYMBOL(cpumask_any_but);
 
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 6745c6230db3..c30d07e99dba 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
 
 asmlinkage __visible void dump_stack(void)
 {
+	unsigned long flags;
 	int was_locked;
 	int old;
 	int cpu;
@@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-	preempt_disable();
-
 retry:
+	local_irq_save(flags);
 	cpu = smp_processor_id();
 	old = atomic_cmpxchg(&dump_lock, -1, cpu);
 	if (old == -1) {
@@ -43,6 +43,7 @@ retry:
 	} else if (old == cpu) {
 		was_locked = 1;
 	} else {
+		local_irq_restore(flags);
 		cpu_relax();
 		goto retry;
 	}
@@ -52,7 +53,7 @@ retry:
 	if (!was_locked)
 		atomic_set(&dump_lock, -1);
 
-	preempt_enable();
+	local_irq_restore(flags);
 }
 #else
 asmlinkage __visible void dump_stack(void)
diff --git a/lib/klist.c b/lib/klist.c
index d74cf7a29afd..0507fa5d84c5 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
 			  struct klist_node *n)
 {
 	i->i_klist = k;
-	i->i_cur = n;
-	if (n)
-		kref_get(&n->n_ref);
+	i->i_cur = NULL;
+	if (n && kref_get_unless_zero(&n->n_ref))
+		i->i_cur = n;
 }
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
 
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3345a089ef7b..3859bf63561c 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -12,13 +12,6 @@
 #include <linux/kernel.h>
 #include <linux/rculist.h>
 
-static struct list_head force_poison;
-void list_force_poison(struct list_head *entry)
-{
-	entry->next = &force_poison;
-	entry->prev = &force_poison;
-}
-
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -30,8 +23,6 @@ void __list_add(struct list_head *new,
 			      struct list_head *prev,
 			      struct list_head *next)
 {
-	WARN(new->next == &force_poison || new->prev == &force_poison,
-		"list_add attempted on force-poisoned entry\n");
 	WARN(next->prev != prev,
 		"list_add corruption. next->prev should be "
 		"prev (%p), but was %p. (next=%p).\n",
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index bafa9933fa76..004fc70fc56a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -598,9 +598,9 @@ EXPORT_SYMBOL(sg_miter_next);
  *
  * Description:
  *   Stops mapping iterator @miter.  @miter should have been started
- *   started using sg_miter_start().  A stopped iteration can be
- *   resumed by calling sg_miter_next() on it.  This is useful when
- *   resources (kmap) need to be released during iteration.
+ *   using sg_miter_start().  A stopped iteration can be resumed by
+ *   calling sg_miter_next() on it.  This is useful when resources (kmap)
+ *   need to be released during iteration.
  *
  * Context:
  *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
index c61b299e367f..915d75df2086 100644
--- a/lib/test_static_keys.c
+++ b/lib/test_static_keys.c
@@ -46,8 +46,11 @@ struct test_key {
 	bool			(*test_key)(void);
 };
 
-#define test_key_func(key, branch) \
-	({bool func(void) { return branch(key); } func; })
+#define test_key_func(key, branch)	\
+static bool key ## _ ## branch(void)	\
+{					\
+	return branch(&key);		\
+}
 
 static void invert_key(struct static_key *key)
 {
@@ -92,6 +95,25 @@ static int verify_keys(struct test_key *keys, int size, bool invert)
 	return 0;
 }
 
+test_key_func(old_true_key, static_key_true)
+test_key_func(old_false_key, static_key_false)
+test_key_func(true_key, static_branch_likely)
+test_key_func(true_key, static_branch_unlikely)
+test_key_func(false_key, static_branch_likely)
+test_key_func(false_key, static_branch_unlikely)
+test_key_func(base_old_true_key, static_key_true)
+test_key_func(base_inv_old_true_key, static_key_true)
+test_key_func(base_old_false_key, static_key_false)
+test_key_func(base_inv_old_false_key, static_key_false)
+test_key_func(base_true_key, static_branch_likely)
+test_key_func(base_true_key, static_branch_unlikely)
+test_key_func(base_inv_true_key, static_branch_likely)
+test_key_func(base_inv_true_key, static_branch_unlikely)
+test_key_func(base_false_key, static_branch_likely)
+test_key_func(base_false_key, static_branch_unlikely)
+test_key_func(base_inv_false_key, static_branch_likely)
+test_key_func(base_inv_false_key, static_branch_unlikely)
+
 static int __init test_static_key_init(void)
 {
 	int ret;
@@ -102,95 +124,95 @@ static int __init test_static_key_init(void)
 		{
 			.init_state	= true,
 			.key		= &old_true_key,
-			.test_key	= test_key_func(&old_true_key, static_key_true),
+			.test_key	= &old_true_key_static_key_true,
 		},
 		{
 			.init_state	= false,
 			.key		= &old_false_key,
-			.test_key	= test_key_func(&old_false_key, static_key_false),
+			.test_key	= &old_false_key_static_key_false,
 		},
 		/* internal keys - new keys */
 		{
 			.init_state	= true,
 			.key		= &true_key.key,
-			.test_key	= test_key_func(&true_key, static_branch_likely),
+			.test_key	= &true_key_static_branch_likely,
 		},
 		{
 			.init_state	= true,
 			.key		= &true_key.key,
-			.test_key	= test_key_func(&true_key, static_branch_unlikely),
+			.test_key	= &true_key_static_branch_unlikely,
 		},
 		{
 			.init_state	= false,
 			.key		= &false_key.key,
-			.test_key	= test_key_func(&false_key, static_branch_likely),
+			.test_key	= &false_key_static_branch_likely,
 		},
 		{
 			.init_state	= false,
 			.key		= &false_key.key,
-			.test_key	= test_key_func(&false_key, static_branch_unlikely),
+			.test_key	= &false_key_static_branch_unlikely,
 		},
 		/* external keys - old keys */
 		{
 			.init_state	= true,
 			.key		= &base_old_true_key,
-			.test_key	= test_key_func(&base_old_true_key, static_key_true),
+			.test_key	= &base_old_true_key_static_key_true,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_inv_old_true_key,
-			.test_key	= test_key_func(&base_inv_old_true_key, static_key_true),
+			.test_key	= &base_inv_old_true_key_static_key_true,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_old_false_key,
-			.test_key	= test_key_func(&base_old_false_key, static_key_false),
+			.test_key	= &base_old_false_key_static_key_false,
 		},
 		{
 			.init_state	= true,
 			.key		= &base_inv_old_false_key,
-			.test_key	= test_key_func(&base_inv_old_false_key, static_key_false),
+			.test_key	= &base_inv_old_false_key_static_key_false,
 		},
 		/* external keys - new keys */
 		{
 			.init_state	= true,
 			.key		= &base_true_key.key,
-			.test_key	= test_key_func(&base_true_key, static_branch_likely),
+			.test_key	= &base_true_key_static_branch_likely,
 		},
 		{
 			.init_state	= true,
 			.key		= &base_true_key.key,
-			.test_key	= test_key_func(&base_true_key, static_branch_unlikely),
+			.test_key	= &base_true_key_static_branch_unlikely,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_inv_true_key.key,
-			.test_key	= test_key_func(&base_inv_true_key, static_branch_likely),
+			.test_key	= &base_inv_true_key_static_branch_likely,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_inv_true_key.key,
-			.test_key	= test_key_func(&base_inv_true_key, static_branch_unlikely),
+			.test_key	= &base_inv_true_key_static_branch_unlikely,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_false_key.key,
-			.test_key	= test_key_func(&base_false_key, static_branch_likely),
+			.test_key	= &base_false_key_static_branch_likely,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_false_key.key,
-			.test_key	= test_key_func(&base_false_key, static_branch_unlikely),
+			.test_key	= &base_false_key_static_branch_unlikely,
 		},
 		{
 			.init_state	= true,
 			.key		= &base_inv_false_key.key,
-			.test_key	= test_key_func(&base_inv_false_key, static_branch_likely),
+			.test_key	= &base_inv_false_key_static_branch_likely,
 		},
 		{
 			.init_state	= true,
 			.key		= &base_inv_false_key.key,
-			.test_key	= test_key_func(&base_inv_false_key, static_branch_unlikely),
+			.test_key	= &base_inv_false_key_static_branch_unlikely,
 		},
 	};
 
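For clarity, illustration only, not part of the patch: with the new test_key_func() definition above, an invocation such as test_key_func(true_key, static_branch_likely) expands to an ordinary named function, roughly

	static bool true_key_static_branch_likely(void)
	{
		return static_branch_likely(&true_key);
	}

which is the symbol the .test_key initializers above take the address of, replacing the nested-function GCC extension used by the old macro.
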
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 6f500ef2301d..f0b323abb4c6 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
 	}
 }
 EXPORT_SYMBOL(ucs2_strncmp);
+
+unsigned long
+ucs2_utf8size(const ucs2_char_t *src)
+{
+	unsigned long i;
+	unsigned long j = 0;
+
+	for (i = 0; i < ucs2_strlen(src); i++) {
+		u16 c = src[i];
+
+		if (c >= 0x800)
+			j += 3;
+		else if (c >= 0x80)
+			j += 2;
+		else
+			j += 1;
+	}
+
+	return j;
+}
+EXPORT_SYMBOL(ucs2_utf8size);
+
+/*
+ * copy at most maxlength bytes of whole utf8 characters to dest from the
+ * ucs2 string src.
+ *
+ * The return value is the number of characters copied, not including the
+ * final NUL character.
+ */
+unsigned long
+ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
+{
+	unsigned int i;
+	unsigned long j = 0;
+	unsigned long limit = ucs2_strnlen(src, maxlength);
+
+	for (i = 0; maxlength && i < limit; i++) {
+		u16 c = src[i];
+
+		if (c >= 0x800) {
+			if (maxlength < 3)
+				break;
+			maxlength -= 3;
+			dest[j++] = 0xe0 | (c & 0xf000) >> 12;
+			dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
+			dest[j++] = 0x80 | (c & 0x003f);
+		} else if (c >= 0x80) {
+			if (maxlength < 2)
+				break;
+			maxlength -= 2;
+			dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
+			dest[j++] = 0x80 | (c & 0x03f);
+		} else {
+			maxlength -= 1;
+			dest[j++] = c & 0x7f;
+		}
+	}
+	if (maxlength)
+		dest[j] = '\0';
+	return j;
+}
+EXPORT_SYMBOL(ucs2_as_utf8);
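As a usage sketch for the two new helpers (illustrative only: the wrapper name, buffer handling and GFP flag are assumptions, not taken from this patch), a caller would typically size the destination with ucs2_utf8size() and then convert with ucs2_as_utf8():

	#include <linux/slab.h>
	#include <linux/ucs2_string.h>

	/* Hypothetical helper: convert a NUL-terminated UCS-2 string to UTF-8. */
	static u8 *example_ucs2_to_utf8(const ucs2_char_t *src)
	{
		/* Bytes needed for the UTF-8 form, excluding the trailing NUL. */
		unsigned long len = ucs2_utf8size(src);
		u8 *utf8 = kzalloc(len + 1, GFP_KERNEL);

		if (!utf8)
			return NULL;

		/* maxlength covers the converted bytes plus the NUL terminator. */
		ucs2_as_utf8(utf8, src, len + 1);
		return utf8;
	}
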
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 48ff9c36644d..f44e178e6ede 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1590,22 +1590,23 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		return buf;
 	}
 	case 'K':
-		/*
-		 * %pK cannot be used in IRQ context because its test
-		 * for CAP_SYSLOG would be meaningless.
-		 */
-		if (kptr_restrict && (in_irq() || in_serving_softirq() ||
-				      in_nmi())) {
-			if (spec.field_width == -1)
-				spec.field_width = default_width;
-			return string(buf, end, "pK-error", spec);
-		}
-
 		switch (kptr_restrict) {
 		case 0:
 			/* Always print %pK values */
 			break;
 		case 1: {
+			const struct cred *cred;
+
+			/*
+			 * kptr_restrict==1 cannot be used in IRQ context
+			 * because its test for CAP_SYSLOG would be meaningless.
+			 */
+			if (in_irq() || in_serving_softirq() || in_nmi()) {
+				if (spec.field_width == -1)
+					spec.field_width = default_width;
+				return string(buf, end, "pK-error", spec);
+			}
+
 			/*
 			 * Only print the real pointer value if the current
 			 * process has CAP_SYSLOG and is running with the
@@ -1615,8 +1616,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 			 * leak pointer values if a binary opens a file using
 			 * %pK and then elevates privileges before reading it.
 			 */
-			const struct cred *cred = current_cred();
-
+			cred = current_cred();
 			if (!has_capability_noaudit(current, CAP_SYSLOG) ||
 			    !uid_eq(cred->euid, cred->uid) ||
 			    !gid_eq(cred->egid, cred->gid))