Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                     |  11
-rw-r--r--  lib/Kconfig.debug               | 189
-rw-r--r--  lib/Kconfig.kgdb                |   2
-rw-r--r--  lib/Makefile                    |   6
-rw-r--r--  lib/cmdline.c                   |   6
-rw-r--r--  lib/cpumask.c                   |  32
-rw-r--r--  lib/crc4.c                      |  46
-rw-r--r--  lib/dma-noop.c                  |  21
-rw-r--r--  lib/dma-virt.c                  |  12
-rw-r--r--  lib/errseq.c                    | 208
-rw-r--r--  lib/flex_proportions.c          |   6
-rw-r--r--  lib/iov_iter.c                  | 116
-rw-r--r--  lib/kobject_uevent.c            | 167
-rw-r--r--  lib/libcrc32c.c                 |   6
-rw-r--r--  lib/locking-selftest-rtmutex.h  |  11
-rw-r--r--  lib/locking-selftest.c          | 133
-rw-r--r--  lib/nlattr.c                    |  11
-rw-r--r--  lib/nmi_backtrace.c             |   3
-rw-r--r--  lib/percpu_counter.c            |   4
-rw-r--r--  lib/raid6/mktables.c            |  20
-rw-r--r--  lib/refcount.c                  |   3
-rw-r--r--  lib/smp_processor_id.c          |   2
-rw-r--r--  lib/strnlen_user.c              |  34
-rw-r--r--  lib/test_bpf.c                  |  59
-rw-r--r--  lib/usercopy.c                  |  10
-rw-r--r--  lib/vsprintf.c                  | 136
26 files changed, 913 insertions(+), 341 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 0c8b78a9ae2e..6762529ad9e4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -158,6 +158,14 @@ config CRC32_BIT
endchoice
+config CRC4
+ tristate "CRC4 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC4 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC4
+ functions require M here.
+
config CRC7
tristate "CRC7 functions"
help
@@ -548,6 +556,9 @@ config ARCH_HAS_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+config ARCH_HAS_UACCESS_FLUSHCACHE
+ bool
+
config ARCH_HAS_MMIO_FLUSH
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4587ebe52c7..ca9460f049b8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -286,7 +286,7 @@ config DEBUG_FS
write to these files.
For detailed documentation on the debugfs API, see
- Documentation/DocBook/filesystems.
+ Documentation/filesystems/.
If unsure, say N.
@@ -1052,6 +1052,7 @@ config DEBUG_LOCK_ALLOC
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
+ select DEBUG_RT_MUTEXES if RT_MUTEXES
select LOCKDEP
help
This feature will check whether any held lock (spinlock, rwlock,
@@ -1067,6 +1068,7 @@ config PROVE_LOCKING
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
+ select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
select TRACE_IRQFLAGS
default n
@@ -1121,6 +1123,7 @@ config LOCK_STAT
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
+ select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
default n
help
@@ -1301,189 +1304,7 @@ config DEBUG_CREDENTIALS
If unsure, say N.
-menu "RCU Debugging"
-
-config PROVE_RCU
- def_bool PROVE_LOCKING
-
-config PROVE_RCU_REPEATEDLY
- bool "RCU debugging: don't disable PROVE_RCU on first splat"
- depends on PROVE_RCU
- default n
- help
- By itself, PROVE_RCU will disable checking upon issuing the
- first warning (or "splat"). This feature prevents such
- disabling, allowing multiple RCU-lockdep warnings to be printed
- on a single reboot.
-
- Say Y to allow multiple RCU-lockdep warnings per boot.
-
- Say N if you are unsure.
-
-config SPARSE_RCU_POINTER
- bool "RCU debugging: sparse-based checks for pointer usage"
- default n
- help
- This feature enables the __rcu sparse annotation for
- RCU-protected pointers. This annotation will cause sparse
- to flag any non-RCU used of annotated pointers. This can be
- helpful when debugging RCU usage. Please note that this feature
- is not intended to enforce code cleanliness; it is instead merely
- a debugging aid.
-
- Say Y to make sparse flag questionable use of RCU-protected pointers
-
- Say N if you are unsure.
-
-config TORTURE_TEST
- tristate
- default n
-
-config RCU_PERF_TEST
- tristate "performance tests for RCU"
- depends on DEBUG_KERNEL
- select TORTURE_TEST
- select SRCU
- select TASKS_RCU
- default n
- help
- This option provides a kernel module that runs performance
- tests on the RCU infrastructure. The kernel module may be built
- after the fact on the running kernel to be tested, if desired.
-
- Say Y here if you want RCU performance tests to be built into
- the kernel.
- Say M if you want the RCU performance tests to build as a module.
- Say N if you are unsure.
-
-config RCU_TORTURE_TEST
- tristate "torture tests for RCU"
- depends on DEBUG_KERNEL
- select TORTURE_TEST
- select SRCU
- select TASKS_RCU
- default n
- help
- This option provides a kernel module that runs torture tests
- on the RCU infrastructure. The kernel module may be built
- after the fact on the running kernel to be tested, if desired.
-
- Say Y here if you want RCU torture tests to be built into
- the kernel.
- Say M if you want the RCU torture tests to build as a module.
- Say N if you are unsure.
-
-config RCU_TORTURE_TEST_SLOW_PREINIT
- bool "Slow down RCU grace-period pre-initialization to expose races"
- depends on RCU_TORTURE_TEST
- help
- This option delays grace-period pre-initialization (the
- propagation of CPU-hotplug changes up the rcu_node combining
- tree) for a few jiffies between initializing each pair of
- consecutive rcu_node structures. This helps to expose races
- involving grace-period pre-initialization, in other words, it
- makes your kernel less stable. It can also greatly increase
- grace-period latency, especially on systems with large numbers
- of CPUs. This is useful when torture-testing RCU, but in
- almost no other circumstance.
-
- Say Y here if you want your system to crash and hang more often.
- Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY
- int "How much to slow down RCU grace-period pre-initialization"
- range 0 5
- default 3
- depends on RCU_TORTURE_TEST_SLOW_PREINIT
- help
- This option specifies the number of jiffies to wait between
- each rcu_node structure pre-initialization step.
-
-config RCU_TORTURE_TEST_SLOW_INIT
- bool "Slow down RCU grace-period initialization to expose races"
- depends on RCU_TORTURE_TEST
- help
- This option delays grace-period initialization for a few
- jiffies between initializing each pair of consecutive
- rcu_node structures. This helps to expose races involving
- grace-period initialization, in other words, it makes your
- kernel less stable. It can also greatly increase grace-period
- latency, especially on systems with large numbers of CPUs.
- This is useful when torture-testing RCU, but in almost no
- other circumstance.
-
- Say Y here if you want your system to crash and hang more often.
- Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_INIT_DELAY
- int "How much to slow down RCU grace-period initialization"
- range 0 5
- default 3
- depends on RCU_TORTURE_TEST_SLOW_INIT
- help
- This option specifies the number of jiffies to wait between
- each rcu_node structure initialization.
-
-config RCU_TORTURE_TEST_SLOW_CLEANUP
- bool "Slow down RCU grace-period cleanup to expose races"
- depends on RCU_TORTURE_TEST
- help
- This option delays grace-period cleanup for a few jiffies
- between cleaning up each pair of consecutive rcu_node
- structures. This helps to expose races involving grace-period
- cleanup, in other words, it makes your kernel less stable.
- It can also greatly increase grace-period latency, especially
- on systems with large numbers of CPUs. This is useful when
- torture-testing RCU, but in almost no other circumstance.
-
- Say Y here if you want your system to crash and hang more often.
- Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY
- int "How much to slow down RCU grace-period cleanup"
- range 0 5
- default 3
- depends on RCU_TORTURE_TEST_SLOW_CLEANUP
- help
- This option specifies the number of jiffies to wait between
- each rcu_node structure cleanup operation.
-
-config RCU_CPU_STALL_TIMEOUT
- int "RCU CPU stall timeout in seconds"
- depends on RCU_STALL_COMMON
- range 3 300
- default 21
- help
- If a given RCU grace period extends more than the specified
- number of seconds, a CPU stall warning is printed. If the
- RCU grace period persists, additional CPU stall warnings are
- printed at more widely spaced intervals.
-
-config RCU_TRACE
- bool "Enable tracing for RCU"
- depends on DEBUG_KERNEL
- default y if TREE_RCU
- select TRACE_CLOCK
- help
- This option provides tracing in RCU which presents stats
- in debugfs for debugging RCU implementation. It also enables
- additional tracepoints for ftrace-style event tracing.
-
- Say Y here if you want to enable RCU tracing
- Say N if you are unsure.
-
-config RCU_EQS_DEBUG
- bool "Provide debugging asserts for adding NO_HZ support to an arch"
- depends on DEBUG_KERNEL
- help
- This option provides consistency checks in RCU's handling of
- NO_HZ. These checks have proven quite helpful in detecting
- bugs in arch-specific NO_HZ code.
-
- Say N here if you need ultimate kernel/user switch latencies
- Say Y if you are unsure
-
-endmenu # "RCU Debugging"
+source "kernel/rcu/Kconfig.debug"
config DEBUG_WQ_FORCE_RR_CPU
bool "Force round-robin CPU selection for unbound work items"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 533f912638ed..ab4ff0eea776 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -13,7 +13,7 @@ menuconfig KGDB
CONFIG_FRAME_POINTER to aid in producing more reliable stack
backtraces in the external debugger. Documentation of
kernel debugger is available at http://kgdb.sourceforge.net
- as well as in DocBook form in Documentation/DocBook/. If
+ as well as in Documentation/dev-tools/kgdb.rst. If
unsure, say N.
if KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 0166fbc0fa81..5a008329324e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,9 +25,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
earlycpio.o seq_buf.o siphash.o \
nmi_backtrace.o nodemask.o win_minmax.o
-CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
-CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
-
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
@@ -41,7 +38,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o usercopy.o
+ once.o refcount.o usercopy.o errseq.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
@@ -99,6 +96,7 @@ obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
+obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 3c6432df7e63..4c0888c4a68d 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -23,14 +23,14 @@
* the values[M, M+1, ..., N] into the ints array in get_options.
*/
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
{
int x, inc_counter, upper_range;
(*str)++;
upper_range = simple_strtol((*str), NULL, 0);
inc_counter = upper_range - *pint;
- for (x = *pint; x < upper_range; x++)
+ for (x = *pint; n && x < upper_range; x++, n--)
*pint++ = x;
return inc_counter;
}
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
break;
if (res == 3) {
int range_nums;
- range_nums = get_range((char **)&str, ints + i);
+ range_nums = get_range((char **)&str, ints + i, nints - i);
if (range_nums < 0)
break;
/*
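The extra n argument above bounds how many slots get_range() may fill. A
minimal sketch of the caller-visible effect, with a hypothetical four-slot
array (kernel context assumed):

	static void parse_example(void)
	{
		int ints[4];

		/* get_options() stores a count in ints[0] and parsed values
		 * at ints[1..]; with nints == 4 there is room for three
		 * values, so the expansion of "1-8" is now truncated instead
		 * of writing past the end of the array. */
		get_options("1-8", ARRAY_SIZE(ints), ints);
		/* ints[1] == 1, ints[2] == 2, ints[3] == 3; 4..8 are dropped */
	}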
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 81dedaab36cc..4731a0895760 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
EXPORT_SYMBOL(cpumask_any_but);
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ int next;
+
+again:
+ next = cpumask_next(n, mask);
+
+ if (wrap && n < start && next >= start) {
+ return nr_cpumask_bits;
+
+ } else if (next >= nr_cpumask_bits) {
+ wrap = true;
+ n = -1;
+ goto again;
+ }
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
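For context, cpumask_next_wrap() backs a for_each_cpu_wrap()-style loop; a
sketch of the intended iteration pattern (do_something() is a placeholder):

	/* Visit every CPU in @mask exactly once, beginning at @start and
	 * wrapping past the end of the mask back to CPU 0. */
	static void visit_all(const struct cpumask *mask, int start)
	{
		int cpu;

		for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
		     cpu < nr_cpumask_bits;
		     cpu = cpumask_next_wrap(cpu, mask, start, true))
			do_something(cpu);	/* hypothetical per-CPU work */
	}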
diff --git a/lib/crc4.c b/lib/crc4.c
new file mode 100644
index 000000000000..cf6db46661be
--- /dev/null
+++ b/lib/crc4.c
@@ -0,0 +1,46 @@
+/*
+ * crc4.c - simple crc-4 calculations.
+ *
+ * This source code is licensed under the GNU General Public License, Version
+ * 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc4.h>
+#include <linux/module.h>
+
+static const uint8_t crc4_tab[] = {
+ 0x0, 0x7, 0xe, 0x9, 0xb, 0xc, 0x5, 0x2,
+ 0x1, 0x6, 0xf, 0x8, 0xa, 0xd, 0x4, 0x3,
+};
+
+/**
+ * crc4 - calculate the 4-bit crc of a value.
+ * @c: starting crc4 value
+ * @x: value to checksum
+ * @bits: number of bits in @x to checksum
+ *
+ * Returns the crc4 value of @x, using polynomial 0b10111.
+ *
+ * The @x value is treated as left-aligned, and bits above @bits are ignored
+ * in the crc calculations.
+ */
+uint8_t crc4(uint8_t c, uint64_t x, int bits)
+{
+ int i;
+
+ /* mask off anything above the low @bits bits */
+ x &= (1ull << bits) - 1;
+
+ /* Align to 4-bits */
+ bits = (bits + 3) & ~0x3;
+
+ /* Calculate crc4 over four-bit nibbles, starting at the MSbit */
+ for (i = bits - 4; i >= 0; i -= 4)
+ c = crc4_tab[c ^ ((x >> i) & 0xf)];
+
+ return c;
+}
+EXPORT_SYMBOL_GPL(crc4);
+
+MODULE_DESCRIPTION("CRC4 calculations");
+MODULE_LICENSE("GPL");
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index de26c8b68f34..acc4190e2731 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/pfn.h>
static void *dma_noop_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
@@ -16,7 +17,8 @@ static void *dma_noop_alloc(struct device *dev, size_t size,
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret)
- *dma_handle = virt_to_phys(ret);
+ *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
+
return ret;
}
@@ -32,7 +34,7 @@ static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return page_to_phys(page) + offset;
+ return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
}
static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
@@ -43,34 +45,23 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nent
struct scatterlist *sg;
for_each_sg(sgl, sg, nents, i) {
+ dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
void *va;
BUG_ON(!sg_page(sg));
va = sg_virt(sg);
- sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
+ sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
sg_dma_len(sg) = sg->length;
}
return nents;
}
-static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static int dma_noop_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
const struct dma_map_ops dma_noop_ops = {
.alloc = dma_noop_alloc,
.free = dma_noop_free,
.map_page = dma_noop_map_page,
.map_sg = dma_noop_map_sg,
- .mapping_error = dma_noop_mapping_error,
- .dma_supported = dma_noop_supported,
};
EXPORT_SYMBOL(dma_noop_ops);
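The new offset handling amounts to translating CPU physical addresses into
bus addresses; a minimal sketch of the arithmetic (helper name hypothetical,
4 KiB pages assumed):

	/* bus address = physical address - PFN_PHYS(dev->dma_pfn_offset);
	 * e.g. with dma_pfn_offset == 0x80000, physical 0xC0000000 maps to
	 * bus address 0x40000000. */
	static inline dma_addr_t noop_phys_to_dma(struct device *dev,
						  phys_addr_t paddr)
	{
		return paddr - PFN_PHYS(dev->dma_pfn_offset);
	}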
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
index dcd4df1f7174..5c4f11329721 100644
--- a/lib/dma-virt.c
+++ b/lib/dma-virt.c
@@ -51,22 +51,10 @@ static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return false;
-}
-
-static int dma_virt_supported(struct device *dev, u64 mask)
-{
- return true;
-}
-
const struct dma_map_ops dma_virt_ops = {
.alloc = dma_virt_alloc,
.free = dma_virt_free,
.map_page = dma_virt_map_page,
.map_sg = dma_virt_map_sg,
- .mapping_error = dma_virt_mapping_error,
- .dma_supported = dma_virt_supported,
};
EXPORT_SYMBOL(dma_virt_ops);
diff --git a/lib/errseq.c b/lib/errseq.c
new file mode 100644
index 000000000000..841fa24e6e00
--- /dev/null
+++ b/lib/errseq.c
@@ -0,0 +1,208 @@
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/atomic.h>
+#include <linux/errseq.h>
+
+/*
+ * An errseq_t is a way of recording errors in one place, and allowing any
+ * number of "subscribers" to tell whether it has changed since a previous
+ * point where it was sampled.
+ *
+ * It's implemented as an unsigned 32-bit value. The low order bits are
+ * designated to hold an error code (between 0 and -MAX_ERRNO). The upper bits
+ * are used as a counter. This is done with atomics instead of locking so that
+ * these functions can be called from any context.
+ *
+ * The general idea is for consumers to sample an errseq_t value. That value
+ * can later be used to tell whether any new errors have occurred since that
+ * sampling was done.
+ *
+ * Note that there is a risk of collisions if new errors are being recorded
+ * frequently, since we have so few bits to use as a counter.
+ *
+ * To mitigate this, one bit is used as a flag to tell whether the value has
+ * been sampled since a new value was recorded. That allows us to avoid bumping
+ * the counter if no one has sampled it since the last time an error was
+ * recorded.
+ *
+ * A new errseq_t should always be zeroed out. An errseq_t value of all zeroes
+ * is the special (but common) case where there has never been an error. An all
+ * zero value thus serves as the "epoch" if one wishes to know whether there
+ * has ever been an error set since it was first initialized.
+ */
+
+/* The low bits are designated for error code (max of MAX_ERRNO) */
+#define ERRSEQ_SHIFT ilog2(MAX_ERRNO + 1)
+
+/* This bit is used as a flag to indicate whether the value has been seen */
+#define ERRSEQ_SEEN (1 << ERRSEQ_SHIFT)
+
+/* The lowest bit of the counter */
+#define ERRSEQ_CTR_INC (1 << (ERRSEQ_SHIFT + 1))
+
+/**
+ * __errseq_set - set an errseq_t for later reporting
+ * @eseq: errseq_t field that should be set
+ * @err: error to set
+ *
+ * This function sets the error in *eseq, and increments the sequence counter
+ * if the last sequence was sampled at some point in the past.
+ *
+ * Any error set will always overwrite an existing error.
+ *
+ * Most callers will want to use the errseq_set inline wrapper to efficiently
+ * handle the common case where err is 0.
+ *
+ * We do return an errseq_t here, primarily for debugging purposes. The return
+ * value should not be used as a previously sampled value in later calls as it
+ * will not have the SEEN flag set.
+ */
+errseq_t __errseq_set(errseq_t *eseq, int err)
+{
+ errseq_t cur, old;
+
+ /* MAX_ERRNO must be able to serve as a mask */
+ BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1);
+
+ /*
+ * Ensure the error code actually fits where we want it to go. If it
+ * doesn't then just throw a warning and don't record anything. We
+ * also don't accept zero here as that would effectively clear a
+ * previous error.
+ */
+ old = READ_ONCE(*eseq);
+
+ if (WARN(unlikely(err == 0 || (unsigned int)-err > MAX_ERRNO),
+ "err = %d\n", err))
+ return old;
+
+ for (;;) {
+ errseq_t new;
+
+ /* Clear out error bits and set new error */
+ new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;
+
+ /* Only increment if someone has looked at it */
+ if (old & ERRSEQ_SEEN)
+ new += ERRSEQ_CTR_INC;
+
+ /* If there would be no change, then call it done */
+ if (new == old) {
+ cur = new;
+ break;
+ }
+
+ /* Try to swap the new value into place */
+ cur = cmpxchg(eseq, old, new);
+
+ /*
+ * Call it success if we did the swap or someone else beat us
+ * to it for the same value.
+ */
+ if (likely(cur == old || cur == new))
+ break;
+
+ /* Raced with an update, try again */
+ old = cur;
+ }
+ return cur;
+}
+EXPORT_SYMBOL(__errseq_set);
+
+/**
+ * errseq_sample - grab current errseq_t value
+ * @eseq: pointer to errseq_t to be sampled
+ *
+ * This function allows callers to sample an errseq_t value, marking it as
+ * "seen" if required.
+ */
+errseq_t errseq_sample(errseq_t *eseq)
+{
+ errseq_t old = READ_ONCE(*eseq);
+ errseq_t new = old;
+
+ /*
+ * For the common case of no errors ever having been set, we can skip
+ * marking the SEEN bit. Once an error has been set, the value will
+ * never go back to zero.
+ */
+ if (old != 0) {
+ new |= ERRSEQ_SEEN;
+ if (old != new)
+ cmpxchg(eseq, old, new);
+ }
+ return new;
+}
+EXPORT_SYMBOL(errseq_sample);
+
+/**
+ * errseq_check - has an error occurred since a particular sample point?
+ * @eseq: pointer to errseq_t value to be checked
+ * @since: previously-sampled errseq_t from which to check
+ *
+ * Grab the value that eseq points to, and see if it has changed "since"
+ * the given value was sampled. The "since" value is not advanced, so there
+ * is no need to mark the value as seen.
+ *
+ * Returns the latest error set in the errseq_t or 0 if it hasn't changed.
+ */
+int errseq_check(errseq_t *eseq, errseq_t since)
+{
+ errseq_t cur = READ_ONCE(*eseq);
+
+ if (likely(cur == since))
+ return 0;
+ return -(cur & MAX_ERRNO);
+}
+EXPORT_SYMBOL(errseq_check);
+
+/**
+ * errseq_check_and_advance - check an errseq_t and advance to current value
+ * @eseq: pointer to value being checked and reported
+ * @since: pointer to previously-sampled errseq_t to check against and advance
+ *
+ * Grab the eseq value, and see whether it matches the value that "since"
+ * points to. If it does, then just return 0.
+ *
+ * If it doesn't, then the value has changed. Set the "seen" flag, and try to
+ * swap it into place as the new eseq value. Then, set that value as the new
+ * "since" value, and return whatever the error portion is set to.
+ *
+ * Note that no locking is provided here for concurrent updates to the "since"
+ * value. The caller must provide that if necessary. Because of this, callers
+ * may want to do a lockless errseq_check before taking the lock and calling
+ * this.
+ */
+int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
+{
+ int err = 0;
+ errseq_t old, new;
+
+ /*
+ * Most callers will want to use the inline wrapper to check this,
+ * so that the common case of no error is handled without needing
+ * to take the lock that protects the "since" value.
+ */
+ old = READ_ONCE(*eseq);
+ if (old != *since) {
+ /*
+ * Set the flag and try to swap it into place if it has
+ * changed.
+ *
+ * We don't care about the outcome of the swap here. If the
+ * swap doesn't occur, then it has either been updated by a
+ * writer who is altering the value in some way (updating
+ * counter or resetting the error), or another reader who is
+ * just setting the "seen" flag. Either outcome is OK, and we
+ * can advance "since" and return an error based on what we
+ * have.
+ */
+ new = old | ERRSEQ_SEEN;
+ if (new != old)
+ cmpxchg(eseq, old, new);
+ *since = new;
+ err = -(new & MAX_ERRNO);
+ }
+ return err;
+}
+EXPORT_SYMBOL(errseq_check_and_advance);
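A sketch of the intended producer/consumer pattern (struct and function names
are hypothetical; this mirrors how writeback errors can be reported once per
file handle):

	struct foo {
		errseq_t wb_err;	/* shared error record */
	};

	struct foo_handle {
		errseq_t err_since;	/* per-subscriber sample */
	};

	static void foo_open(struct foo *f, struct foo_handle *h)
	{
		h->err_since = errseq_sample(&f->wb_err);
	}

	static void foo_io_failed(struct foo *f, int err)
	{
		errseq_set(&f->wb_err, err);	/* the inline wrapper noted above */
	}

	static int foo_fsync(struct foo *f, struct foo_handle *h)
	{
		/* reports the error once per handle, then advances */
		return errseq_check_and_advance(&f->wb_err, &h->err_since);
	}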
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index a71cf1bdd4c9..2cc1f94e03a1 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
if (val < (nr_cpu_ids * PROP_BATCH))
val = percpu_counter_sum(&pl->events);
- __percpu_counter_add(&pl->events,
+ percpu_counter_add_batch(&pl->events,
-val + (val >> (period-pl->period)), PROP_BATCH);
} else
percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
fprop_reflect_period_percpu(p, pl);
- __percpu_counter_add(&pl->events, 1, PROP_BATCH);
+ percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
percpu_counter_add(&p->events, 1);
}
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
return;
} else
fprop_reflect_period_percpu(p, pl);
- __percpu_counter_add(&pl->events, 1, PROP_BATCH);
+ percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
percpu_counter_add(&p->events, 1);
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f835964c9485..52c8dd6d8e82 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -130,6 +130,24 @@
} \
}
+static int copyout(void __user *to, const void *from, size_t n)
+{
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ kasan_check_read(from, n);
+ n = raw_copy_to_user(to, from, n);
+ }
+ return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+ if (access_ok(VERIFY_READ, from, n)) {
+ kasan_check_write(to, n);
+ n = raw_copy_from_user(to, from, n);
+ }
+ return n;
+}
+
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
if (unlikely(!bytes))
return 0;
+ might_fault();
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
from = kaddr + offset;
/* first chunk, usually the only one */
- left = __copy_to_user_inatomic(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_to_user_inatomic(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
kaddr = kmap(page);
from = kaddr + offset;
- left = __copy_to_user(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_to_user(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
if (unlikely(!bytes))
return 0;
+ might_fault();
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
to = kaddr + offset;
/* first chunk, usually the only one */
- left = __copy_from_user_inatomic(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_from_user_inatomic(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
kaddr = kmap(page);
to = kaddr + offset;
- left = __copy_from_user(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_from_user(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
@@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
return bytes;
}
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
if (unlikely(i->type & ITER_PIPE))
return copy_pipe_to_iter(addr, bytes, i);
+ if (iter_is_iovec(i))
+ might_fault();
iterate_and_advance(i, bytes, v,
- __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len),
+ copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
memcpy_to_page(v.bv_page, v.bv_offset,
(from += v.bv_len) - v.bv_len, v.bv_len),
memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
+ if (iter_is_iovec(i))
+ might_fault();
iterate_and_advance(i, bytes, v,
- __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len),
+ copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
if (unlikely(i->count < bytes))
return false;
+ if (iter_is_iovec(i))
+ might_fault();
iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user((to += v.iov_len) - v.iov_len,
+ if (copyin((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
0;}),
@@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
iov_iter_advance(i, bytes);
return true;
}
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -613,9 +637,31 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
+ iterate_and_advance(i, bytes, v,
+ __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len),
+ memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len),
+ memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
+ v.iov_len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
+#endif
+
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -637,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
iov_iter_advance(i, bytes);
return true;
}
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+ size_t v = n + offset;
+ if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+ return true;
+ WARN_ON(1);
+ return false;
+}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -657,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
- size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+ size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
} else
@@ -700,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
if (unlikely(i->type & ITER_PIPE))
return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
- __clear_user(v.iov_base, v.iov_len),
+ clear_user(v.iov_base, v.iov_len),
memzero_page(v.bv_page, v.bv_offset, v.bv_len),
memset(v.iov_base, 0, v.iov_len)
)
@@ -713,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+ if (unlikely(!page_copy_sane(page, offset, bytes))) {
+ kunmap_atomic(kaddr);
+ return 0;
+ }
if (unlikely(i->type & ITER_PIPE)) {
kunmap_atomic(kaddr);
WARN_ON(1);
return 0;
}
iterate_all_kinds(i, bytes, v,
- __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
+ copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
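The page_copy_sane() check above relies on unsigned wraparound; a userspace
restatement of the bound, where page_size stands in for
PAGE_SIZE << compound_order(page):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	static bool copy_sane(size_t offset, size_t n, size_t page_size)
	{
		size_t v = n + offset;

		/* n <= v rejects offset + n wrapping around zero */
		return n <= v && v <= page_size;
	}

	/* copy_sane(16, SIZE_MAX - 8, 4096) == false: v wrapped below n */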
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9a2b811966eb..719c155fce20 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -23,6 +23,8 @@
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
+#include <linux/uuid.h>
+#include <linux/ctype.h>
#include <net/sock.h>
#include <net/net_namespace.h>
@@ -52,19 +54,13 @@ static const char *kobject_actions[] = {
[KOBJ_OFFLINE] = "offline",
};
-/**
- * kobject_action_type - translate action string to numeric type
- *
- * @buf: buffer containing the action string, newline is ignored
- * @count: length of buffer
- * @type: pointer to the location to store the action type
- *
- * Returns 0 if the action string was recognized.
- */
-int kobject_action_type(const char *buf, size_t count,
- enum kobject_action *type)
+static int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type,
+ const char **args)
{
enum kobject_action action;
+ size_t count_first;
+ const char *args_start;
int ret = -EINVAL;
if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
@@ -73,11 +69,20 @@ int kobject_action_type(const char *buf, size_t count,
if (!count)
goto out;
+ args_start = strnchr(buf, count, ' ');
+ if (args_start) {
+ count_first = args_start - buf;
+ args_start = args_start + 1;
+ } else
+ count_first = count;
+
for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) {
- if (strncmp(kobject_actions[action], buf, count) != 0)
+ if (strncmp(kobject_actions[action], buf, count_first) != 0)
continue;
- if (kobject_actions[action][count] != '\0')
+ if (kobject_actions[action][count_first] != '\0')
continue;
+ if (args)
+ *args = args_start;
*type = action;
ret = 0;
break;
@@ -86,6 +91,142 @@ out:
return ret;
}
+static const char *action_arg_word_end(const char *buf, const char *buf_end,
+ char delim)
+{
+ const char *next = buf;
+
+ while (next <= buf_end && *next != delim)
+ if (!isalnum(*next++))
+ return NULL;
+
+ if (next == buf)
+ return NULL;
+
+ return next;
+}
+
+static int kobject_action_args(const char *buf, size_t count,
+ struct kobj_uevent_env **ret_env)
+{
+ struct kobj_uevent_env *env = NULL;
+ const char *next, *buf_end, *key;
+ int key_len;
+ int r = -EINVAL;
+
+ if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0'))
+ count--;
+
+ if (!count)
+ return -EINVAL;
+
+ env = kzalloc(sizeof(*env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ /* first arg is UUID */
+ if (count < UUID_STRING_LEN || !uuid_is_valid(buf) ||
+ add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf))
+ goto out;
+
+ /*
+ * the rest are custom environment variables in KEY=VALUE
+ * format with ' ' delimiter between each KEY=VALUE pair
+ */
+ next = buf + UUID_STRING_LEN;
+ buf_end = buf + count - 1;
+
+ while (next <= buf_end) {
+ if (*next != ' ')
+ goto out;
+
+ /* skip the ' ', key must follow */
+ key = ++next;
+ if (key > buf_end)
+ goto out;
+
+ buf = next;
+ next = action_arg_word_end(buf, buf_end, '=');
+ if (!next || next > buf_end || *next != '=')
+ goto out;
+ key_len = next - buf;
+
+ /* skip the '=', value must follow */
+ if (++next > buf_end)
+ goto out;
+
+ buf = next;
+ next = action_arg_word_end(buf, buf_end, ' ');
+ if (!next)
+ goto out;
+
+ if (add_uevent_var(env, "SYNTH_ARG_%.*s=%.*s",
+ key_len, key, (int) (next - buf), buf))
+ goto out;
+ }
+
+ r = 0;
+out:
+ if (r)
+ kfree(env);
+ else
+ *ret_env = env;
+ return r;
+}
+
+/**
+ * kobject_synth_uevent - send synthetic uevent with arguments
+ *
+ * @kobj: struct kobject for which synthetic uevent is to be generated
+ * @buf: buffer containing action type and action args, newline is ignored
+ * @count: length of buffer
+ *
+ * Returns 0 if kobject_synth_uevent() completes successfully, or the
+ * corresponding error code when it fails.
+ */
+int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
+{
+ char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL };
+ enum kobject_action action;
+ const char *action_args;
+ struct kobj_uevent_env *env;
+ const char *msg = NULL, *devpath;
+ int r;
+
+ r = kobject_action_type(buf, count, &action, &action_args);
+ if (r) {
+ msg = "unknown uevent action string\n";
+ goto out;
+ }
+
+ if (!action_args) {
+ r = kobject_uevent_env(kobj, action, no_uuid_envp);
+ goto out;
+ }
+
+ r = kobject_action_args(action_args,
+ count - (action_args - buf), &env);
+ if (r == -EINVAL) {
+ msg = "incorrect uevent action arguments\n";
+ goto out;
+ }
+
+ if (r)
+ goto out;
+
+ r = kobject_uevent_env(kobj, action, env->envp);
+ kfree(env);
+out:
+ if (r) {
+ devpath = kobject_get_path(kobj, GFP_KERNEL);
+ printk(KERN_WARNING "synth uevent: %s: %s",
+ devpath ?: "unknown device",
+ msg ?: "failed to send uevent");
+ kfree(devpath);
+ }
+ return r;
+}
+
#ifdef CONFIG_NET
static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
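From userspace, the new syntax is "<action> <uuid> [KEY=value]..." written to
a kobject's uevent file; a sketch (the device path and values are
illustrative only):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* the kernel emits SYNTH_UUID=... plus SYNTH_ARG_FOO=1 and
		 * SYNTH_ARG_BAR=2 in the resulting uevent */
		const char *msg =
			"change 12345678-1234-1234-1234-123456789abc FOO=1 BAR=2\n";
		int fd = open("/sys/class/net/lo/uevent", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, msg, strlen(msg));
		close(fd);
		return 0;
	}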
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7f2562..9f79547d1b97 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm;
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
SHASH_DESC_ON_STACK(shash, tfm);
- u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
int err;
shash->tfm = tfm;
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ ret = *ctx;
+ barrier_data(ctx);
+ return ret;
}
EXPORT_SYMBOL(crc32c);
diff --git a/lib/locking-selftest-rtmutex.h b/lib/locking-selftest-rtmutex.h
new file mode 100644
index 000000000000..e3cb83989d16
--- /dev/null
+++ b/lib/locking-selftest-rtmutex.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK RTL
+
+#undef UNLOCK
+#define UNLOCK RTU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT RTI
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index f3a217ea0388..6f2b135dc5e8 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
+#include <linux/rtmutex.h>
/*
* Change this to 1 if you want to see the failure printouts:
@@ -46,6 +47,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_MUTEX 0x4
#define LOCKTYPE_RWSEM 0x8
#define LOCKTYPE_WW 0x10
+#define LOCKTYPE_RTMUTEX 0x20
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -74,6 +76,15 @@ static DECLARE_RWSEM(rwsem_B);
static DECLARE_RWSEM(rwsem_C);
static DECLARE_RWSEM(rwsem_D);
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_A);
+static DEFINE_RT_MUTEX(rtmutex_B);
+static DEFINE_RT_MUTEX(rtmutex_C);
+static DEFINE_RT_MUTEX(rtmutex_D);
+
+#endif
+
/*
* Locks that we initialize dynamically as well so that
* e.g. X1 and X2 becomes two instances of the same class,
@@ -108,6 +119,17 @@ static DECLARE_RWSEM(rwsem_Y2);
static DECLARE_RWSEM(rwsem_Z1);
static DECLARE_RWSEM(rwsem_Z2);
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_X1);
+static DEFINE_RT_MUTEX(rtmutex_X2);
+static DEFINE_RT_MUTEX(rtmutex_Y1);
+static DEFINE_RT_MUTEX(rtmutex_Y2);
+static DEFINE_RT_MUTEX(rtmutex_Z1);
+static DEFINE_RT_MUTEX(rtmutex_Z2);
+
+#endif
+
/*
* non-inlined runtime initializers, to let separate locks share
* the same lock-class:
@@ -129,6 +151,17 @@ INIT_CLASS_FUNC(Z)
static void init_shared_classes(void)
{
+#ifdef CONFIG_RT_MUTEXES
+ static struct lock_class_key rt_X, rt_Y, rt_Z;
+
+ __rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
+ __rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
+ __rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
+ __rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
+ __rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
+ __rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
+#endif
+
init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
@@ -193,6 +226,10 @@ static void init_shared_classes(void)
#define MU(x) mutex_unlock(&mutex_##x)
#define MI(x) mutex_init(&mutex_##x)
+#define RTL(x) rt_mutex_lock(&rtmutex_##x)
+#define RTU(x) rt_mutex_unlock(&rtmutex_##x)
+#define RTI(x) rt_mutex_init(&rtmutex_##x)
+
#define WSL(x) down_write(&rwsem_##x)
#define WSU(x) up_write(&rwsem_##x)
@@ -264,6 +301,11 @@ GENERATE_TESTCASE(AA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(AA_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(AA_rtmutex);
+#endif
+
#undef E
/*
@@ -345,6 +387,11 @@ GENERATE_TESTCASE(ABBA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBA_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBA_rtmutex);
+#endif
+
#undef E
/*
@@ -373,6 +420,11 @@ GENERATE_TESTCASE(ABBCCA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCA_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBCCA_rtmutex);
+#endif
+
#undef E
/*
@@ -401,6 +453,11 @@ GENERATE_TESTCASE(ABCABC_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCABC_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCABC_rtmutex);
+#endif
+
#undef E
/*
@@ -430,6 +487,11 @@ GENERATE_TESTCASE(ABBCCDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABBCCDDA_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBCCDDA_rtmutex);
+#endif
+
#undef E
/*
@@ -458,6 +520,11 @@ GENERATE_TESTCASE(ABCDBDDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBDDA_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCDBDDA_rtmutex);
+#endif
+
#undef E
/*
@@ -486,6 +553,11 @@ GENERATE_TESTCASE(ABCDBCDA_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(ABCDBCDA_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCDBCDA_rtmutex);
+#endif
+
#undef E
/*
@@ -513,33 +585,10 @@ GENERATE_TESTCASE(double_unlock_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(double_unlock_rsem)
-#undef E
-
-/*
- * Bad unlock ordering:
- */
-#define E() \
- \
- LOCK(A); \
- LOCK(B); \
- UNLOCK(A); /* fail */ \
- UNLOCK(B);
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(bad_unlock_order_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(bad_unlock_order_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(bad_unlock_order_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(bad_unlock_order_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(bad_unlock_order_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(bad_unlock_order_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(double_unlock_rtmutex);
+#endif
#undef E
@@ -567,6 +616,11 @@ GENERATE_TESTCASE(init_held_wsem)
#include "locking-selftest-rsem.h"
GENERATE_TESTCASE(init_held_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(init_held_rtmutex);
+#endif
+
#undef E
/*
@@ -916,6 +970,9 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
# define I_WW(x) lockdep_reset_lock(&x.dep_map)
+#ifdef CONFIG_RT_MUTEXES
+# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
+#endif
#else
# define I_SPINLOCK(x)
# define I_RWLOCK(x)
@@ -924,12 +981,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
# define I_WW(x)
#endif
+#ifndef I_RTMUTEX
+# define I_RTMUTEX(x)
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+#define I2_RTMUTEX(x) rt_mutex_init(&rtmutex_##x)
+#else
+#define I2_RTMUTEX(x)
+#endif
+
#define I1(x) \
do { \
I_SPINLOCK(x); \
I_RWLOCK(x); \
I_MUTEX(x); \
I_RWSEM(x); \
+ I_RTMUTEX(x); \
} while (0)
#define I2(x) \
@@ -938,6 +1006,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
rwlock_init(&rwlock_##x); \
mutex_init(&mutex_##x); \
init_rwsem(&rwsem_##x); \
+ I2_RTMUTEX(x); \
} while (0)
static void reset_locks(void)
@@ -1013,6 +1082,12 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
reset_locks();
}
+#ifdef CONFIG_RT_MUTEXES
+#define dotest_rt(fn, e, m) dotest((fn), (e), (m))
+#else
+#define dotest_rt(fn, e, m)
+#endif
+
static inline void print_testname(const char *testname)
{
printk("%33s:", testname);
@@ -1050,6 +1125,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
#define DO_TESTCASE_6_SUCCESS(desc, name) \
@@ -1060,6 +1136,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
+ dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
/*
@@ -1073,6 +1150,7 @@ static inline void print_testname(const char *testname)
dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
pr_cont("\n");
#define DO_TESTCASE_2I(desc, name, nr) \
@@ -1825,7 +1903,6 @@ void locking_selftest(void)
DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
DO_TESTCASE_6("double unlock", double_unlock);
DO_TESTCASE_6("initialize held", init_held);
- DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);
printk(" --------------------------------------------------------------------------\n");
print_testname("recursive read-lock");
diff --git a/lib/nlattr.c b/lib/nlattr.c
index a7e0b16078df..fb52435be42d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -352,7 +352,7 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
struct nlattr *nla;
- nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+ nla = skb_put(skb, nla_total_size(attrlen));
nla->nla_type = attrtype;
nla->nla_len = nla_attr_size(attrlen);
@@ -398,12 +398,7 @@ EXPORT_SYMBOL(__nla_reserve_64bit);
*/
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
- void *start;
-
- start = skb_put(skb, NLA_ALIGN(attrlen));
- memset(start, 0, NLA_ALIGN(attrlen));
-
- return start;
+ return skb_put_zero(skb, NLA_ALIGN(attrlen));
}
EXPORT_SYMBOL(__nla_reserve_nohdr);
@@ -617,7 +612,7 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
- memcpy(skb_put(skb, attrlen), data, attrlen);
+ skb_put_data(skb, data, attrlen);
return 0;
}
EXPORT_SYMBOL(nla_append);
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 4e8a30d1c22f..0bc0a3535a8a 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
+ static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ arch_spin_lock(&lock);
if (regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
cpu, instruction_pointer(regs));
@@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
+ arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9c21000df0b5..8ee7e5ec21be 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
}
EXPORT_SYMBOL(percpu_counter_set);
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
@@ -89,7 +89,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
}
preempt_enable();
}
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index 39787db588b0..e824d088f72c 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -125,6 +125,26 @@ int main(int argc, char *argv[])
printf("EXPORT_SYMBOL(raid6_gfexp);\n");
printf("#endif\n");
+ /* Compute log-of-2 table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gflog[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ v = 255;
+ for (k = 0; k < 256; k++)
+ if (exptbl[k] == (i + j)) {
+ v = k;
+ break;
+ }
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ }
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gflog);\n");
+ printf("#endif\n");
+
/* Compute inverse table x^-1 == x^254 */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfinv[256] =\n" "{\n");
diff --git a/lib/refcount.c b/lib/refcount.c
index 9f906783987e..5d0582a9480c 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,6 +37,8 @@
#include <linux/refcount.h>
#include <linux/bug.h>
+#ifdef CONFIG_REFCOUNT_FULL
+
/**
* refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
@@ -225,6 +227,7 @@ void refcount_dec(refcount_t *r)
WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec);
+#endif /* CONFIG_REFCOUNT_FULL */
/**
* refcount_dec_if_one - decrement a refcount if it is 1
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 690d75b132fa..2fb007be0212 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -28,7 +28,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
/*
* It is valid to assume CPU-locality during early bootup:
*/
- if (system_state != SYSTEM_RUNNING)
+ if (system_state < SYSTEM_SCHEDULING)
goto out;
/*
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 8e105ed4df12..a5f567747ced 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -121,37 +121,3 @@ long strnlen_user(const char __user *str, long count)
return 0;
}
EXPORT_SYMBOL(strnlen_user);
-
-/**
- * strlen_user: - Get the size of a user string INCLUDING final NUL.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-long strlen_user(const char __user *str)
-{
- unsigned long max_addr, src_addr;
-
- max_addr = user_addr_max();
- src_addr = (unsigned long)str;
- if (likely(src_addr < max_addr)) {
- unsigned long max = max_addr - src_addr;
- long retval;
-
- user_access_begin();
- retval = do_strnlen_user(str, ~0ul, max);
- user_access_end();
- return retval;
- }
- return 0;
-}
-EXPORT_SYMBOL(strlen_user);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index be88cbaadde3..d9d5a410955c 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -84,6 +84,7 @@ struct bpf_test {
} test[MAX_SUBTESTS];
int (*fill_helper)(struct bpf_test *self);
__u8 frag_data[MAX_DATA];
+ int stack_depth; /* for eBPF only, since tests don't call verifier */
};
/* Large test cases need separate allocation and fill handler. */
@@ -434,6 +435,30 @@ loop:
return 0;
}
+static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_MOV64_REG(R6, R1);
+ insn[i++] = BPF_LD_ABS(BPF_B, 0);
+ insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
+ i++;
+ while (i < len - 1)
+ insn[i++] = BPF_LD_ABS(BPF_B, 1);
+ insn[i] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
static int __bpf_fill_stxdw(struct bpf_test *self, int size)
{
unsigned int len = BPF_MAXINSNS;
@@ -455,6 +480,7 @@ static int __bpf_fill_stxdw(struct bpf_test *self, int size)
self->u.ptr.insns = insn;
self->u.ptr.len = len;
+ self->stack_depth = 40;
return 0;
}
@@ -2317,7 +2343,8 @@ static struct bpf_test tests[] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
- { { 38, 256 } }
+ { { 38, 256 } },
+ .stack_depth = 64,
},
/* BPF_ALU | BPF_MOV | BPF_X */
{
@@ -4169,6 +4196,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0xff } },
+ .stack_depth = 40,
},
{
"ST_MEM_B: Store/Load byte: max positive",
@@ -4181,6 +4209,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x7f } },
+ .stack_depth = 40,
},
{
"STX_MEM_B: Store/Load byte: max negative",
@@ -4194,6 +4223,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0xff } },
+ .stack_depth = 40,
},
{
"ST_MEM_H: Store/Load half word: max negative",
@@ -4206,6 +4236,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0xffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_H: Store/Load half word: max positive",
@@ -4218,6 +4249,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x7fff } },
+ .stack_depth = 40,
},
{
"STX_MEM_H: Store/Load half word: max negative",
@@ -4231,6 +4263,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0xffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_W: Store/Load word: max negative",
@@ -4243,6 +4276,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0xffffffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_W: Store/Load word: max positive",
@@ -4255,6 +4289,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x7fffffff } },
+ .stack_depth = 40,
},
{
"STX_MEM_W: Store/Load word: max negative",
@@ -4268,6 +4303,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0xffffffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max negative",
@@ -4280,6 +4316,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0xffffffff } },
+ .stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max negative 2",
@@ -4297,6 +4334,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x1 } },
+ .stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max positive",
@@ -4309,6 +4347,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x7fffffff } },
+ .stack_depth = 40,
},
{
"STX_MEM_DW: Store/Load double word: max negative",
@@ -4322,6 +4361,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0xffffffff } },
+ .stack_depth = 40,
},
/* BPF_STX | BPF_XADD | BPF_W/DW */
{
@@ -4336,6 +4376,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x22 } },
+ .stack_depth = 40,
},
{
"STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4351,6 +4392,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0 } },
+ .stack_depth = 40,
},
{
"STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4363,6 +4405,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x12 } },
+ .stack_depth = 40,
},
{
"STX_XADD_W: X + 1 + 1 + 1 + ...",
@@ -4384,6 +4427,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x22 } },
+ .stack_depth = 40,
},
{
"STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4399,6 +4443,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0 } },
+ .stack_depth = 40,
},
{
"STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4411,6 +4456,7 @@ static struct bpf_test tests[] = {
INTERNAL,
{ },
{ { 0, 0x12 } },
+ .stack_depth = 40,
},
{
"STX_XADD_DW: X + 1 + 1 + 1 + ...",
@@ -5022,6 +5068,14 @@ static struct bpf_test tests[] = {
{ { ETH_HLEN, 0xbef } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
},
+ {
+ "BPF_MAXINSNS: jump around ld_abs",
+ { },
+ INTERNAL,
+ { 10, 11 },
+ { { 2, 10 } },
+ .fill_helper = bpf_fill_jump_around_ld_abs,
+ },
/*
* LD_IND / LD_ABS on fragmented SKBs
*/
@@ -5663,7 +5717,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
if (!skb)
return NULL;
- memcpy(__skb_put(skb, size), buf, size);
+ __skb_put_data(skb, buf, size);
/* Initialize a fake skb with test pattern. */
skb_reset_mac_header(skb);
@@ -5809,6 +5863,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
/* Type doesn't really matter here as long as it's not unspec. */
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
+ fp->aux->stack_depth = tests[which].stack_depth;
/* We cannot error here as we don't need type compatibility
* checks.
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 1b6010a3beb8..f5d9f08ee032 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -6,8 +6,11 @@
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
- if (likely(access_ok(VERIFY_READ, from, n)))
+ might_fault();
+ if (likely(access_ok(VERIFY_READ, from, n))) {
+ kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
+ }
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
@@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user);
#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
{
- if (likely(access_ok(VERIFY_WRITE, to, n)))
+ might_fault();
+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
+ kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
+ }
return n;
}
EXPORT_SYMBOL(_copy_to_user);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 9f37d6208e99..86c3385b9eb3 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -31,6 +31,7 @@
#include <linux/dcache.h>
#include <linux/cred.h>
#include <linux/uuid.h>
+#include <linux/of.h>
#include <net/addrconf.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
@@ -1470,6 +1471,126 @@ char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt)
return format_flags(buf, end, flags, names);
}
+static const char *device_node_name_for_depth(const struct device_node *np, int depth)
+{
+ for ( ; np && depth; depth--)
+ np = np->parent;
+
+ return kbasename(np->full_name);
+}
+
+static noinline_for_stack
+char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end)
+{
+ int depth;
+ const struct device_node *parent = np->parent;
+ static const struct printf_spec strspec = {
+ .field_width = -1,
+ .precision = -1,
+ };
+
+ /* special case for root node */
+ if (!parent)
+ return string(buf, end, "/", strspec);
+
+ for (depth = 0; parent->parent; depth++)
+ parent = parent->parent;
+
+ for ( ; depth >= 0; depth--) {
+ buf = string(buf, end, "/", strspec);
+ buf = string(buf, end, device_node_name_for_depth(np, depth),
+ strspec);
+ }
+ return buf;
+}
+
+static noinline_for_stack
+char *device_node_string(char *buf, char *end, struct device_node *dn,
+ struct printf_spec spec, const char *fmt)
+{
+ char tbuf[sizeof("xxxx") + 1];
+ const char *p;
+ int ret;
+ char *buf_start = buf;
+ struct property *prop;
+ bool has_mult, pass;
+ static const struct printf_spec num_spec = {
+ .flags = SMALL,
+ .field_width = -1,
+ .precision = -1,
+ .base = 10,
+ };
+
+ struct printf_spec str_spec = spec;
+ str_spec.field_width = -1;
+
+ if (!IS_ENABLED(CONFIG_OF))
+ return string(buf, end, "(!OF)", spec);
+
+ if ((unsigned long)dn < PAGE_SIZE)
+ return string(buf, end, "(null)", spec);
+
+ /* simple case without any further format specifiers */
+ fmt++;
+ if (fmt[0] == '\0' || strcspn(fmt,"fnpPFcC") > 0)
+ fmt = "f";
+
+ for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
+ if (pass) {
+ if (buf < end)
+ *buf = ':';
+ buf++;
+ }
+
+ switch (*fmt) {
+ case 'f': /* full_name */
+ buf = device_node_gen_full_name(dn, buf, end);
+ break;
+ case 'n': /* name */
+ buf = string(buf, end, dn->name, str_spec);
+ break;
+ case 'p': /* phandle */
+ buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
+ break;
+ case 'P': /* path-spec */
+ p = kbasename(of_node_full_name(dn));
+ if (!p[1])
+ p = "/";
+ buf = string(buf, end, p, str_spec);
+ break;
+ case 'F': /* flags */
+ tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-';
+ tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-';
+ tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
+ tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
+ tbuf[4] = 0;
+ buf = string(buf, end, tbuf, str_spec);
+ break;
+ case 'c': /* major compatible string */
+ ret = of_property_read_string(dn, "compatible", &p);
+ if (!ret)
+ buf = string(buf, end, p, str_spec);
+ break;
+ case 'C': /* full compatible string */
+ has_mult = false;
+ of_property_for_each_string(dn, "compatible", prop, p) {
+ if (has_mult)
+ buf = string(buf, end, ",", str_spec);
+ buf = string(buf, end, "\"", str_spec);
+ buf = string(buf, end, p, str_spec);
+ buf = string(buf, end, "\"", str_spec);
+
+ has_mult = true;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return widen_string(buf, buf - buf_start, end, spec);
+}
+
int kptr_restrict __read_mostly;
/*
@@ -1566,6 +1687,16 @@ int kptr_restrict __read_mostly;
* p page flags (see struct page) given as pointer to unsigned long
* g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
* v vma flags (VM_*) given as pointer to unsigned long
+ * - 'O' For a kobject based struct. Must be one of the following:
+ * - 'OF[fnpPcCF]' For a device tree object
Without any optional arguments, prints the full_name
+ * f device node full_name
+ * n device node name
+ * p device node phandle
+ * P device node path spec (name + @unit)
+ * F device node flags
+ * c major compatible string
+ * C full compatible string
*
* ** Please update also Documentation/printk-formats.txt when making changes **
*
@@ -1721,6 +1852,11 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'G':
return flags_string(buf, end, ptr, fmt);
+ case 'O':
+ switch (fmt[1]) {
+ case 'F':
+ return device_node_string(buf, end, ptr, spec, fmt + 1);
+ }
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
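A sketch of the new specifier in use (the node and its path are
illustrative):

	static void show_node(const struct device_node *np)
	{
		pr_info("%pOF\n", np);	 /* "/soc/i2c@7e804000": full path (default) */
		pr_info("%pOFn\n", np);	 /* "i2c": node name */
		pr_info("%pOFnc\n", np); /* name and major compatible, ':'-separated */
	}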