Diffstat (limited to 'lib')
 lib/Kconfig            |  11
 lib/Kconfig.debug      |   2
 lib/Kconfig.kgdb       |   2
 lib/Makefile           |   3
 lib/crc4.c             |  46
 lib/dma-noop.c         |  21
 lib/dma-virt.c         |  12
 lib/errseq.c           | 208
 lib/flex_proportions.c |   6
 lib/iov_iter.c         | 116
 lib/kobject_uevent.c   | 167
 lib/nlattr.c           |  11
 lib/nmi_backtrace.c    |   3
 lib/percpu_counter.c   |   4
 lib/raid6/mktables.c   |  20
 lib/strnlen_user.c     |  34
 lib/test_bpf.c         |  59
 lib/usercopy.c         |  10
 lib/vsprintf.c         | 136
19 files changed, 750 insertions(+), 121 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 0c8b78a9ae2e..6762529ad9e4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -158,6 +158,14 @@ config CRC32_BIT
 endchoice
 
+config CRC4
+	tristate "CRC4 functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC4 functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC4
+	  functions require M here.
+
 config CRC7
 	tristate "CRC7 functions"
 	help
@@ -548,6 +556,9 @@ config ARCH_HAS_SG_CHAIN
 config ARCH_HAS_PMEM_API
 	bool
 
+config ARCH_HAS_UACCESS_FLUSHCACHE
+	bool
+
 config ARCH_HAS_MMIO_FLUSH
 	bool
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9c5d40a50930..ca9460f049b8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -286,7 +286,7 @@ config DEBUG_FS
 	  write to these files.
 
 	  For detailed documentation on the debugfs API, see
-	  Documentation/DocBook/filesystems.
+	  Documentation/filesystems/.
 
 	  If unsure, say N.
 
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 533f912638ed..ab4ff0eea776 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -13,7 +13,7 @@ menuconfig KGDB
 	  CONFIG_FRAME_POINTER to aid in producing more reliable stack
 	  backtraces in the external debugger. Documentation of
 	  kernel debugger is available at http://kgdb.sourceforge.net
-	  as well as in DocBook form in Documentation/DocBook/. If
+	  as well as in Documentation/dev-tools/kgdb.rst. If
 	  unsure, say N.
 
 if KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 07fbe6a75692..5a008329324e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -38,7 +38,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o usercopy.o
+	 once.o refcount.o usercopy.o errseq.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
@@ -96,6 +96,7 @@ obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
 obj-$(CONFIG_CRC_ITU_T)	+= crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_CRC32_SELFTEST)	+= crc32test.o
+obj-$(CONFIG_CRC4)	+= crc4.o
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
 obj-$(CONFIG_CRC8)	+= crc8.o
diff --git a/lib/crc4.c b/lib/crc4.c
new file mode 100644
index 000000000000..cf6db46661be
--- /dev/null
+++ b/lib/crc4.c
@@ -0,0 +1,46 @@
+/*
+ * crc4.c - simple crc-4 calculations.
+ *
+ * This source code is licensed under the GNU General Public License, Version
+ * 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc4.h>
+#include <linux/module.h>
+
+static const uint8_t crc4_tab[] = {
+	0x0, 0x7, 0xe, 0x9, 0xb, 0xc, 0x5, 0x2,
+	0x1, 0x6, 0xf, 0x8, 0xa, 0xd, 0x4, 0x3,
+};
+
+/**
+ * crc4 - calculate the 4-bit crc of a value.
+ * @c: starting crc4
+ * @x: value to checksum
+ * @bits: number of bits in @x to checksum
+ *
+ * Returns the crc4 value of @x, using polynomial 0b10111.
+ *
+ * The @x value is treated as left-aligned, and bits above @bits are ignored
+ * in the crc calculations.
+ */
+uint8_t crc4(uint8_t c, uint64_t x, int bits)
+{
+	int i;
+
+	/* mask off anything above the requested number of bits */
+	x &= (1ull << bits) - 1;
+
+	/* Align to 4-bits */
+	bits = (bits + 3) & ~0x3;
+
+	/* Calculate crc4 over four-bit nibbles, starting at the MSbit */
+	for (i = bits - 4; i >= 0; i -= 4)
+		c = crc4_tab[c ^ ((x >> i) & 0xf)];
+
+	return c;
+}
+EXPORT_SYMBOL_GPL(crc4);
+
+MODULE_DESCRIPTION("CRC4 calculations");
+MODULE_LICENSE("GPL");
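Usage sketch (not part of the patch): how a caller might use the new helper. The field width and function name here are invented for illustration; only crc4() itself comes from the patch.

    #include <linux/crc4.h>

    /* Hypothetical example: checksum the low 20 bits of a hardware word,
     * starting from a zero CRC. Bits above bit 19 are masked off inside
     * crc4(), so the caller does not have to pre-mask. */
    static u8 example_field_crc(u64 word)
    {
            return crc4(0, word, 20);
    }
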
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index de26c8b68f34..acc4190e2731 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
+#include <linux/pfn.h>
 
 static void *dma_noop_alloc(struct device *dev, size_t size,
 			    dma_addr_t *dma_handle, gfp_t gfp,
@@ -16,7 +17,8 @@ static void *dma_noop_alloc(struct device *dev, size_t size,
 
 	ret = (void *)__get_free_pages(gfp, get_order(size));
 	if (ret)
-		*dma_handle = virt_to_phys(ret);
+		*dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
+
 	return ret;
 }
 
@@ -32,7 +34,7 @@ static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
 				      enum dma_data_direction dir,
 				      unsigned long attrs)
 {
-	return page_to_phys(page) + offset;
+	return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
 }
 
 static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
@@ -43,34 +45,23 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	struct scatterlist *sg;
 
 	for_each_sg(sgl, sg, nents, i) {
+		dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
 		void *va;
 
 		BUG_ON(!sg_page(sg));
 		va = sg_virt(sg);
-		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
+		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
 		sg_dma_len(sg) = sg->length;
 	}
 
 	return nents;
 }
 
-static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static int dma_noop_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
 const struct dma_map_ops dma_noop_ops = {
 	.alloc		= dma_noop_alloc,
 	.free		= dma_noop_free,
 	.map_page	= dma_noop_map_page,
 	.map_sg		= dma_noop_map_sg,
-	.mapping_error	= dma_noop_mapping_error,
-	.dma_supported	= dma_noop_supported,
 };
 
 EXPORT_SYMBOL(dma_noop_ops);
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
index dcd4df1f7174..5c4f11329721 100644
--- a/lib/dma-virt.c
+++ b/lib/dma-virt.c
@@ -51,22 +51,10 @@ static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return false;
-}
-
-static int dma_virt_supported(struct device *dev, u64 mask)
-{
-	return true;
-}
-
 const struct dma_map_ops dma_virt_ops = {
 	.alloc		= dma_virt_alloc,
 	.free		= dma_virt_free,
 	.map_page	= dma_virt_map_page,
 	.map_sg		= dma_virt_map_sg,
-	.mapping_error	= dma_virt_mapping_error,
-	.dma_supported	= dma_virt_supported,
};
 EXPORT_SYMBOL(dma_virt_ops);
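The recurring expression in the dma-noop hunks converts a CPU physical address into the device's bus address for platforms where the device's view of memory is shifted by dev->dma_pfn_offset pages. A sketch of the arithmetic, not part of the patch, with the helper name invented for illustration:

    #include <linux/device.h>
    #include <linux/pfn.h>
    #include <linux/types.h>

    /* Illustration only: PFN_PHYS(n) is n << PAGE_SHIFT, so a
     * dma_pfn_offset of 0x80000 on a 4K-page system shifts the bus
     * view of memory down by 0x80000000 bytes. */
    static inline dma_addr_t example_phys_to_bus(struct device *dev,
                                                 phys_addr_t paddr)
    {
            return paddr - PFN_PHYS(dev->dma_pfn_offset);
    }
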
diff --git a/lib/errseq.c b/lib/errseq.c
new file mode 100644
index 000000000000..841fa24e6e00
--- /dev/null
+++ b/lib/errseq.c
@@ -0,0 +1,208 @@
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/atomic.h>
+#include <linux/errseq.h>
+
+/*
+ * An errseq_t is a way of recording errors in one place, and allowing any
+ * number of "subscribers" to tell whether it has changed since a previous
+ * point where it was sampled.
+ *
+ * It's implemented as an unsigned 32-bit value. The low order bits are
+ * designated to hold an error code (between 0 and -MAX_ERRNO). The upper bits
+ * are used as a counter. This is done with atomics instead of locking so that
+ * these functions can be called from any context.
+ *
+ * The general idea is for consumers to sample an errseq_t value. That value
+ * can later be used to tell whether any new errors have occurred since that
+ * sampling was done.
+ *
+ * Note that there is a risk of collisions if new errors are being recorded
+ * frequently, since we have so few bits to use as a counter.
+ *
+ * To mitigate this, one bit is used as a flag to tell whether the value has
+ * been sampled since a new value was recorded. That allows us to avoid bumping
+ * the counter if no one has sampled it since the last time an error was
+ * recorded.
+ *
+ * A new errseq_t should always be zeroed out. An errseq_t value of all zeroes
+ * is the special (but common) case where there has never been an error. An all
+ * zero value thus serves as the "epoch" if one wishes to know whether there
+ * has ever been an error set since it was first initialized.
+ */
+
+/* The low bits are designated for error code (max of MAX_ERRNO) */
+#define ERRSEQ_SHIFT		ilog2(MAX_ERRNO + 1)
+
+/* This bit is used as a flag to indicate whether the value has been seen */
+#define ERRSEQ_SEEN		(1 << ERRSEQ_SHIFT)
+
+/* The lowest bit of the counter */
+#define ERRSEQ_CTR_INC		(1 << (ERRSEQ_SHIFT + 1))
+
+/**
+ * __errseq_set - set an errseq_t for later reporting
+ * @eseq: errseq_t field that should be set
+ * @err: error to set
+ *
+ * This function sets the error in *eseq, and increments the sequence counter
+ * if the last sequence was sampled at some point in the past.
+ *
+ * Any error set will always overwrite an existing error.
+ *
+ * Most callers will want to use the errseq_set inline wrapper to efficiently
+ * handle the common case where err is 0.
+ *
+ * We do return an errseq_t here, primarily for debugging purposes. The return
+ * value should not be used as a previously sampled value in later calls as it
+ * will not have the SEEN flag set.
+ */
+errseq_t __errseq_set(errseq_t *eseq, int err)
+{
+	errseq_t cur, old;
+
+	/* MAX_ERRNO must be able to serve as a mask */
+	BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1);
+
+	/*
+	 * Ensure the error code actually fits where we want it to go. If it
+	 * doesn't then just throw a warning and don't record anything. We
+	 * also don't accept zero here as that would effectively clear a
+	 * previous error.
+	 */
+	old = READ_ONCE(*eseq);
+
+	if (WARN(unlikely(err == 0 || (unsigned int)-err > MAX_ERRNO),
+				"err = %d\n", err))
+		return old;
+
+	for (;;) {
+		errseq_t new;
+
+		/* Clear out error bits and set new error */
+		new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;
+
+		/* Only increment if someone has looked at it */
+		if (old & ERRSEQ_SEEN)
+			new += ERRSEQ_CTR_INC;
+
+		/* If there would be no change, then call it done */
+		if (new == old) {
+			cur = new;
+			break;
+		}
+
+		/* Try to swap the new value into place */
+		cur = cmpxchg(eseq, old, new);
+
+		/*
+		 * Call it success if we did the swap or someone else beat us
+		 * to it for the same value.
+		 */
+		if (likely(cur == old || cur == new))
+			break;
+
+		/* Raced with an update, try again */
+		old = cur;
+	}
+	return cur;
+}
+EXPORT_SYMBOL(__errseq_set);
+
+/**
+ * errseq_sample - grab current errseq_t value
+ * @eseq: pointer to errseq_t to be sampled
+ *
+ * This function allows callers to sample an errseq_t value, marking it as
+ * "seen" if required.
+ */
+errseq_t errseq_sample(errseq_t *eseq)
+{
+	errseq_t old = READ_ONCE(*eseq);
+	errseq_t new = old;
+
+	/*
+	 * For the common case of no errors ever having been set, we can skip
+	 * marking the SEEN bit. Once an error has been set, the value will
+	 * never go back to zero.
+	 */
+	if (old != 0) {
+		new |= ERRSEQ_SEEN;
+		if (old != new)
+			cmpxchg(eseq, old, new);
+	}
+	return new;
+}
+EXPORT_SYMBOL(errseq_sample);
+
+/**
+ * errseq_check - has an error occurred since a particular sample point?
+ * @eseq: pointer to errseq_t value to be checked
+ * @since: previously-sampled errseq_t from which to check
+ *
+ * Grab the value that eseq points to, and see if it has changed "since"
+ * the given value was sampled. The "since" value is not advanced, so there
+ * is no need to mark the value as seen.
+ *
+ * Returns the latest error set in the errseq_t or 0 if it hasn't changed.
+ */
+int errseq_check(errseq_t *eseq, errseq_t since)
+{
+	errseq_t cur = READ_ONCE(*eseq);
+
+	if (likely(cur == since))
+		return 0;
+	return -(cur & MAX_ERRNO);
+}
+EXPORT_SYMBOL(errseq_check);
+
+/**
+ * errseq_check_and_advance - check an errseq_t and advance to current value
+ * @eseq: pointer to value being checked and reported
+ * @since: pointer to previously-sampled errseq_t to check against and advance
+ *
+ * Grab the eseq value, and see whether it matches the value that "since"
+ * points to. If it does, then just return 0.
+ *
+ * If it doesn't, then the value has changed. Set the "seen" flag, and try to
+ * swap it into place as the new eseq value. Then, set that value as the new
+ * "since" value, and return whatever the error portion is set to.
+ *
+ * Note that no locking is provided here for concurrent updates to the "since"
+ * value. The caller must provide that if necessary. Because of this, callers
+ * may want to do a lockless errseq_check before taking the lock and calling
+ * this.
+ */
+int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
+{
+	int err = 0;
+	errseq_t old, new;
+
+	/*
+	 * Most callers will want to use the inline wrapper to check this,
+	 * so that the common case of no error is handled without needing
+	 * to take the lock that protects the "since" value.
+	 */
+	old = READ_ONCE(*eseq);
+	if (old != *since) {
+		/*
+		 * Set the flag and try to swap it into place if it has
+		 * changed.
+		 *
+		 * We don't care about the outcome of the swap here. If the
+		 * swap doesn't occur, then it has either been updated by a
+		 * writer who is altering the value in some way (updating
+		 * counter or resetting the error), or another reader who is
+		 * just setting the "seen" flag. Either outcome is OK, and we
+		 * can advance "since" and return an error based on what we
+		 * have.
+		 */
+		new = old | ERRSEQ_SEEN;
+		if (new != old)
+			cmpxchg(eseq, old, new);
+		*since = new;
+		err = -(new & MAX_ERRNO);
+	}
+	return err;
+}
+EXPORT_SYMBOL(errseq_check_and_advance);
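To make the subscriber model concrete, here is a sketch of the intended calling pattern. It is not part of the patch; the struct and function names are invented for illustration, and the caller is assumed to serialize updates to its own "since" value as the kerneldoc above requires.

    #include <linux/errseq.h>

    struct example_file {            /* hypothetical subscriber state */
            errseq_t *shared_err;    /* e.g. points into shared mapping state */
            errseq_t since;          /* this subscriber's last-seen sample */
    };

    static void example_open(struct example_file *f, errseq_t *shared)
    {
            f->shared_err = shared;
            f->since = errseq_sample(shared);   /* remember the epoch */
    }

    static void example_record_error(errseq_t *shared, int err)
    {
            if (err)    /* the errseq_set() inline wrapper would do this check */
                    __errseq_set(shared, err);
    }

    static int example_report(struct example_file *f)
    {
            /* reports each new error at most once to this subscriber */
            return errseq_check_and_advance(f->shared_err, &f->since);
    }
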
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index a71cf1bdd4c9..2cc1f94e03a1 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 		if (val < (nr_cpu_ids * PROP_BATCH))
 			val = percpu_counter_sum(&pl->events);
 
-		__percpu_counter_add(&pl->events,
+		percpu_counter_add_batch(&pl->events,
 			-val + (val >> (period-pl->period)), PROP_BATCH);
 	} else
 		percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
 {
 	fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
 
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
 		return;
 	} else
 		fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
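These flex_proportions hunks are a mechanical rename: __percpu_counter_add() becomes percpu_counter_add_batch(), and the lib/percpu_counter.c hunk further down renames the definition to match. A sketch of what the batch argument buys, with an invented caller and batch size:

    #include <linux/percpu_counter.h>

    /* Illustration only: with a batch of 32, each CPU accumulates up to
     * +/-32 locally before folding into the shared count, trading accuracy
     * of intermediate reads for less cache-line contention. */
    static void example_account(struct percpu_counter *events, s64 n)
    {
            percpu_counter_add_batch(events, n, 32);
    }
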
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f835964c9485..52c8dd6d8e82 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -130,6 +130,24 @@
 	}						\
 }
 
+static int copyout(void __user *to, const void *from, size_t n)
+{
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		kasan_check_read(from, n);
+		n = raw_copy_to_user(to, from, n);
+	}
+	return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+	if (access_ok(VERIFY_READ, from, n)) {
+		kasan_check_write(to, n);
+		n = raw_copy_from_user(to, from, n);
+	}
+	return n;
+}
+
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	if (unlikely(!bytes))
 		return 0;
 
+	might_fault();
 	wanted = bytes;
 	iov = i->iov;
 	skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	from = kaddr + offset;
 
 	/* first chunk, usually the only one */
-	left = __copy_to_user_inatomic(buf, from, copy);
+	left = copyout(buf, from, copy);
 	copy -= left;
 	skip += copy;
 	from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user_inatomic(buf, from, copy);
+		left = copyout(buf, from, copy);
 		copy -= left;
 		skip = copy;
 		from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	kaddr = kmap(page);
 	from = kaddr + offset;
-	left = __copy_to_user(buf, from, copy);
+	left = copyout(buf, from, copy);
 	copy -= left;
 	skip += copy;
 	from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user(buf, from, copy);
+		left = copyout(buf, from, copy);
 		copy -= left;
 		skip = copy;
 		from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	if (unlikely(!bytes))
 		return 0;
 
+	might_fault();
 	wanted = bytes;
 	iov = i->iov;
 	skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	to = kaddr + offset;
 
 	/* first chunk, usually the only one */
-	left = __copy_from_user_inatomic(to, buf, copy);
+	left = copyin(to, buf, copy);
 	copy -= left;
 	skip += copy;
 	to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_from_user_inatomic(to, buf, copy);
+		left = copyin(to, buf, copy);
 		copy -= left;
 		skip = copy;
 		to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 	kaddr = kmap(page);
 	to = kaddr + offset;
-	left = __copy_from_user(to, buf, copy);
+	left = copyin(to, buf, copy);
 	copy -= left;
 	skip += copy;
 	to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_from_user(to, buf, copy);
+		left = copyin(to, buf, copy);
 		copy -= left;
 		skip = copy;
 		to += copy;
@@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 	return bytes;
 }
 
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	const char *from = addr;
 	if (unlikely(i->type & ITER_PIPE))
 		return copy_pipe_to_iter(addr, bytes, i);
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_and_advance(i, bytes, v,
-		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
-			       v.iov_len),
+		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
 		memcpy_to_page(v.bv_page, v.bv_offset,
 			       (from += v.bv_len) - v.bv_len, v.bv_len),
 		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 
 	return bytes;
 }
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);
 
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
 	if (unlikely(i->type & ITER_PIPE)) {
 		WARN_ON(1);
 		return 0;
 	}
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
-				 v.iov_len),
+		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 
 	return bytes;
 }
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);
 
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
 	if (unlikely(i->type & ITER_PIPE)) {
@@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user((to += v.iov_len) - v.iov_len,
+		if (copyin((to += v.iov_len) - v.iov_len,
 				      v.iov_base, v.iov_len))
 			return false;
 		0;}),
@@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 	iov_iter_advance(i, bytes);
 	return true;
 }
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);
 
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
 	if (unlikely(i->type & ITER_PIPE)) {
@@ -613,9 +637,31 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 
 	return bytes;
 }
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);
 
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return 0;
+	}
+	iterate_and_advance(i, bytes, v,
+		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
+					 v.iov_base, v.iov_len),
+		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
+			v.iov_len)
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
+#endif
+
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
 	if (unlikely(i->type & ITER_PIPE)) {
@@ -637,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	iov_iter_advance(i, bytes);
 	return true;
 }
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+	size_t v = n + offset;
+	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+		return true;
+	WARN_ON(1);
+	return false;
+}
 
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
 	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 		void *kaddr = kmap_atomic(page);
 		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -657,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
 	if (unlikely(i->type & ITER_PIPE)) {
 		WARN_ON(1);
 		return 0;
 	}
 	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 		void *kaddr = kmap_atomic(page);
-		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
 		kunmap_atomic(kaddr);
 		return wanted;
 	} else
@@ -700,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 	if (unlikely(i->type & ITER_PIPE))
 		return pipe_zero(bytes, i);
 	iterate_and_advance(i, bytes, v,
-		__clear_user(v.iov_base, v.iov_len),
+		clear_user(v.iov_base, v.iov_len),
 		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 		memset(v.iov_base, 0, v.iov_len)
 	)
@@ -713,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes)
 {
 	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+	if (unlikely(!page_copy_sane(page, offset, bytes))) {
+		kunmap_atomic(kaddr);
+		return 0;
+	}
 	if (unlikely(i->type & ITER_PIPE)) {
 		kunmap_atomic(kaddr);
 		WARN_ON(1);
 		return 0;
 	}
 	iterate_all_kinds(i, bytes, v,
-		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
-					  v.iov_base, v.iov_len),
+		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
 		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
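The new copyin()/copyout() helpers fold the access_ok() check and a KASAN annotation around the raw copy, so every user-backed branch of the iterate_* macros is checked. Callers presumably keep reaching these through unprefixed copy_to_iter()/copy_from_iter() inline wrappers added outside this lib/-limited diff. A sketch of the resulting call shape from a consumer's point of view, with invented names:

    #include <linux/uio.h>

    /* Illustration: a read-style path copying a kernel buffer into user
     * memory described by an iov_iter. The iterator advances by the number
     * of bytes actually copied, which may be short if a fault occurs. */
    static ssize_t example_read_iter(struct iov_iter *to,
                                     const void *kbuf, size_t len)
    {
            size_t copied = copy_to_iter(kbuf, len, to);

            return copied ? copied : -EFAULT;
    }
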
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9a2b811966eb..719c155fce20 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -23,6 +23,8 @@
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
+#include <linux/uuid.h>
+#include <linux/ctype.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
 
@@ -52,19 +54,13 @@ static const char *kobject_actions[] = {
 	[KOBJ_OFFLINE] =	"offline",
 };
 
-/**
- * kobject_action_type - translate action string to numeric type
- *
- * @buf: buffer containing the action string, newline is ignored
- * @count: length of buffer
- * @type: pointer to the location to store the action type
- *
- * Returns 0 if the action string was recognized.
- */
-int kobject_action_type(const char *buf, size_t count,
-			enum kobject_action *type)
+static int kobject_action_type(const char *buf, size_t count,
+			       enum kobject_action *type,
+			       const char **args)
 {
 	enum kobject_action action;
+	size_t count_first;
+	const char *args_start;
 	int ret = -EINVAL;
 
 	if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
@@ -73,11 +69,20 @@ int kobject_action_type(const char *buf, size_t count,
 	if (!count)
 		goto out;
 
+	args_start = strnchr(buf, count, ' ');
+	if (args_start) {
+		count_first = args_start - buf;
+		args_start = args_start + 1;
+	} else
+		count_first = count;
+
 	for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) {
-		if (strncmp(kobject_actions[action], buf, count) != 0)
+		if (strncmp(kobject_actions[action], buf, count_first) != 0)
 			continue;
-		if (kobject_actions[action][count] != '\0')
+		if (kobject_actions[action][count_first] != '\0')
 			continue;
+		if (args)
+			*args = args_start;
 		*type = action;
 		ret = 0;
 		break;
@@ -86,6 +91,142 @@ out:
 	return ret;
 }
 
+static const char *action_arg_word_end(const char *buf, const char *buf_end,
+				       char delim)
+{
+	const char *next = buf;
+
+	while (next <= buf_end && *next != delim)
+		if (!isalnum(*next++))
+			return NULL;
+
+	if (next == buf)
+		return NULL;
+
+	return next;
+}
+
+static int kobject_action_args(const char *buf, size_t count,
+			       struct kobj_uevent_env **ret_env)
+{
+	struct kobj_uevent_env *env = NULL;
+	const char *next, *buf_end, *key;
+	int key_len;
+	int r = -EINVAL;
+
+	if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0'))
+		count--;
+
+	if (!count)
+		return -EINVAL;
+
+	env = kzalloc(sizeof(*env), GFP_KERNEL);
+	if (!env)
+		return -ENOMEM;
+
+	/* first arg is UUID */
+	if (count < UUID_STRING_LEN || !uuid_is_valid(buf) ||
+	    add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf))
+		goto out;
+
+	/*
+	 * the rest are custom environment variables in KEY=VALUE
+	 * format with ' ' delimiter between each KEY=VALUE pair
+	 */
+	next = buf + UUID_STRING_LEN;
+	buf_end = buf + count - 1;
+
+	while (next <= buf_end) {
+		if (*next != ' ')
+			goto out;
+
+		/* skip the ' ', key must follow */
+		key = ++next;
+		if (key > buf_end)
+			goto out;
+
+		buf = next;
+		next = action_arg_word_end(buf, buf_end, '=');
+		if (!next || next > buf_end || *next != '=')
+			goto out;
+		key_len = next - buf;
+
+		/* skip the '=', value must follow */
+		if (++next > buf_end)
+			goto out;
+
+		buf = next;
+		next = action_arg_word_end(buf, buf_end, ' ');
+		if (!next)
+			goto out;
+
+		if (add_uevent_var(env, "SYNTH_ARG_%.*s=%.*s",
+				   key_len, key, (int) (next - buf), buf))
+			goto out;
+	}
+
+	r = 0;
+out:
+	if (r)
+		kfree(env);
+	else
+		*ret_env = env;
+	return r;
+}
+
+/**
+ * kobject_synth_uevent - send synthetic uevent with arguments
+ *
+ * @kobj: struct kobject for which synthetic uevent is to be generated
+ * @buf: buffer containing action type and action args, newline is ignored
+ * @count: length of buffer
+ *
+ * Returns 0 if kobject_synth_uevent() completed with success or the
+ * corresponding error when it fails.
+ */
+int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
+{
+	char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL };
+	enum kobject_action action;
+	const char *action_args;
+	struct kobj_uevent_env *env;
+	const char *msg = NULL, *devpath;
+	int r;
+
+	r = kobject_action_type(buf, count, &action, &action_args);
+	if (r) {
+		msg = "unknown uevent action string\n";
+		goto out;
+	}
+
+	if (!action_args) {
+		r = kobject_uevent_env(kobj, action, no_uuid_envp);
+		goto out;
+	}
+
+	r = kobject_action_args(action_args,
+				count - (action_args - buf), &env);
+	if (r == -EINVAL) {
+		msg = "incorrect uevent action arguments\n";
+		goto out;
+	}
+
+	if (r)
+		goto out;
+
+	r = kobject_uevent_env(kobj, action, env->envp);
+	kfree(env);
+out:
+	if (r) {
+		devpath = kobject_get_path(kobj, GFP_KERNEL);
+		printk(KERN_WARNING "synth uevent: %s: %s",
+		       devpath ?: "unknown device",
+		       msg ?: "failed to send uevent");
+		kfree(devpath);
+	}
+	return r;
+}
+
 #ifdef CONFIG_NET
 static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
 {
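With this change, a write to a kobject's uevent attribute can carry a transaction UUID plus custom KEY=VALUE pairs, which the kernel re-emits as SYNTH_UUID= and SYNTH_ARG_<KEY>= variables in the event environment. A userspace sketch of triggering one; the sysfs path, UUID, and key are all invented for illustration:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Illustration only: emit a synthetic "change" uevent with a made-up
     * UUID and one custom variable. The kernel side would add
     * SYNTH_UUID=fe4d... and SYNTH_ARG_ABC=42 to the event environment. */
    int main(void)
    {
            const char *msg =
                    "change fe4d7c9d-b8c6-4a70-9ef1-3d8a58d18eed ABC=42";
            int fd = open("/sys/block/loop0/uevent", O_WRONLY);

            if (fd < 0 || write(fd, msg, strlen(msg)) < 0)
                    perror("uevent");
            if (fd >= 0)
                    close(fd);
            return 0;
    }
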
diff --git a/lib/nlattr.c b/lib/nlattr.c
index a7e0b16078df..fb52435be42d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -352,7 +352,7 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 {
 	struct nlattr *nla;
 
-	nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+	nla = skb_put(skb, nla_total_size(attrlen));
 	nla->nla_type = attrtype;
 	nla->nla_len = nla_attr_size(attrlen);
 
@@ -398,12 +398,7 @@ EXPORT_SYMBOL(__nla_reserve_64bit);
  */
 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
 {
-	void *start;
-
-	start = skb_put(skb, NLA_ALIGN(attrlen));
-	memset(start, 0, NLA_ALIGN(attrlen));
-
-	return start;
+	return skb_put_zero(skb, NLA_ALIGN(attrlen));
 }
 EXPORT_SYMBOL(__nla_reserve_nohdr);
 
@@ -617,7 +612,7 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
 	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
 		return -EMSGSIZE;
 
-	memcpy(skb_put(skb, attrlen), data, attrlen);
+	skb_put_data(skb, data, attrlen);
 	return 0;
 }
 EXPORT_SYMBOL(nla_append);
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 4e8a30d1c22f..0bc0a3535a8a 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 
 bool nmi_cpu_backtrace(struct pt_regs *regs)
 {
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	int cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+		arch_spin_lock(&lock);
 		if (regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
 				cpu, instruction_pointer(regs));
@@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 			else
 				dump_stack();
 		}
+		arch_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9c21000df0b5..8ee7e5ec21be 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 
@@ -89,7 +89,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
  * Add up all the per-cpu counts, return the result.  This is a more accurate
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index 39787db588b0..e824d088f72c 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -125,6 +125,26 @@ int main(int argc, char *argv[])
 	printf("EXPORT_SYMBOL(raid6_gfexp);\n");
 	printf("#endif\n");
 
+	/* Compute log-of-2 table */
+	printf("\nconst u8 __attribute__((aligned(256)))\n"
+		"raid6_gflog[256] =\n" "{\n");
+	for (i = 0; i < 256; i += 8) {
+		printf("\t");
+		for (j = 0; j < 8; j++) {
+			v = 255;
+			for (k = 0; k < 256; k++)
+				if (exptbl[k] == (i + j)) {
+					v = k;
+					break;
+				}
+			printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+		}
+	}
+	printf("};\n");
+	printf("#ifdef __KERNEL__\n");
+	printf("EXPORT_SYMBOL(raid6_gflog);\n");
+	printf("#endif\n");
+
 	/* Compute inverse table x^-1 == x^254 */
 	printf("\nconst u8 __attribute__((aligned(256)))\n"
 		"raid6_gfinv[256] =\n" "{\n");
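The generated raid6_gflog table is the inverse of the existing raid6_gfexp power table: raid6_gfexp[raid6_gflog[v]] == v for nonzero v, with the unused log of zero encoded as 255. That enables GF(2^8) multiplication and division by table lookup. A sketch, assuming the matching extern declaration lands in include/linux/raid/pq.h alongside raid6_gfexp as the generated EXPORT_SYMBOL suggests; the helper name is invented:

    #include <linux/raid/pq.h>

    /* Illustration: multiply in GF(2^8) via log/exp tables, using
     * a*b = gfexp[(gflog[a] + gflog[b]) % 255], with 0 * x = 0 handled
     * separately since zero has no discrete logarithm. */
    static u8 example_gf_mul(u8 a, u8 b)
    {
            if (!a || !b)
                    return 0;
            return raid6_gfexp[(raid6_gflog[a] + raid6_gflog[b]) % 255];
    }
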
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 8e105ed4df12..a5f567747ced 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -121,37 +121,3 @@ long strnlen_user(const char __user *str, long count)
 	return 0;
 }
 EXPORT_SYMBOL(strnlen_user);
-
-/**
- * strlen_user: - Get the size of a user string INCLUDING final NUL.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-long strlen_user(const char __user *str)
-{
-	unsigned long max_addr, src_addr;
-
-	max_addr = user_addr_max();
-	src_addr = (unsigned long)str;
-	if (likely(src_addr < max_addr)) {
-		unsigned long max = max_addr - src_addr;
-		long retval;
-
-		user_access_begin();
-		retval = do_strnlen_user(str, ~0ul, max);
-		user_access_end();
-		return retval;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(strlen_user);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index be88cbaadde3..d9d5a410955c 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -84,6 +84,7 @@ struct bpf_test {
 	} test[MAX_SUBTESTS];
 	int (*fill_helper)(struct bpf_test *self);
 	__u8 frag_data[MAX_DATA];
+	int stack_depth; /* for eBPF only, since tests don't call verifier */
 };
 
 /* Large test cases need separate allocation and fill handler. */
@@ -434,6 +435,30 @@ loop:
 	return 0;
 }
 
+static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct bpf_insn *insn;
+	int i = 0;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[i++] = BPF_MOV64_REG(R6, R1);
+	insn[i++] = BPF_LD_ABS(BPF_B, 0);
+	insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
+	i++;
+	while (i < len - 1)
+		insn[i++] = BPF_LD_ABS(BPF_B, 1);
+	insn[i] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
 static int __bpf_fill_stxdw(struct bpf_test *self, int size)
 {
 	unsigned int len = BPF_MAXINSNS;
@@ -455,6 +480,7 @@ static int __bpf_fill_stxdw(struct bpf_test *self, int size)
 
 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
+	self->stack_depth = 40;
 
 	return 0;
 }
@@ -2317,7 +2343,8 @@ static struct bpf_test tests[] = {
 		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
 		  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
-		{ { 38, 256 } }
+		{ { 38, 256 } },
+		.stack_depth = 64,
 	},
 	/* BPF_ALU | BPF_MOV | BPF_X */
 	{
@@ -4169,6 +4196,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_B: Store/Load byte: max positive",
@@ -4181,6 +4209,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7f } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_B: Store/Load byte: max negative",
@@ -4194,6 +4223,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_H: Store/Load half word: max negative",
@@ -4206,6 +4236,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_H: Store/Load half word: max positive",
@@ -4218,6 +4249,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_H: Store/Load half word: max negative",
@@ -4231,6 +4263,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_W: Store/Load word: max negative",
@@ -4243,6 +4276,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_W: Store/Load word: max positive",
@@ -4255,6 +4289,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_W: Store/Load word: max negative",
@@ -4268,6 +4303,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max negative",
@@ -4280,6 +4316,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max negative 2",
@@ -4297,6 +4334,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x1 } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max positive",
@@ -4309,6 +4347,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_DW: Store/Load double word: max negative",
@@ -4322,6 +4361,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	/* BPF_STX | BPF_XADD | BPF_W/DW */
 	{
@@ -4336,6 +4376,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x22 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4351,6 +4392,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
		"STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4363,6 +4405,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x12 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_W: X + 1 + 1 + 1 + ...",
@@ -4384,6 +4427,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x22 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4399,6 +4443,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4411,6 +4456,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x12 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: X + 1 + 1 + 1 + ...",
@@ -5022,6 +5068,14 @@ static struct bpf_test tests[] = {
 		{ { ETH_HLEN, 0xbef } },
 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
 	},
+	{
+		"BPF_MAXINSNS: jump around ld_abs",
+		{ },
+		INTERNAL,
+		{ 10, 11 },
+		{ { 2, 10 } },
+		.fill_helper = bpf_fill_jump_around_ld_abs,
+	},
 	/*
 	 * LD_IND / LD_ABS on fragmented SKBs
 	 */
@@ -5663,7 +5717,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
 	if (!skb)
 		return NULL;
 
-	memcpy(__skb_put(skb, size), buf, size);
+	__skb_put_data(skb, buf, size);
 
 	/* Initialize a fake skb with test pattern. */
 	skb_reset_mac_header(skb);
@@ -5809,6 +5863,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
 	/* Type doesn't really matter here as long as it's not unspec. */
 	fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 	memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
+	fp->aux->stack_depth = tests[which].stack_depth;
 
 	/* We cannot error here as we don't need type compatibility
 	 * checks.
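The new stack_depth field matters because these test programs are loaded without going through the verifier, which normally computes the stack size a JIT must reserve for the program. A sketch of what a minimal new entry would look like, with an invented name and values, following the pattern of the entries above:

    {
            "EXAMPLE: store/load a word through the stack",
            .u.insns_int = {
                    BPF_ST_MEM(BPF_W, R10, -40, 0x42),
                    BPF_LDX_MEM(BPF_W, R0, R10, -40),
                    BPF_EXIT_INSN(),
            },
            INTERNAL,
            { },
            { { 0, 0x42 } },
            .stack_depth = 40,    /* bytes of R10 stack the JIT must reserve */
    },
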
"STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22", @@ -4363,6 +4405,7 @@ static struct bpf_test tests[] = { INTERNAL, { }, { { 0, 0x12 } }, + .stack_depth = 40, }, { "STX_XADD_W: X + 1 + 1 + 1 + ...", @@ -4384,6 +4427,7 @@ static struct bpf_test tests[] = { INTERNAL, { }, { { 0, 0x22 } }, + .stack_depth = 40, }, { "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22", @@ -4399,6 +4443,7 @@ static struct bpf_test tests[] = { INTERNAL, { }, { { 0, 0 } }, + .stack_depth = 40, }, { "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22", @@ -4411,6 +4456,7 @@ static struct bpf_test tests[] = { INTERNAL, { }, { { 0, 0x12 } }, + .stack_depth = 40, }, { "STX_XADD_DW: X + 1 + 1 + 1 + ...", @@ -5022,6 +5068,14 @@ static struct bpf_test tests[] = { { { ETH_HLEN, 0xbef } }, .fill_helper = bpf_fill_ld_abs_vlan_push_pop, }, + { + "BPF_MAXINSNS: jump around ld_abs", + { }, + INTERNAL, + { 10, 11 }, + { { 2, 10 } }, + .fill_helper = bpf_fill_jump_around_ld_abs, + }, /* * LD_IND / LD_ABS on fragmented SKBs */ @@ -5663,7 +5717,7 @@ static struct sk_buff *populate_skb(char *buf, int size) if (!skb) return NULL; - memcpy(__skb_put(skb, size), buf, size); + __skb_put_data(skb, buf, size); /* Initialize a fake skb with test pattern. */ skb_reset_mac_header(skb); @@ -5809,6 +5863,7 @@ static struct bpf_prog *generate_filter(int which, int *err) /* Type doesn't really matter here as long as it's not unspec. */ fp->type = BPF_PROG_TYPE_SOCKET_FILTER; memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); + fp->aux->stack_depth = tests[which].stack_depth; /* We cannot error here as we don't need type compatibility * checks. diff --git a/lib/usercopy.c b/lib/usercopy.c index 1b6010a3beb8..f5d9f08ee032 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -6,8 +6,11 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; - if (likely(access_ok(VERIFY_READ, from, n))) + might_fault(); + if (likely(access_ok(VERIFY_READ, from, n))) { + kasan_check_write(to, n); res = raw_copy_from_user(to, from, n); + } if (unlikely(res)) memset(to + (n - res), 0, res); return res; @@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user); #ifndef INLINE_COPY_TO_USER unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n) { - if (likely(access_ok(VERIFY_WRITE, to, n))) + might_fault(); + if (likely(access_ok(VERIFY_WRITE, to, n))) { + kasan_check_read(from, n); n = raw_copy_to_user(to, from, n); + } return n; } EXPORT_SYMBOL(_copy_to_user); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 9f37d6208e99..86c3385b9eb3 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -31,6 +31,7 @@ #include <linux/dcache.h> #include <linux/cred.h> #include <linux/uuid.h> +#include <linux/of.h> #include <net/addrconf.h> #ifdef CONFIG_BLOCK #include <linux/blkdev.h> @@ -1470,6 +1471,126 @@ char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt) return format_flags(buf, end, flags, names); } +static const char *device_node_name_for_depth(const struct device_node *np, int depth) +{ + for ( ; np && depth; depth--) + np = np->parent; + + return kbasename(np->full_name); +} + +static noinline_for_stack +char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end) +{ + int depth; + const struct device_node *parent = np->parent; + static const struct printf_spec strspec = { + .field_width = -1, + .precision = -1, + }; + + /* special case for root node */ + if (!parent) + return string(buf, end, "/", strspec); + + for (depth = 0; 
+static noinline_for_stack
+char *device_node_string(char *buf, char *end, struct device_node *dn,
+			 struct printf_spec spec, const char *fmt)
+{
+	char tbuf[sizeof("xxxx") + 1];
+	const char *p;
+	int ret;
+	char *buf_start = buf;
+	struct property *prop;
+	bool has_mult, pass;
+	static const struct printf_spec num_spec = {
+		.flags = SMALL,
+		.field_width = -1,
+		.precision = -1,
+		.base = 10,
+	};
+
+	struct printf_spec str_spec = spec;
+	str_spec.field_width = -1;
+
+	if (!IS_ENABLED(CONFIG_OF))
+		return string(buf, end, "(!OF)", spec);
+
+	if ((unsigned long)dn < PAGE_SIZE)
+		return string(buf, end, "(null)", spec);
+
+	/* simple case without any more format specifiers */
+	fmt++;
+	if (fmt[0] == '\0' || strcspn(fmt,"fnpPFcC") > 0)
+		fmt = "f";
+
+	for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
+		if (pass) {
+			if (buf < end)
+				*buf = ':';
+			buf++;
+		}
+
+		switch (*fmt) {
+		case 'f':	/* full_name */
+			buf = device_node_gen_full_name(dn, buf, end);
+			break;
+		case 'n':	/* name */
+			buf = string(buf, end, dn->name, str_spec);
+			break;
+		case 'p':	/* phandle */
+			buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
+			break;
+		case 'P':	/* path-spec */
+			p = kbasename(of_node_full_name(dn));
+			if (!p[1])
+				p = "/";
+			buf = string(buf, end, p, str_spec);
+			break;
+		case 'F':	/* flags */
+			tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-';
+			tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-';
+			tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
+			tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
+			tbuf[4] = 0;
+			buf = string(buf, end, tbuf, str_spec);
+			break;
+		case 'c':	/* major compatible string */
+			ret = of_property_read_string(dn, "compatible", &p);
+			if (!ret)
+				buf = string(buf, end, p, str_spec);
+			break;
+		case 'C':	/* full compatible string */
+			has_mult = false;
+			of_property_for_each_string(dn, "compatible", prop, p) {
+				if (has_mult)
+					buf = string(buf, end, ",", str_spec);
+				buf = string(buf, end, "\"", str_spec);
+				buf = string(buf, end, p, str_spec);
+				buf = string(buf, end, "\"", str_spec);
+
+				has_mult = true;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	return widen_string(buf, buf - buf_start, end, spec);
+}
+
 int kptr_restrict __read_mostly;
 
 /*
@@ -1566,6 +1687,16 @@ int kptr_restrict __read_mostly;
  *       p page flags (see struct page) given as pointer to unsigned long
  *       g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
  *       v vma flags (VM_*) given as pointer to unsigned long
+ * - 'O' For a kobject based struct. Must be one of the following:
+ *       - 'OF[fnpPcCF]' For a device tree object
+ *         Without any optional arguments prints the full_name
+ *         f device node full_name
+ *         n device node name
+ *         p device node phandle
+ *         P device node path spec (name + @unit)
+ *         F device node flags
+ *         c major compatible string
+ *         C full compatible string
 *
 * ** Please update also Documentation/printk-formats.txt when making changes **
 *
@@ -1721,6 +1852,11 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 
 	case 'G':
 		return flags_string(buf, end, ptr, fmt);
+	case 'O':
+		switch (fmt[1]) {
+		case 'F':
+			return device_node_string(buf, end, ptr, spec, fmt + 1);
+		}
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
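For illustration, a sketch of how driver code might use the new specifier. Not part of the patch; the function and messages are invented. Plain "%pOF" prints the node's full path, and suffixes can be chained, with device_node_string() joining each field with ':':

    #include <linux/of.h>
    #include <linux/printk.h>

    /* Illustration: report a device tree node by path, then by
     * name and major "compatible" string joined as "name:compat". */
    static void example_report_node(struct device_node *np)
    {
            pr_info("probing %pOF\n", np);
            pr_info("node is %pOFnc\n", np);
    }
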