Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        |  13
-rw-r--r--  lib/Kconfig.kasan        |   4
-rw-r--r--  lib/Makefile             |   2
-rw-r--r--  lib/chacha20.c           |  79
-rw-r--r--  lib/crc32.c              |  16
-rw-r--r--  lib/dma-noop.c           |   9
-rw-r--r--  lib/dynamic_debug.c      |   7
-rw-r--r--  lib/earlycpio.c          |   5
-rw-r--r--  lib/iommu-helper.c       |   3
-rw-r--r--  lib/iov_iter.c           |   8
-rw-r--r--  lib/mpi/mpicoder.c       |  14
-rw-r--r--  lib/radix-tree.c         |  14
-rw-r--r--  lib/ratelimit.c          |  10
-rw-r--r--  lib/rbtree.c             |  26
-rw-r--r--  lib/stackdepot.c         |   1
-rw-r--r--  lib/strncpy_from_user.c  |   8
-rw-r--r--  lib/strnlen_user.c       |   7
-rw-r--r--  lib/swiotlb.c            |  13
-rw-r--r--  lib/test_hash.c          |   4
-rw-r--r--  lib/ubsan.c              |   2

20 files changed, 187 insertions(+), 58 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f07842e2d69f..2307d7c89dac 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -709,6 +709,8 @@ config KCOV
 	bool "Code coverage for fuzzing"
 	depends on ARCH_HAS_KCOV
 	select DEBUG_FS
+	select GCC_PLUGINS if !COMPILE_TEST
+	select GCC_PLUGIN_SANCOV if !COMPILE_TEST
 	help
 	  KCOV exposes kernel code coverage information in a form suitable
 	  for coverage-guided fuzzing (randomized testing).
@@ -719,6 +721,17 @@
 
 	  For more details, see Documentation/kcov.txt.
 
+config KCOV_INSTRUMENT_ALL
+	bool "Instrument all code by default"
+	depends on KCOV
+	default y if KCOV
+	help
+	  If you are doing generic system call fuzzing (like e.g. syzkaller),
+	  then you will want to instrument the whole kernel and you should
+	  say y here. If you are doing more targeted fuzzing (like e.g.
+	  filesystem fuzzing with AFL) then you will want to enable coverage
+	  for more specific subsets of files, and should say n here.
+
 config DEBUG_SHIRQ
 	bool "Debug shared IRQ handlers"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 67d8c6838ba9..bd38aab05929 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,9 +5,9 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
-	select STACKDEPOT if SLAB
+	select STACKDEPOT
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/lib/Makefile b/lib/Makefile
index 07d06a8b9788..cfa68eb269e4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
-	 sha1.o md5.o irq_regs.o argv_split.o \
+	 sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
diff --git a/lib/chacha20.c b/lib/chacha20.c
new file mode 100644
index 000000000000..250ceed9ec9a
--- /dev/null
+++ b/lib/chacha20.c
@@ -0,0 +1,79 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/bitops.h>
+#include <linux/cryptohash.h>
+#include <asm/unaligned.h>
+#include <crypto/chacha20.h>
+
+static inline u32 rotl32(u32 v, u8 n)
+{
+	return (v << n) | (v >> (sizeof(v) * 8 - n));
+}
+
+extern void chacha20_block(u32 *state, void *stream)
+{
+	u32 x[16], *out = stream;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(x); i++)
+		x[i] = state[i];
+
+	for (i = 0; i < 20; i += 2) {
+		x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16);
+		x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16);
+		x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16);
+		x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16);
+
+		x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12);
+		x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12);
+		x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12);
+		x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12);
+
+		x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8);
+		x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8);
+		x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8);
+		x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8);
+
+		x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7);
+		x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7);
+		x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7);
+		x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7);
+
+		x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16);
+		x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16);
+		x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16);
+		x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16);
+
+		x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12);
+		x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12);
+		x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12);
+		x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12);
+
+		x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8);
+		x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8);
+		x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8);
+		x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8);
+
+		x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7);
+		x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7);
+		x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7);
+		x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(x); i++)
+		out[i] = cpu_to_le32(x[i] + state[i]);
+
+	state[12]++;
+}
+EXPORT_SYMBOL(chacha20_block);
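A note on the new lib/chacha20.c above: chacha20_block() consumes a 16-word state laid out as in RFC 7539 and emits one 64-byte keystream block, bumping the block counter held in state[12]. The sketch below shows one plausible way a caller could fill that state; the helper name example_chacha20_init() is illustrative and not part of this patch.

#include <linux/kernel.h>
#include <asm/unaligned.h>

/*
 * Illustrative helper (not in this patch): fill the ChaCha20 state per
 * RFC 7539 before handing it to chacha20_block().
 */
static void example_chacha20_init(u32 state[16], const u8 key[32],
				  const u8 nonce[12])
{
	int i;

	state[0] = 0x61707865;		/* "expa" */
	state[1] = 0x3320646e;		/* "nd 3" */
	state[2] = 0x79622d32;		/* "2-by" */
	state[3] = 0x6b206574;		/* "te k" */

	for (i = 0; i < 8; i++)		/* 256-bit key, little endian */
		state[4 + i] = get_unaligned_le32(key + 4 * i);

	state[12] = 0;			/* block counter */
	state[13] = get_unaligned_le32(nonce);
	state[14] = get_unaligned_le32(nonce + 4);
	state[15] = get_unaligned_le32(nonce + 8);
}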
diff --git a/lib/crc32.c b/lib/crc32.c
index 9a907d489d95..7fbd1a112b9d 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -979,7 +979,6 @@ static int __init crc32c_test(void)
 	int i;
 	int errors = 0;
 	int bytes = 0;
-	struct timespec start, stop;
 	u64 nsec;
 	unsigned long flags;
 
@@ -999,20 +998,17 @@ static int __init crc32c_test(void)
 	local_irq_save(flags);
 	local_irq_disable();
 
-	getnstimeofday(&start);
+	nsec = ktime_get_ns();
 	for (i = 0; i < 100; i++) {
 		if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
 		    test[i].start, test[i].length))
 			errors++;
 	}
-	getnstimeofday(&stop);
+	nsec = ktime_get_ns() - nsec;
 
 	local_irq_restore(flags);
 	local_irq_enable();
 
-	nsec = stop.tv_nsec - start.tv_nsec +
-		1000000000 * (stop.tv_sec - start.tv_sec);
-
 	pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
 
 	if (errors)
@@ -1065,7 +1061,6 @@ static int __init crc32_test(void)
 	int i;
 	int errors = 0;
 	int bytes = 0;
-	struct timespec start, stop;
 	u64 nsec;
 	unsigned long flags;
 
@@ -1088,7 +1083,7 @@ static int __init crc32_test(void)
 	local_irq_save(flags);
 	local_irq_disable();
 
-	getnstimeofday(&start);
+	nsec = ktime_get_ns();
 	for (i = 0; i < 100; i++) {
 		if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
 		    test[i].start, test[i].length))
@@ -1098,14 +1093,11 @@ static int __init crc32_test(void)
 		    test[i].start, test[i].length))
 			errors++;
 	}
-	getnstimeofday(&stop);
+	nsec = ktime_get_ns() - nsec;
 
 	local_irq_restore(flags);
 	local_irq_enable();
 
-	nsec = stop.tv_nsec - start.tv_nsec +
-		1000000000 * (stop.tv_sec - start.tv_sec);
-
 	pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
 		 CRC_LE_BITS, CRC_BE_BITS);
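The crc32 self-tests above move from getnstimeofday() to ktime_get_ns(), which returns monotonic time in nanoseconds as a u64 and removes the struct timespec arithmetic. A minimal sketch of that timing pattern, with a throwaway loop standing in for the code being measured:

#include <linux/kernel.h>
#include <linux/ktime.h>

static void example_time_it(void)
{
	volatile int sink = 0;
	u64 nsec;
	int i;

	nsec = ktime_get_ns();			/* monotonic clock, in ns */
	for (i = 0; i < 1000; i++)		/* stand-in for the timed work */
		sink += i;
	nsec = ktime_get_ns() - nsec;

	pr_info("work took %llu ns\n", nsec);
}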
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index 72145646857e..3d766e78fbe2 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -10,7 +10,7 @@
 
 static void *dma_noop_alloc(struct device *dev, size_t size,
 			    dma_addr_t *dma_handle, gfp_t gfp,
-			    struct dma_attrs *attrs)
+			    unsigned long attrs)
 {
 	void *ret;
 
@@ -22,7 +22,7 @@ static void *dma_noop_alloc(struct device *dev, size_t size,
 
 static void dma_noop_free(struct device *dev, size_t size,
 			  void *cpu_addr, dma_addr_t dma_addr,
-			  struct dma_attrs *attrs)
+			  unsigned long attrs)
 {
 	free_pages((unsigned long)cpu_addr, get_order(size));
 }
@@ -30,13 +30,14 @@ static void dma_noop_free(struct device *dev, size_t size,
 static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
 				    unsigned long offset, size_t size,
 				    enum dma_data_direction dir,
-				    struct dma_attrs *attrs)
+				    unsigned long attrs)
 {
 	return page_to_phys(page) + offset;
 }
 
 static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-			   enum dma_data_direction dir, struct dma_attrs *attrs)
+			   enum dma_data_direction dir,
+			   unsigned long attrs)
 {
 	int i;
 	struct scatterlist *sg;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index fe42b6ec3f0c..da796e2dc4f5 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -188,6 +188,13 @@ static int ddebug_change(const struct ddebug_query *query,
 			newflags = (dp->flags & mask) | flags;
 			if (newflags == dp->flags)
 				continue;
+#ifdef HAVE_JUMP_LABEL
+			if (dp->flags & _DPRINTK_FLAGS_PRINT) {
+				if (!(flags & _DPRINTK_FLAGS_PRINT))
+					static_branch_disable(&dp->key.dd_key_true);
+			} else if (flags & _DPRINTK_FLAGS_PRINT)
+				static_branch_enable(&dp->key.dd_key_true);
+#endif
 			dp->flags = newflags;
 			vpr_info("changed %s:%d [%s]%s =%s\n",
 				 trim_prefix(dp->filename), dp->lineno,
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index 3eb3e4722b8e..db283ba4d2c1 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -125,7 +125,10 @@ struct cpio_data find_cpio_data(const char *path, void *data,
 		if ((ch[C_MODE] & 0170000) == 0100000 &&
 		    ch[C_NAMESIZE] >= mypathsize &&
 		    !memcmp(p, path, mypathsize)) {
-			*nextoff = (long)nptr - (long)data;
+
+			if (nextoff)
+				*nextoff = (long)nptr - (long)data;
+
 			if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
 				pr_warn(
 				"File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index c27e269210c4..a816f3a80625 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -29,8 +29,7 @@ again:
 	index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
 	if (index < size) {
 		if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
-			/* we could do more effectively */
-			start = index + 1;
+			start = ALIGN(shift + index, boundary_size) - shift;
 			goto again;
 		}
 		bitmap_set(map, index, nr);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index d67c8288d95d..9e8c7386b3a0 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -144,7 +144,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (!fault_in_pages_writeable(buf, copy)) {
+	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
 		kaddr = kmap_atomic(page);
 		from = kaddr + offset;
 
@@ -175,6 +175,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		copy = min(bytes, iov->iov_len - skip);
 	}
 	/* Too bad - revert to non-atomic kmap */
+
 	kaddr = kmap(page);
 	from = kaddr + offset;
 	left = __copy_to_user(buf, from, copy);
@@ -193,6 +194,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		bytes -= copy;
 	}
 	kunmap(page);
+
 done:
 	if (skip == iov->iov_len) {
 		iov++;
@@ -225,7 +227,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (!fault_in_pages_readable(buf, copy)) {
+	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
 		kaddr = kmap_atomic(page);
 		to = kaddr + offset;
 
@@ -256,6 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		copy = min(bytes, iov->iov_len - skip);
 	}
 	/* Too bad - revert to non-atomic kmap */
+
 	kaddr = kmap(page);
 	to = kaddr + offset;
 	left = __copy_from_user(to, buf, copy);
@@ -274,6 +277,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		bytes -= copy;
 	}
 	kunmap(page);
+
 done:
 	if (skip == iov->iov_len) {
 		iov++;
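The dma-noop.c hunks above (and the swiotlb.c ones further down) are part of the tree-wide switch from struct dma_attrs * to a plain unsigned long bitmask of DMA_ATTR_* flags, with 0 meaning "no attributes". A hedged sketch of what a caller looks like after the conversion; the device, buffer and length are assumed to come from the surrounding driver:

#include <linux/dma-mapping.h>

/* Sketch only: map a buffer while asking the core to skip CPU cache syncs. */
static dma_addr_t example_map(struct device *dev, void *buf, size_t len)
{
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
}

Passing 0 for "no attributes" is also why the swiotlb_map_sg()/swiotlb_unmap_sg() wrappers below now hand 0 to the _attrs variants instead of NULL.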
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index c6272ae2015e..5a0f75a3bf01 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -363,6 +363,9 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 			lzeros = 0;
 	}
 
+	miter.consumed = lzeros;
+	sg_miter_stop(&miter);
+
 	nbytes -= lzeros;
 	nbits = nbytes * 8;
 	if (nbits > MAX_EXTERN_MPI_BITS) {
@@ -390,7 +393,10 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 	z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
 	z %= BYTES_PER_MPI_LIMB;
 
-	for (;;) {
+	while (sg_miter_next(&miter)) {
+		buff = miter.addr;
+		len = miter.length;
+
 		for (x = 0; x < len; x++) {
 			a <<= 8;
 			a |= *buff++;
@@ -400,12 +406,6 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 			}
 		}
 		z += x;
-
-		if (!sg_miter_next(&miter))
-			break;
-
-		buff = miter.addr;
-		len = miter.length;
 	}
 
 	return val;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 61b8fb529cef..1b7bf7314141 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -277,10 +277,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 
 		/*
 		 * Even if the caller has preloaded, try to allocate from the
-		 * cache first for the new node to get accounted.
+		 * cache first for the new node to get accounted to the memory
+		 * cgroup.
 		 */
 		ret = kmem_cache_alloc(radix_tree_node_cachep,
-				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+				       gfp_mask | __GFP_NOWARN);
 		if (ret)
 			goto out;
 
@@ -303,8 +304,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		kmemleak_update_trace(ret);
 		goto out;
 	}
-	ret = kmem_cache_alloc(radix_tree_node_cachep,
-			       gfp_mask | __GFP_ACCOUNT);
+	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 out:
 	BUG_ON(radix_tree_is_internal_node(ret));
 	return ret;
@@ -351,6 +351,12 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
 	struct radix_tree_node *node;
 	int ret = -ENOMEM;
 
+	/*
+	 * Nodes preloaded by one cgroup can be be used by another cgroup, so
+	 * they should never be accounted to any particular memory cgroup.
+	 */
+	gfp_mask &= ~__GFP_ACCOUNT;
+
 	preempt_disable();
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
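The comment added to __radix_tree_preload() concerns the usual preload pattern: nodes are preallocated with a sleeping allocation and then consumed under a spinlock, so they may end up serving a different cgroup than the one that preloaded them. A minimal sketch of that pattern, with illustrative tree, lock and item names:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(example_tree, GFP_ATOMIC);	/* illustrative tree */
static DEFINE_SPINLOCK(example_lock);

static int example_insert(unsigned long index, void *item)
{
	int err;

	/* Preallocate nodes while we may still sleep ... */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	/* ... then insert under the lock, drawing on the per-CPU preload pool. */
	spin_lock(&example_lock);
	err = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_lock);

	radix_tree_preload_end();
	return err;
}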
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 2c5de86460c5..08f8043cac61 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -46,12 +46,14 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 		rs->begin = jiffies;
 
 	if (time_is_before_jiffies(rs->begin + rs->interval)) {
-		if (rs->missed)
-			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
-				func, rs->missed);
+		if (rs->missed) {
+			if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+				pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
+				rs->missed = 0;
+			}
+		}
 		rs->begin = jiffies;
 		rs->printed = 0;
-		rs->missed = 0;
 	}
 	if (rs->burst && rs->burst > rs->printed) {
 		rs->printed++;
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1356454e36de..eb8a19fee110 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -539,17 +539,39 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 {
 	struct rb_node *parent = rb_parent(victim);
 
+	/* Copy the pointers/colour from the victim to the replacement */
+	*new = *victim;
+
 	/* Set the surrounding nodes to point to the replacement */
-	__rb_change_child(victim, new, parent, root);
 	if (victim->rb_left)
 		rb_set_parent(victim->rb_left, new);
 	if (victim->rb_right)
 		rb_set_parent(victim->rb_right, new);
+	__rb_change_child(victim, new, parent, root);
+}
+EXPORT_SYMBOL(rb_replace_node);
+
+void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
+			 struct rb_root *root)
+{
+	struct rb_node *parent = rb_parent(victim);
 
 	/* Copy the pointers/colour from the victim to the replacement */
 	*new = *victim;
+
+	/* Set the surrounding nodes to point to the replacement */
+	if (victim->rb_left)
+		rb_set_parent(victim->rb_left, new);
+	if (victim->rb_right)
+		rb_set_parent(victim->rb_right, new);
+
+	/* Set the parent's pointer to the new node last after an RCU barrier
+	 * so that the pointers onwards are seen to be set correctly when doing
+	 * an RCU walk over the tree.
+	 */
+	__rb_change_child_rcu(victim, new, parent, root);
 }
-EXPORT_SYMBOL(rb_replace_node);
+EXPORT_SYMBOL(rb_replace_node_rcu);
 
 static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
 {
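A hedged sketch of how the new rb_replace_node_rcu() might be used: the writer swaps in an equal-keyed replacement under its update-side lock and defers the free, while lookups walk the tree under rcu_read_lock(). The entry structure and lock below are illustrative and not part of this patch.

#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_entry {
	struct rb_node node;
	struct rcu_head rcu;
	int key;
};

static struct rb_root example_root = RB_ROOT;
static DEFINE_SPINLOCK(example_lock);

/* Replace 'old' with 'new' (same key) without disturbing RCU readers. */
static void example_replace(struct example_entry *old,
			    struct example_entry *new)
{
	spin_lock(&example_lock);
	rb_replace_node_rcu(&old->node, &new->node, &example_root);
	spin_unlock(&example_lock);

	kfree_rcu(old, rcu);	/* free only once readers are done with it */
}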
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 53ad6c0831ae..60f77f1d470a 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -242,6 +242,7 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 		 */
 		alloc_flags &= ~GFP_ZONEMASK;
 		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+		alloc_flags |= __GFP_NOWARN;
 		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
 		if (page)
 			prealloc = page_address(page);
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 33f655ef48cd..9c5fe8110413 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -40,8 +40,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
 		unsigned long c, data;
 
 		/* Fall back to byte-at-a-time if we get a page fault */
-		if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-			break;
+		unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
 		*(unsigned long *)(dst+res) = c;
 		if (has_zero(c, &data, &constants)) {
 			data = prep_zero_mask(c, data, &constants);
@@ -56,8 +56,7 @@ byte_at_a_time:
 	while (max) {
 		char c;
 
-		if (unlikely(unsafe_get_user(c,src+res)))
-			return -EFAULT;
+		unsafe_get_user(c,src+res, efault);
 		dst[res] = c;
 		if (!c)
 			return res;
@@ -76,6 +75,7 @@ byte_at_a_time:
 	 * Nope: we hit the address space limit, and we still had more
 	 * characters the caller would have wanted. That's an EFAULT.
 	 */
+efault:
 	return -EFAULT;
 }
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 2625943625d7..8e105ed4df12 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 	src -= align;
 	max += align;
 
-	if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
-		return 0;
+	unsafe_get_user(c, (unsigned long __user *)src, efault);
 	c |= aligned_byte_mask(align);
 
 	for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 		if (unlikely(max <= sizeof(unsigned long)))
 			break;
 		max -= sizeof(unsigned long);
-		if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
-			return 0;
+		unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
 	}
 	res -= align;
 
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 	 * Nope: we hit the address space limit, and we still had more
 	 * characters the caller would have wanted. That's 0.
 	 */
+efault:
 	return 0;
 }
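The strncpy_from_user.c and strnlen_user.c hunks above switch to the label-based form of unsafe_get_user(), where a faulting access jumps to the supplied label instead of returning a value to check. A minimal sketch of the control flow; in the real callers the accesses sit inside a user_access_begin()/user_access_end() region, which is omitted here because its exact form varies by architecture and kernel version.

#include <linux/uaccess.h>

/* Sketch only: read one word from userspace with label-based fault handling. */
static long example_read_word(const unsigned long __user *p, unsigned long *out)
{
	unsigned long v;

	unsafe_get_user(v, p, efault);	/* on fault, control jumps to efault */
	*out = v;
	return 0;

efault:
	return -EFAULT;
}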
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 76f29ecba8f4..22e13a0e19d7 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -738,7 +738,7 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
 dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    unsigned long offset, size_t size,
 			    enum dma_data_direction dir,
-			    struct dma_attrs *attrs)
+			    unsigned long attrs)
 {
 	phys_addr_t map, phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = phys_to_dma(dev, phys);
@@ -807,7 +807,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			size_t size, enum dma_data_direction dir,
-			struct dma_attrs *attrs)
+			unsigned long attrs)
 {
 	unmap_single(hwdev, dev_addr, size, dir);
 }
@@ -877,7 +877,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_device);
 */
 int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		     enum dma_data_direction dir, struct dma_attrs *attrs)
+		     enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -914,7 +914,7 @@ int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	       enum dma_data_direction dir)
 {
-	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
 }
 EXPORT_SYMBOL(swiotlb_map_sg);
 
@@ -924,7 +924,8 @@ EXPORT_SYMBOL(swiotlb_map_sg);
 */
 void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
+		       int nelems, enum dma_data_direction dir,
+		       unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -941,7 +942,7 @@ void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		 enum dma_data_direction dir)
 {
-	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg);
diff --git a/lib/test_hash.c b/lib/test_hash.c
index c9549c8b4909..66c5fc8351e8 100644
--- a/lib/test_hash.c
+++ b/lib/test_hash.c
@@ -155,8 +155,8 @@ test_hash_init(void)
 		buf[j] = '\0';
 
 		for (i = 0; i <= j; i++) {
-			u64 hashlen = hashlen_string(buf+i);
-			u32 h0 = full_name_hash(buf+i, j-i);
+			u64 hashlen = hashlen_string(buf+i, buf+i);
+			u32 h0 = full_name_hash(buf+i, buf+i, j-i);
 
 			/* Check that hashlen_string gets the length right */
 			if (hashlen_len(hashlen) != j-i) {
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 8799ae5e2e42..fb0409df1bcf 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -308,7 +308,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data,
 		return;
 
 	ubsan_prologue(&data->location, &flags);
-	pr_err("%s address %pk with insufficient space\n",
+	pr_err("%s address %p with insufficient space\n",
 		type_check_kinds[data->type_check_kind],
 		(void *) ptr);
 	pr_err("for an object of type %s\n", data->type->type_name);