Diffstat (limited to 'arch')
 27 files changed, 142 insertions(+), 94 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index befe37d4bc0e..53d846f1bfe7 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -91,6 +91,7 @@ alternative_cb_end
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_compute_layout(void);
 
 static inline unsigned long __kern_hyp_va(unsigned long v)
 {
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 788ae971f11c..25a73aab438f 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
+extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 127712b0b970..32fc8061aa76 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 {
 	unsigned long ret, limit = current_thread_info()->addr_limit;
 
+	/*
+	 * Asynchronous I/O running in a kernel thread does not have the
+	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
+	 * the user address before checking.
+	 */
 	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
-	    test_thread_flag(TIF_TAGGED_ADDR))
+	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
 		addr = untagged_addr(addr);
 
 	__chk_user_ptr(addr);
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 4fe1514fcbfd..7d02f9966d34 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -133,7 +133,6 @@ ENTRY(ftrace_graph_caller)
 	bl	prepare_ftrace_return
 	b	ftrace_common_return
 ENDPROC(ftrace_graph_caller)
-#else
 #endif
 
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -287,6 +286,7 @@ GLOBAL(ftrace_graph_call)		// ftrace_graph_caller();
 
 	mcount_exit
 ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -307,7 +307,6 @@ ENTRY(ftrace_graph_caller)
 	mcount_exit
 ENDPROC(ftrace_graph_caller)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
 ENTRY(ftrace_stub)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 583f71abbe98..7c6a0a41676f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -76,7 +76,8 @@ alternative_else_nop_endif
 #ifdef CONFIG_VMAP_STACK
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
-	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
+	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
+	 * should always be zero.
 	 */
 	add	sp, sp, x0			// sp' = sp + x0
 	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 513b29c3e735..4a9e773a177f 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -21,6 +21,7 @@
 #include <asm/fixmap.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/sections.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
 #define AARCH64_INSN_N_BIT	BIT(22)
@@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
 
+static bool is_exit_text(unsigned long addr)
+{
+	/* discarded with init text/data */
+	return system_state < SYSTEM_RUNNING &&
+		addr >= (unsigned long)__exittext_begin &&
+		addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+	return core_kernel_text(addr) || is_exit_text(addr);
+}
+
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
 	unsigned long uintaddr = (uintptr_t) addr;
-	bool module = !core_kernel_text(uintaddr);
+	bool image = is_image_text(uintaddr);
 	struct page *page;
 
-	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		page = vmalloc_to_page(addr);
-	else if (!module)
+	if (image)
 		page = phys_to_page(__pa_symbol(addr));
+	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+		page = vmalloc_to_page(addr);
 	else
 		return addr;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ab149bcc3dc7..d4ed9a19d8fe 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <linux/of.h>
 #include <linux/irq_work.h>
 #include <linux/kexec.h>
+#include <linux/kvm_host.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -39,6 +40,7 @@
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/daifflags.h>
+#include <asm/kvm_mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
@@ -407,6 +409,8 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
+	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
+		kvm_compute_layout();
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 841a8b4ce114..497f9675071d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -158,9 +158,12 @@ SECTIONS
 
 	__inittext_begin = .;
 	INIT_TEXT_SECTION(8)
+
+	__exittext_begin = .;
 	.exit.text : {
 		ARM_EXIT_KEEP(EXIT_TEXT)
 	}
+	__exittext_end = .;
 
 	. = ALIGN(4);
 	.altinstructions : {
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 2cf7d4b606c3..dab1fea4752a 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -22,7 +22,7 @@ static u8 tag_lsb;
 static u64 tag_val;
 static u64 va_mask;
 
-static void compute_layout(void)
+__init void kvm_compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
@@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 
 	BUG_ON(nr_inst != 5);
 
-	if (!has_vhe() && !va_mask)
-		compute_layout();
-
 	for (i = 0; i < nr_inst; i++) {
 		u32 rd, rn, insn, oinsn;
 
@@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 		return;
 	}
 
-	if (!va_mask)
-		compute_layout();
-
 	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 93f9f77582ae..0a920b538a89 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -142,6 +142,7 @@ static const struct prot_bits pte_bits[] = {
 		.mask	= PTE_UXN,
 		.val	= PTE_UXN,
 		.set	= "UXN",
+		.clear	= "   ",
 	}, {
 		.mask	= PTE_ATTRINDX_MASK,
 		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRnE),
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index be9481cdf3b9..b65dffdfb201 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -214,15 +214,14 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long max_dma32 = min;
-	unsigned long __maybe_unused max_dma = min;
+	unsigned long __maybe_unused max_dma, max_dma32;
 
 	memset(zone_size, 0, sizeof(zone_size));
 
+	max_dma = max_dma32 = min;
 #ifdef CONFIG_ZONE_DMA
-	max_dma = PFN_DOWN(arm64_dma_phys_limit);
+	max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit);
 	zone_size[ZONE_DMA] = max_dma - min;
-	max_dma32 = max_dma;
 #endif
 #ifdef CONFIG_ZONE_DMA32
 	max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
@@ -236,25 +235,23 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		unsigned long start = memblock_region_memory_base_pfn(reg);
 		unsigned long end = memblock_region_memory_end_pfn(reg);
 
-		if (start >= max)
-			continue;
 #ifdef CONFIG_ZONE_DMA
-		if (start < max_dma) {
-			unsigned long dma_end = min_not_zero(end, max_dma);
+		if (start >= min && start < max_dma) {
+			unsigned long dma_end = min(end, max_dma);
 			zhole_size[ZONE_DMA] -= dma_end - start;
+			start = dma_end;
 		}
 #endif
 #ifdef CONFIG_ZONE_DMA32
-		if (start < max_dma32) {
+		if (start >= max_dma && start < max_dma32) {
 			unsigned long dma32_end = min(end, max_dma32);
-			unsigned long dma32_start = max(start, max_dma);
-			zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
+			zhole_size[ZONE_DMA32] -= dma32_end - start;
+			start = dma32_end;
 		}
 #endif
-		if (end > max_dma32) {
+		if (start >= max_dma32 && start < max) {
 			unsigned long normal_end = min(end, max);
-			unsigned long normal_start = max(start, max_dma32);
-			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+			zhole_size[ZONE_NORMAL] -= normal_end - start;
 		}
 	}
diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h
index 2b451c4496da..0261507dc264 100644
--- a/arch/ia64/include/asm/agp.h
+++ b/arch/ia64/include/asm/agp.h
@@ -14,8 +14,8 @@
  * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
  * (unlike x86, where it gets mapped "write-coalescing").
  */
-#define map_page_into_agp(page)		/* nothing */
-#define unmap_page_from_agp(page)	/* nothing */
+#define map_page_into_agp(page)		do { } while (0)
+#define unmap_page_from_agp(page)	do { } while (0)
 #define flush_agp_cache()		mb()
 
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
diff --git a/arch/m68k/coldfire/entry.S b/arch/m68k/coldfire/entry.S
index 52d312d5b4d4..d43a02795a4a 100644
--- a/arch/m68k/coldfire/entry.S
+++ b/arch/m68k/coldfire/entry.S
@@ -108,7 +108,7 @@ ret_from_exception:
 	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel */
 	jeq	Luser_return		/* if so, skip resched, signals */
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
 	movel	%d1,%a0
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 9c63b596e6ce..a09595f00cab 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -28,7 +28,7 @@ static inline int arch_get_random_seed_int(unsigned int *v)
 	unsigned long val;
 	int rc;
 
-	rc = arch_get_random_long(&val);
+	rc = arch_get_random_seed_long(&val);
 	if (rc)
 		*v = val;
 
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 603aed229af7..28dcf8222943 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -64,7 +64,7 @@
 
 /* Macro for generating the ***_bits() functions */
 #define DEFINE_BITOP(fn, op, prefix)	\
-static __inline__ void fn(unsigned long mask,	\
+static inline void fn(unsigned long mask,	\
 		volatile unsigned long *_p)	\
 {	\
 	unsigned long old;	\
@@ -86,22 +86,22 @@ DEFINE_BITOP(clear_bits, andc, "")
 DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
 DEFINE_BITOP(change_bits, xor, "")
 
-static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+static inline void arch_set_bit(int nr, volatile unsigned long *addr)
 {
 	set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
 {
 	clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
-static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit_unlock(int nr, volatile unsigned long *addr)
 {
 	clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+static inline void arch_change_bit(int nr, volatile unsigned long *addr)
 {
 	change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
@@ -109,7 +109,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
 /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
  * operands.
  */
 #define DEFINE_TESTOP(fn, op, prefix, postfix, eh)	\
-static __inline__ unsigned long fn(	\
+static inline unsigned long fn(	\
 		unsigned long mask,	\
 		volatile unsigned long *_p)	\
 {	\
@@ -138,34 +138,34 @@ DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
 DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
 	      PPC_ATOMIC_EXIT_BARRIER, 0)
 
-static __inline__ int test_and_set_bit(unsigned long nr,
-				       volatile unsigned long *addr)
+static inline int arch_test_and_set_bit(unsigned long nr,
+					volatile unsigned long *addr)
 {
 	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_set_bit_lock(unsigned long nr,
-					    volatile unsigned long *addr)
+static inline int arch_test_and_set_bit_lock(unsigned long nr,
+					     volatile unsigned long *addr)
 {
 	return test_and_set_bits_lock(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_clear_bit(unsigned long nr,
-					 volatile unsigned long *addr)
+static inline int arch_test_and_clear_bit(unsigned long nr,
+					  volatile unsigned long *addr)
 {
 	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_change_bit(unsigned long nr,
-					  volatile unsigned long *addr)
+static inline int arch_test_and_change_bit(unsigned long nr,
+					   volatile unsigned long *addr)
 {
 	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
 #ifdef CONFIG_PPC64
-static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
-						volatile unsigned long *addr)
+static inline unsigned long
+clear_bit_unlock_return_word(int nr, volatile unsigned long *addr)
 {
 	unsigned long old, t;
 	unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
@@ -185,15 +185,18 @@ static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
 	return old;
 }
 
-/* This is a special function for mm/filemap.c */
-#define clear_bit_unlock_is_negative_byte(nr, addr)	\
-	(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters))
+/*
+ * This is a special function for mm/filemap.c
+ * Bit 7 corresponds to PG_waiters.
+ */
+#define arch_clear_bit_unlock_is_negative_byte(nr, addr)	\
+	(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7))
 
 #endif /* CONFIG_PPC64 */
 
 #include <asm-generic/bitops/non-atomic.h>
 
-static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr)
 {
 	__asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory");
 	__clear_bit(nr, addr);
@@ -215,14 +218,14 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
  * fls: find last (most-significant) bit set.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static __inline__ int fls(unsigned int x)
+static inline int fls(unsigned int x)
 {
 	return 32 - __builtin_clz(x);
 }
 
 #include <asm-generic/bitops/builtin-__fls.h>
 
-static __inline__ int fls64(__u64 x)
+static inline int fls64(__u64 x)
 {
 	return 64 - __builtin_clzll(x);
 }
@@ -239,6 +242,10 @@ unsigned long __arch_hweight64(__u64 w);
 
 #include <asm-generic/bitops/find.h>
 
+/* wrappers that deal with KASAN instrumentation */
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
 /* Little-endian versions */
 #include <asm-generic/bitops/le.h>
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index a115970a6809..40f13f3626d3 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -83,6 +83,7 @@ struct vdso_data {
 	__s64 wtom_clock_sec;			/* Wall to monotonic clock sec */
 	__s64 stamp_xtime_sec;			/* xtime secs as at tb_orig_stamp */
 	__s64 stamp_xtime_nsec;			/* xtime nsecs as at tb_orig_stamp */
+	__u32 hrtimer_res;			/* hrtimer resolution */
 	__u32 syscall_map_64[SYSCALL_MAP_SIZE];	/* map of syscalls */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE];	/* map of syscalls */
 };
@@ -105,6 +106,7 @@ struct vdso_data {
 	__s32 stamp_xtime_sec;		/* xtime seconds as at tb_orig_stamp */
 	__s32 stamp_xtime_nsec;		/* xtime nsecs as at tb_orig_stamp */
 	__u32 stamp_sec_fraction;	/* fractional seconds of stamp_xtime */
+	__u32 hrtimer_res;		/* hrtimer resolution */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 dcache_block_size;	/* L1 d-cache block size */
 	__u32 icache_block_size;	/* L1 i-cache block size */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f22bd6d1fe93..3d47aec7becf 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -388,6 +388,7 @@ int main(void)
 	OFFSET(STAMP_XTIME_SEC, vdso_data, stamp_xtime_sec);
 	OFFSET(STAMP_XTIME_NSEC, vdso_data, stamp_xtime_nsec);
 	OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
+	OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res);
 	OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
 	OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
 	OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size);
@@ -413,7 +414,6 @@ int main(void)
 	DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
 	DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
 	DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
-	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 
 #ifdef CONFIG_BUG
 	DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 838d9d4650c7..6f7a3a7162c5 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -240,6 +240,9 @@ set_ivor:
 
 	bl	early_init
 
+#ifdef CONFIG_KASAN
+	bl	kasan_early_init
+#endif
 #ifdef CONFIG_RELOCATABLE
 	mr	r3,r30
 	mr	r4,r31
@@ -266,9 +269,6 @@ set_ivor:
 /*
  * Decide what sort of machine this is and initialize the MMU.
  */
-#ifdef CONFIG_KASAN
-	bl	kasan_early_init
-#endif
 	mr	r3,r30
 	mr	r4,r31
 	bl	machine_init
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2d13cea13954..1168e8b37e30 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -960,6 +960,7 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->stamp_xtime_sec = xt.tv_sec;
 	vdso_data->stamp_xtime_nsec = xt.tv_nsec;
 	vdso_data->stamp_sec_fraction = frac_sec;
+	vdso_data->hrtimer_res = hrtimer_resolution;
 	smp_wmb();
 	++(vdso_data->tb_update_count);
 }
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index c8e6902cb01b..3306672f57a9 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -154,12 +154,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
 	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
 	bne	cr0,99f
 
+	mflr	r12
+  .cfi_register lr,r12
+	bl	__get_datapage@local	/* get data page */
+	lwz	r5, CLOCK_HRTIMER_RES(r3)
+	mtlr	r12
 	li	r3,0
 	cmpli	cr0,r4,0
 	crclr	cr0*4+so
 	beqlr
-	lis	r5,CLOCK_REALTIME_RES@h
-	ori	r5,r5,CLOCK_REALTIME_RES@l
 	stw	r3,TSPC32_TV_SEC(r4)
 	stw	r5,TSPC32_TV_NSEC(r4)
 	blr
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 1f24e411af80..1c9a04703250 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -186,12 +186,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
 	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
 	bne	cr0,99f
 
+	mflr	r12
+  .cfi_register lr,r12
+	bl	V_LOCAL_FUNC(__get_datapage)
+	lwz	r5, CLOCK_HRTIMER_RES(r3)
+	mtlr	r12
 	li	r3,0
 	cmpldi	cr0,r4,0
 	crclr	cr0*4+so
 	beqlr
-	lis	r5,CLOCK_REALTIME_RES@h
-	ori	r5,r5,CLOCK_REALTIME_RES@l
 	std	r3,TSPC64_TV_SEC(r4)
 	std	r5,TSPC64_TV_NSEC(r4)
 	blr
diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
index 377712e85605..0666a8d29596 100644
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -17,14 +17,14 @@ void arch_wb_cache_pmem(void *addr, size_t size)
 	unsigned long start = (unsigned long) addr;
 	flush_dcache_range(start, start + size);
 }
-EXPORT_SYMBOL(arch_wb_cache_pmem);
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
 	unsigned long start = (unsigned long) addr;
 	flush_dcache_range(start, start + size);
 }
-EXPORT_SYMBOL(arch_invalidate_pmem);
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 
 /*
  * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols
  */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ad299e72ec30..9488b63dfc87 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -121,7 +121,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
 	unsigned long i;
 
 	for (i = start; i < stop; i += chunk) {
-		flush_dcache_range(i, min(stop, start + chunk));
+		flush_dcache_range(i, min(stop, i + chunk));
 		cond_resched();
 	}
 }
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index e04b20625cb9..000b350d4060 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -59,10 +59,6 @@ static void export_imc_mode_and_cmd(struct device_node *node,
 
 	imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
 
-	/*
-	 * Return here, either because 'imc' directory already exists,
-	 * Or failed to create a new one.
-	 */
 	if (!imc_debugfs_parent)
 		return;
@@ -135,7 +131,6 @@ static int imc_get_mem_addr_nest(struct device_node *node,
 	}
 
 	pmu_ptr->imc_counter_mmaped = true;
-	export_imc_mode_and_cmd(node, pmu_ptr);
 	kfree(base_addr_arr);
 	kfree(chipid_arr);
 	return 0;
@@ -151,7 +146,7 @@ error:
  * and domain as the inputs.
 * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
 */
-static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 {
 	int ret = 0;
 	struct imc_pmu *pmu_ptr;
@@ -159,27 +154,23 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 
 	/* Return for unknown domain */
 	if (domain < 0)
-		return -EINVAL;
+		return NULL;
 
 	/* memory for pmu */
 	pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
 	if (!pmu_ptr)
-		return -ENOMEM;
+		return NULL;
 
 	/* Set the domain */
 	pmu_ptr->domain = domain;
 
 	ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
-	if (ret) {
-		ret = -EINVAL;
+	if (ret)
 		goto free_pmu;
-	}
 
 	if (!of_property_read_u32(parent, "offset", &offset)) {
-		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) {
-			ret = -EINVAL;
+		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
 			goto free_pmu;
-		}
 	}
 
 	/* Function to register IMC pmu */
@@ -190,14 +181,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 		if (pmu_ptr->domain == IMC_DOMAIN_NEST)
 			kfree(pmu_ptr->mem_info);
 		kfree(pmu_ptr);
-		return ret;
+		return NULL;
 	}
 
-	return 0;
+	return pmu_ptr;
 
 free_pmu:
 	kfree(pmu_ptr);
-	return ret;
+	return NULL;
 }
 
 static void disable_nest_pmu_counters(void)
@@ -254,6 +245,7 @@ int get_max_nest_dev(void)
 static int opal_imc_counters_probe(struct platform_device *pdev)
 {
 	struct device_node *imc_dev = pdev->dev.of_node;
+	struct imc_pmu *pmu;
 	int pmu_count = 0, domain;
 	bool core_imc_reg = false, thread_imc_reg = false;
 	u32 type;
@@ -269,6 +261,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 	}
 
 	for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
+		pmu = NULL;
 		if (of_property_read_u32(imc_dev, "type", &type)) {
 			pr_warn("IMC Device without type property\n");
 			continue;
@@ -285,7 +278,14 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 			domain = IMC_DOMAIN_THREAD;
 			break;
 		case IMC_TYPE_TRACE:
-			domain = IMC_DOMAIN_TRACE;
+			/*
+			 * FIXME. Using trace_imc events to monitor application
+			 * or KVM thread performance can cause a checkstop
+			 * (system crash).
+			 * Disable it for now.
+			 */
+			pr_info_once("IMC: disabling trace_imc PMU\n");
+			domain = -1;
 			break;
 		default:
 			pr_warn("IMC Unknown Device type \n");
@@ -293,9 +293,13 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 			break;
 		}
 
-		if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
-			if (domain == IMC_DOMAIN_NEST)
+		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
+		if (pmu != NULL) {
+			if (domain == IMC_DOMAIN_NEST) {
+				if (!imc_debugfs_parent)
+					export_imc_mode_and_cmd(imc_dev, pmu);
 				pmu_count++;
+			}
 			if (domain == IMC_DOMAIN_CORE)
 				core_imc_reg = true;
 			if (domain == IMC_DOMAIN_THREAD)
@@ -303,10 +307,6 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 		}
 	}
 
-	/* If none of the nest units are registered, remove debugfs interface */
-	if (pmu_count == 0)
-		debugfs_remove_recursive(imc_debugfs_parent);
-
 	/* If core imc is not registered, unregister thread-imc */
 	if (!core_imc_reg && thread_imc_reg)
 		unregister_thread_imc();
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 33c10749edec..55dc61cb4867 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -392,20 +392,28 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
 	data->esb_shift = esb_shift;
 	data->trig_page = trig_page;
 
+	data->hw_irq = hw_irq;
+
 	/*
	 * No chip-id for the sPAPR backend. This has an impact how we
	 * pick a target. See xive_pick_irq_target().
	 */
 	data->src_chip = XIVE_INVALID_CHIP_ID;
 
+	/*
+	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
+	 * be used for interrupt management. Skip the remapping of the
+	 * ESB pages which are not available.
+	 */
+	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
+		return 0;
+
 	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
 	if (!data->eoi_mmio) {
 		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
 		return -ENOMEM;
 	}
 
-	data->hw_irq = hw_irq;
-
 	/* Full function page supports trigger */
 	if (flags & XIVE_SRC_TRIGGER) {
 		data->trig_mmio = data->eoi_mmio;
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index eb7eed43e780..431e208a5ea4 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -241,7 +241,9 @@ static inline void arch___clear_bit_unlock(unsigned long nr,
 	arch___clear_bit(nr, ptr);
 }
 
-#include <asm-generic/bitops-instrumented.h>
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-non-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
 
 /*
 * Functions which use MSB0 bit numbering.
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 7d1f6a49bfae..062cdecb2f24 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -388,7 +388,9 @@ static __always_inline int fls64(__u64 x)
 
 #include <asm-generic/bitops/const_hweight.h>
 
-#include <asm-generic/bitops-instrumented.h>
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-non-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
 
 #include <asm-generic/bitops/le.h>