Diffstat (limited to 'arch/arm64/include')
-rw-r--r--   arch/arm64/include/asm/asm-uaccess.h | 10
-rw-r--r--   arch/arm64/include/asm/barrier.h     | 12
-rw-r--r--   arch/arm64/include/asm/cache.h       |  3
-rw-r--r--   arch/arm64/include/asm/cpucaps.h     |  6
-rw-r--r--   arch/arm64/include/asm/cpufeature.h  | 14
-rw-r--r--   arch/arm64/include/asm/daifflags.h   | 19
-rw-r--r--   arch/arm64/include/asm/exception.h   | 22
-rw-r--r--   arch/arm64/include/asm/ftrace.h      | 23
-rw-r--r--   arch/arm64/include/asm/insn.h        |  3
-rw-r--r--   arch/arm64/include/asm/irqflags.h    | 19
-rw-r--r--   arch/arm64/include/asm/kvm_host.h    |  3
-rw-r--r--   arch/arm64/include/asm/memory.h      | 10
-rw-r--r--   arch/arm64/include/asm/module.h      |  2
-rw-r--r--   arch/arm64/include/asm/pgtable.h     | 17
-rw-r--r--   arch/arm64/include/asm/processor.h   | 14
-rw-r--r--   arch/arm64/include/asm/sysreg.h      |  2
-rw-r--r--   arch/arm64/include/asm/traps.h       | 10
17 files changed, 146 insertions, 43 deletions
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index f74909ba29bd..a70575edae8e 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -74,14 +74,4 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
 	SET_PSTATE_PAN(0)
 alternative_else_nop_endif
 	.endm
-
-/*
- * Remove the address tag from a virtual address, if present.
- */
-	.macro	clear_address_tag, dst, addr
-	tst	\addr, #(1 << 55)
-	bic	\dst, \addr, #(0xff << 56)
-	csel	\dst, \dst, \addr, eq
-	.endm
-
 #endif
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index e0e2b1946f42..7d9cc5ec4971 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -29,6 +29,18 @@
 			 SB_BARRIER_INSN"nop\n",		\
 			 ARM64_HAS_SB))
 
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+#define pmr_sync()						\
+	do {							\
+		extern struct static_key_false gic_pmr_sync;	\
+								\
+		if (static_branch_unlikely(&gic_pmr_sync))	\
+			dsb(sy);				\
+	} while(0)
+#else
+#define pmr_sync()	do {} while (0)
+#endif
+
 #define mb()		dsb(sy)
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 43da6dd29592..806e9dc2a852 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -11,6 +11,7 @@
 #define CTR_L1IP_MASK		3
 #define CTR_DMINLINE_SHIFT	16
 #define CTR_IMINLINE_SHIFT	0
+#define CTR_IMINLINE_MASK	0xf
 #define CTR_ERG_SHIFT		20
 #define CTR_CWG_SHIFT		24
 #define CTR_CWG_MASK		15
@@ -18,7 +19,7 @@
 #define CTR_DIC_SHIFT		29
 
 #define CTR_CACHE_MINLINE_MASK	\
-	(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+	(0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
 
 #define CTR_L1IP(ctr)		(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
 
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index f19fe4b9acc4..b92683871119 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -52,7 +52,11 @@
 #define ARM64_HAS_IRQ_PRIO_MASKING		42
 #define ARM64_HAS_DCPODP			43
 #define ARM64_WORKAROUND_1463225		44
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
+#define ARM64_WORKAROUND_1542419		47
+#define ARM64_WORKAROUND_1319367		48
 
-#define ARM64_NCAPS				45
+#define ARM64_NCAPS				49
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9cde5d2e768f..4261d55e8506 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -659,6 +659,20 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
 	default: return CONFIG_ARM64_PA_BITS;
 	}
 }
+
+/* Check whether hardware update of the Access flag is supported */
+static inline bool cpu_has_hw_af(void)
+{
+	u64 mmfr1;
+
+	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
+		return false;
+
+	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+	return cpuid_feature_extract_unsigned_field(mmfr1,
+						ID_AA64MMFR1_HADBS_SHIFT);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 063c964af705..72acd2db167f 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -8,7 +8,9 @@
 #include <linux/irqflags.h>
 
 #include <asm/arch_gicv3.h>
+#include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/ptrace.h>
 
 #define DAIF_PROCCTX		0
 #define DAIF_PROCCTX_NOIRQ	PSR_I_BIT
@@ -65,7 +67,7 @@ static inline void local_daif_restore(unsigned long flags)
 
 		if (system_uses_irq_prio_masking()) {
 			gic_write_pmr(GIC_PRIO_IRQON);
-			dsb(sy);
+			pmr_sync();
 		}
 	} else if (system_uses_irq_prio_masking()) {
 		u64 pmr;
@@ -109,4 +111,19 @@ static inline void local_daif_restore(unsigned long flags)
 		trace_hardirqs_off();
 }
 
+/*
+ * Called by synchronous exception handlers to restore the DAIF bits that were
+ * modified by taking an exception.
+ */
+static inline void local_daif_inherit(struct pt_regs *regs)
+{
+	unsigned long flags = regs->pstate & DAIF_MASK;
+
+	/*
+	 * We can't use local_daif_restore(regs->pstate) here as
+	 * system_has_prio_mask_debugging() won't restore the I bit if it can
+	 * use the pmr instead.
+	 */
+	write_sysreg(flags, daif);
+}
 #endif
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index a17393ff6677..4d5f3b5f50cd 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -8,14 +8,15 @@
 #define __ASM_EXCEPTION_H
 
 #include <asm/esr.h>
+#include <asm/kprobes.h>
+#include <asm/ptrace.h>
 #include <linux/interrupt.h>
 
-#define __exception	__attribute__((section(".exception.text")))
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define __exception_irq_entry	__irq_entry
 #else
-#define __exception_irq_entry	__exception
+#define __exception_irq_entry	__kprobes
 #endif
 
 static inline u32 disr_to_esr(u64 disr)
@@ -31,5 +32,22 @@ static inline u32 disr_to_esr(u64 disr)
 }
 
 asmlinkage void enter_from_user_mode(void);
+void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+void do_undefinstr(struct pt_regs *regs);
+asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+			struct pt_regs *regs);
+void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
+void do_sve_acc(unsigned int esr, struct pt_regs *regs);
+void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
+void do_sysinstr(unsigned int esr, struct pt_regs *regs);
+void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
+void do_cp15instr(unsigned int esr, struct pt_regs *regs);
+void el0_svc_handler(struct pt_regs *regs);
+void el0_svc_compat_handler(struct pt_regs *regs);
+void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
+			    struct pt_regs *regs);
 
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index d48667b04c41..91fa4baa1a93 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -11,9 +11,20 @@
 #include <asm/insn.h>
 
 #define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#else
 #define MCOUNT_ADDR		((unsigned long)_mcount)
+#endif
+
+/* The BL at the callsite's adjusted rec->ip */
 #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
 
+#define FTRACE_PLT_IDX		0
+#define FTRACE_REGS_PLT_IDX	1
+#define NR_FTRACE_PLTS		2
+
 /*
  * Currently, gcc tends to save the link register after the local variables
  * on the stack. This causes the max stack tracer to report the function
@@ -44,12 +55,24 @@ extern void return_to_handler(void);
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	/*
+	 * Adjust addr to point at the BL in the callsite.
+	 * See ftrace_init_nop() for the callsite sequence.
+	 */
+	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+		return addr + AARCH64_INSN_SIZE;
+	/*
 	 * addr is the address of the mcount call instruction.
 	 * recordmcount does the necessary offset calculation.
 	 */
 	return addr;
 }
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
 #define ftrace_return_address(n) return_address(n)
 
 /*
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 39e7780bedd6..bb313dde58a4 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -440,6 +440,9 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
 					 int shift,
 					 enum aarch64_insn_variant variant,
 					 enum aarch64_insn_logic_type type);
+u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
+			      enum aarch64_insn_register src,
+			      enum aarch64_insn_variant variant);
 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
 				       enum aarch64_insn_variant variant,
 				       enum aarch64_insn_register Rn,
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 1a59f0ed1ae3..aa4b6521ef14 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -6,6 +6,7 @@
 #define __ASM_IRQFLAGS_H
 
 #include <asm/alternative.h>
+#include <asm/barrier.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
@@ -34,14 +35,14 @@ static inline void arch_local_irq_enable(void)
 	}
 
 	asm volatile(ALTERNATIVE(
-		"msr	daifclr, #2		// arch_local_irq_enable\n"
-		"nop",
-		__msr_s(SYS_ICC_PMR_EL1, "%0")
-		"dsb	sy",
+		"msr	daifclr, #2		// arch_local_irq_enable",
+		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
 		: "r" ((unsigned long) GIC_PRIO_IRQON)
 		: "memory");
+
+	pmr_sync();
 }
 
 static inline void arch_local_irq_disable(void)
@@ -116,14 +117,14 @@ static inline unsigned long arch_local_irq_save(void)
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	asm volatile(ALTERNATIVE(
-		"msr	daif, %0\n"
-		"nop",
-		__msr_s(SYS_ICC_PMR_EL1, "%0")
-		"dsb	sy",
-		ARM64_HAS_IRQ_PRIO_MASKING)
+		"msr	daif, %0",
+		__msr_s(SYS_ICC_PMR_EL1, "%0"),
+		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
 		: "r" (flags)
 		: "memory");
+
+	pmr_sync();
 }
 
 #endif /* __ASM_IRQFLAGS_H */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f656169db8c3..5ecb091c8576 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -600,8 +600,7 @@ static inline void kvm_arm_vhe_guest_enter(void)
 	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
 	 * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
 	 */
-	if (system_uses_irq_prio_masking())
-		dsb(sy);
+	pmr_sync();
 }
 
 static inline void kvm_arm_vhe_guest_exit(void)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 4867e58dbc9c..a4f9ca5479b0 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -209,12 +209,18 @@ static inline unsigned long kaslr_offset(void)
  * up with a tagged userland pointer. Clear the tag to get a sane pointer to
  * pass on to access_ok(), for instance.
  */
-#define untagged_addr(addr)	\
+#define __untagged_addr(addr)	\
 	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
 
+#define untagged_addr(addr)	({					\
+	u64 __addr = (__force u64)(addr);				\
+	__addr &= __untagged_addr(__addr);				\
+	(__force __typeof__(addr))__addr;				\
+})
+
 #ifdef CONFIG_KASAN_SW_TAGS
 #define __tag_shifted(tag)	((u64)(tag) << 56)
-#define __tag_reset(addr)	untagged_addr(addr)
+#define __tag_reset(addr)	__untagged_addr(addr)
 #define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
 #else
 #define __tag_shifted(tag)	0UL
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index f80e13cbf8ec..1e93de68c044 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -21,7 +21,7 @@ struct mod_arch_specific {
 	struct mod_plt_sec	init;
 
 	/* for CONFIG_DYNAMIC_FTRACE */
-	struct plt_entry	*ftrace_trampoline;
+	struct plt_entry	*ftrace_trampolines;
 };
 #endif
 
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4438a23f969c..5716fe86e7b9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -876,15 +876,26 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#define kc_vaddr_to_offset(v)	((v) & ~PAGE_END)
-#define kc_offset_to_vaddr(o)	((o) | PAGE_END)
-
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
 #else
 #define phys_to_ttbr(addr)	(addr)
 #endif
 
+/*
+ * On arm64 without hardware Access Flag, copying from user will fail because
+ * the pte is old and cannot be marked young. So we always end up with zeroed
+ * page after fork() + CoW for pfn mappings. We don't always have a
+ * hardware-managed access flag on arm64.
+ */
+static inline bool arch_faults_on_old_pte(void)
+{
+	WARN_ON(preemptible());
+
+	return !cpu_has_hw_af();
+}
+#define arch_faults_on_old_pte arch_faults_on_old_pte
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 586fcd4b1965..5ba63204d078 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -26,10 +26,12 @@
 #include <linux/init.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/thread_info.h>
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/kasan.h>
 #include <asm/lse.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pointer_auth.h>
@@ -214,6 +216,18 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->sp = sp;
 }
 
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+	/* entry assembly clears tags for TTBR0 addrs */
+	return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+}
+
 #ifdef CONFIG_COMPAT
 static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 				       unsigned long sp)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 972d196c7714..6e919fafb43d 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -212,7 +212,7 @@
 #define SYS_FAR_EL1			sys_reg(3, 0, 6, 0, 0)
 #define SYS_PAR_EL1			sys_reg(3, 0, 7, 4, 0)
 
-#define SYS_PAR_EL1_F			BIT(1)
+#define SYS_PAR_EL1_F			BIT(0)
 #define SYS_PAR_EL1_FST			GENMASK(6, 1)
 
 /*** Statistical Profiling Extension ***/
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 59690613ac31..cee5928e1b7d 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -42,16 +42,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
 	       ptr < (unsigned long)&__irqentry_text_end;
 }
 
-static inline int in_exception_text(unsigned long ptr)
-{
-	int in;
-
-	in = ptr >= (unsigned long)&__exception_text_start &&
-	     ptr < (unsigned long)&__exception_text_end;
-
-	return in ? : __in_irqentry_text(ptr);
-}
-
 static inline int in_entry_text(unsigned long ptr)
 {
 	return ptr >= (unsigned long)&__entry_text_start &&
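A note on the untagged_addr() change in memory.h above: the new definition strips the tag byte only when bit 55 of the address is clear (a TTBR0/user address), and passes TTBR1 addresses through unchanged so that KASAN_SW_TAGS pointer tags survive. The following stand-alone C sketch reproduces the arithmetic outside the kernel; the helper names and the sample addresses are made up for illustration, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel's sign_extend64(addr, 55): shift the tag byte
 * out, then arithmetic-shift copies of bit 55 back in (the right shift of
 * a negative value is implementation-defined in ISO C, but arithmetic on
 * every mainstream compiler). */
static uint64_t sign_extend55(uint64_t addr)
{
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

/* New untagged_addr(): ANDing with the sign-extended value clears the tag
 * byte when bit 55 is 0 (TTBR0/user) and leaves the address untouched when
 * bit 55 is 1 (TTBR1/kernel). */
static uint64_t untagged(uint64_t addr)
{
	return addr & sign_extend55(addr);
}

int main(void)
{
	uint64_t user   = 0x2a00007fdeadbeefULL; /* tag 0x2a, bit 55 clear */
	uint64_t kernel = 0xb4ff800011223344ULL; /* tag 0xb4, bit 55 set */

	printf("user:   %016llx -> %016llx\n", (unsigned long long)user,
	       (unsigned long long)untagged(user));
	printf("kernel: %016llx -> %016llx\n", (unsigned long long)kernel,
	       (unsigned long long)untagged(kernel));
	return 0;
}

Running this prints 0x0000007fdeadbeef for the user address (tag cleared) and the kernel address unchanged, which is exactly why __tag_reset() keeps using the old sign-extending __untagged_addr() while untagged_addr() gains the AND.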
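On the cache.h hunk: CTR_IMINLINE_MASK is the 4-bit mask for the CTR_EL0.IminLine field, which architecturally encodes log2 of the number of 4-byte words in the smallest instruction-cache line, so the line size in bytes is 4 << IminLine. A minimal sketch of the extraction, assuming a raw CTR_EL0 value is already in hand (the function name is mine, not the kernel's):

#include <stdint.h>

#define CTR_IMINLINE_SHIFT	0
#define CTR_IMINLINE_MASK	0xf

/* Smallest I-cache line size in bytes for a given CTR_EL0 value:
 * IminLine is log2(words), so bytes = 4 << IminLine. */
static inline unsigned int ctr_iminline_bytes(uint64_t ctr)
{
	return 4U << ((ctr >> CTR_IMINLINE_SHIFT) & CTR_IMINLINE_MASK);
}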
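The sysreg.h hunk is a one-bit bug fix: PAR_EL1.F lives in bit 0, not bit 1; bit 1 is the bottom of the FST field (bits 6:1), so the old BIT(1) definition tested a piece of the fault status code instead of the failure flag. A hedged sketch of how a caller would test the flag after an AT address-translation instruction; the helper name is local to this example:

#include <stdbool.h>
#include <stdint.h>

#define PAR_EL1_F	(1ULL << 0)	/* bit 0: translation aborted */

/* With the old BIT(1) definition this tested a bit of PAR_EL1.FST
 * (bits 6:1) instead, so a failed translation could be misreported. */
static inline bool par_translation_failed(uint64_t par)
{
	return par & PAR_EL1_F;
}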