Diffstat (limited to 'arch/arm64/include')
87 files changed, 1258 insertions, 1157 deletions
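A large share of the diff below is the arm64 atomics rework: atomic_ll_sc.h drops the out-of-line __LL_SC_PREFIX/__LL_SC_EXPORT machinery in favour of ordinary static inlines, atomic_lse.h loses the ARM64_LSE_ATOMIC_INSN fixed-register wrappers, and asm/atomic.h now routes every operation through a __lse_ll_sc_body() dispatcher provided by asm/lse.h (which is not part of this arch/arm64/include listing's excerpt shown here). Roughly, and with the exact static-key plumbing elided, that dispatcher amounts to the sketch below; system_uses_lse_atomics() is assumed to be a cheap static-key test for ARM64_HAS_LSE_ATOMICS.

/*
 * Sketch of the dispatch pattern assumed by the new asm/atomic.h below;
 * the authoritative definition lives in asm/lse.h.
 */
#define __lse_ll_sc_body(op, ...)					\
({									\
	system_uses_lse_atomics() ?					\
		__lse_##op(__VA_ARGS__) :				\
		__ll_sc_##op(__VA_ARGS__);				\
})

/* e.g. the generated arch_atomic_add_return() then reduces to: */
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return __lse_ll_sc_body(atomic_add_return, i, v);
}

With this scheme the LL/SC fallbacks are no longer forced into a separate call with hand-managed clobbers; both bodies are inlined and the branch is resolved by the capability check.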
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index c52e151afab0..d3077c991962 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -3,7 +3,7 @@ generic-y += bugs.h generic-y += delay.h generic-y += div64.h generic-y += dma.h -generic-y += dma-contiguous.h +generic-y += dma-mapping.h generic-y += early_ioremap.h generic-y += emergency-restart.h generic-y += hw_irq.h @@ -15,7 +15,6 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += mmiowb.h -generic-y += msi.h generic-y += qrwlock.h generic-y += qspinlock.h generic-y += serial.h diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index b9f8d787eea9..324e7d5ab37e 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length); static inline void apply_alternatives_module(void *start, size_t length) { } #endif -#define ALTINSTR_ENTRY(feature,cb) \ +#define ALTINSTR_ENTRY(feature) \ " .word 661b - .\n" /* label */ \ - " .if " __stringify(cb) " == 0\n" \ " .word 663f - .\n" /* new instruction */ \ - " .else\n" \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +#define ALTINSTR_ENTRY_CB(feature, cb) \ + " .word 661b - .\n" /* label */ \ " .word " __stringify(cb) "- .\n" /* callback */ \ - " .endif\n" \ " .hword " __stringify(feature) "\n" /* feature bit */ \ " .byte 662b-661b\n" /* source len */ \ " .byte 664f-663f\n" /* replacement len */ @@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { } * * Alternatives with callbacks do not generate replacement instructions. */ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ ".if "__stringify(cfg_enabled)" == 1\n" \ "661:\n\t" \ oldinstr "\n" \ "662:\n" \ ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature,cb) \ + ALTINSTR_ENTRY(feature) \ ".popsection\n" \ - " .if " __stringify(cb) " == 0\n" \ ".pushsection .altinstr_replacement, \"a\"\n" \ "663:\n\t" \ newinstr "\n" \ @@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { } ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ ".org . - (662b-661b) + (664b-663b)\n" \ - ".else\n\t" \ + ".endif\n" + +#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY_CB(feature, cb) \ + ".popsection\n" \ "663:\n\t" \ "664:\n\t" \ - ".endif\n" \ ".endif\n" #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) 
\ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) #define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb) + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) #else #include <asm/assembler.h> diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 89e4c8b79349..25fec4bde43a 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -140,10 +140,11 @@ static inline u32 gic_read_rpr(void) #define gicr_write_pendbaser(v, c) writeq_relaxed(v, c) #define gicr_read_pendbaser(c) readq_relaxed(c) -#define gits_write_vpropbaser(v, c) writeq_relaxed(v, c) +#define gicr_write_vpropbaser(v, c) writeq_relaxed(v, c) +#define gicr_read_vpropbaser(c) readq_relaxed(c) -#define gits_write_vpendbaser(v, c) writeq_relaxed(v, c) -#define gits_read_vpendbaser(c) readq_relaxed(c) +#define gicr_write_vpendbaser(v, c) writeq_relaxed(v, c) +#define gicr_read_vpendbaser(c) readq_relaxed(c) static inline bool gic_prio_masking_enabled(void) { diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h new file mode 100644 index 000000000000..3fe02da70004 --- /dev/null +++ b/arch/arm64/include/asm/archrandom.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARCHRANDOM_H +#define _ASM_ARCHRANDOM_H + +#ifdef CONFIG_ARCH_RANDOM + +#include <linux/random.h> +#include <asm/cpufeature.h> + +static inline bool __arm64_rndr(unsigned long *v) +{ + bool ok; + + /* + * Reads of RNDR set PSTATE.NZCV to 0b0000 on success, + * and set PSTATE.NZCV to 0b0100 otherwise. + */ + asm volatile( + __mrs_s("%0", SYS_RNDR_EL0) "\n" + " cset %w1, ne\n" + : "=r" (*v), "=r" (ok) + : + : "cc"); + + return ok; +} + +static inline bool __must_check arch_get_random_long(unsigned long *v) +{ + return false; +} + +static inline bool __must_check arch_get_random_int(unsigned int *v) +{ + return false; +} + +static inline bool __must_check arch_get_random_seed_long(unsigned long *v) +{ + /* + * Only support the generic interface after we have detected + * the system wide capability, avoiding complexity with the + * cpufeature code and with potential scheduling between CPUs + * with and without the feature. + */ + if (!cpus_have_const_cap(ARM64_HAS_RNG)) + return false; + + return __arm64_rndr(v); +} + + +static inline bool __must_check arch_get_random_seed_int(unsigned int *v) +{ + unsigned long val; + bool ok = arch_get_random_seed_long(&val); + + *v = val; + return ok; +} + +static inline bool __init __early_cpu_has_rndr(void) +{ + /* Open code as we run prior to the first call to cpufeature. */ + unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1); + return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf; +} + +#else + +static inline bool __arm64_rndr(unsigned long *v) { return false; } +static inline bool __init __early_cpu_has_rndr(void) { return false; } + +#endif /* CONFIG_ARCH_RANDOM */ +#endif /* _ASM_ARCHRANDOM_H */ diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index f74909ba29bd..f68a0e64482a 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -58,30 +58,4 @@ alternative_else_nop_endif .endm #endif -/* - * These macros are no-ops when UAO is present. 
- */ - .macro uaccess_disable_not_uao, tmp1, tmp2 - uaccess_ttbr0_disable \tmp1, \tmp2 -alternative_if ARM64_ALT_PAN_NOT_UAO - SET_PSTATE_PAN(1) -alternative_else_nop_endif - .endm - - .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3 - uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3 -alternative_if ARM64_ALT_PAN_NOT_UAO - SET_PSTATE_PAN(0) -alternative_else_nop_endif - .endm - -/* - * Remove the address tag from a virtual address, if present. - */ - .macro clear_address_tag, dst, addr - tst \addr, #(1 << 55) - bic \dst, \addr, #(0xff << 56) - csel \dst, \dst, \addr, eq - .endm - #endif diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index e3a15c751b13..aca337d79d12 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -40,12 +40,6 @@ msr daif, \flags .endm - /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */ - .macro inherit_daif, pstate:req, tmp:req - and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) - msr daif, \tmp - .endm - /* IRQ is the lowest priority flag, unconditionally unmask the rest. */ .macro enable_da_f msr daifclr, #(8 | 4 | 1) @@ -86,13 +80,6 @@ .endm /* - * SMP data memory barrier - */ - .macro smp_dmb, opt - dmb \opt - .endm - -/* * RAS Error Synchronization barrier */ .macro esb @@ -124,17 +111,6 @@ alternative_endif .endm /* - * Sanitise a 64-bit bounded index wrt speculation, returning zero if out - * of bounds. - */ - .macro mask_nospec64, idx, limit, tmp - sub \tmp, \idx, \limit - bic \tmp, \tmp, \idx - and \idx, \idx, \tmp, asr #63 - csdb - .endm - -/* * NOP sequence */ .macro nops, num @@ -350,6 +326,13 @@ alternative_endif .endm /* + * tcr_set_t1sz - update TCR.T1SZ + */ + .macro tcr_set_t1sz, valreg, t1sz + bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH + .endm + +/* * tcr_compute_pa_size - set TCR.(I)PS to the highest supported * ID_AA64MMFR0_EL1.PARange value * @@ -466,17 +449,6 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm /* - * Annotate a function as position independent, i.e., safe to be called before - * the kernel virtual mapping is activated. - */ -#define ENDPIPROC(x) \ - .globl __pi_##x; \ - .type __pi_##x, %function; \ - .set __pi_##x, x; \ - .size __pi_##x, . - x; \ - ENDPROC(x) - -/* * Annotate a function as being unsuitable for kprobes. */ #ifdef CONFIG_KPROBES @@ -538,9 +510,13 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * In future this may be nop'ed out when dealing with 52-bit kernel VAs. * ttbr: Value of ttbr to set, modified. */ - .macro offset_ttbr1, ttbr -#ifdef CONFIG_ARM64_USER_VA_BITS_52 + .macro offset_ttbr1, ttbr, tmp +#ifdef CONFIG_ARM64_VA_BITS_52 + mrs_s \tmp, SYS_ID_AA64MMFR2_EL1 + and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + cbnz \tmp, .Lskipoffs_\@ orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET +.Lskipoffs_\@ : #endif .endm @@ -550,7 +526,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * to be nop'ed out when dealing with 52-bit kernel VAs. */ .macro restore_ttbr1, ttbr -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET #endif .endm @@ -699,8 +675,8 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * where <label> is optional, and marks the point where execution will resume * after a yield has been performed. If omitted, execution resumes right after * the endif_yield_neon invocation. 
Note that the entire sequence, including - * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT - * is not defined. + * the provided patchup code, will be omitted from the image if + * CONFIG_PREEMPTION is not defined. * * As a convenience, in the case where no patchup code is required, the above * sequence may be abbreviated to @@ -728,7 +704,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm .macro if_will_cond_yield_neon -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION get_current_task x0 ldr x0, [x0, #TSK_TI_PREEMPT] sub x0, x0, #PREEMPT_DISABLE_OFFSET diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 657b0457d83c..9543b5e0534d 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -13,21 +13,91 @@ #include <linux/types.h> #include <asm/barrier.h> +#include <asm/cmpxchg.h> #include <asm/lse.h> -#ifdef __KERNEL__ - -#define __ARM64_IN_ATOMIC_IMPL - -#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE) -#include <asm/atomic_lse.h> -#else -#include <asm/atomic_ll_sc.h> -#endif - -#undef __ARM64_IN_ATOMIC_IMPL - -#include <asm/cmpxchg.h> +#define ATOMIC_OP(op) \ +static inline void arch_##op(int i, atomic_t *v) \ +{ \ + __lse_ll_sc_body(op, i, v); \ +} + +ATOMIC_OP(atomic_andnot) +ATOMIC_OP(atomic_or) +ATOMIC_OP(atomic_xor) +ATOMIC_OP(atomic_add) +ATOMIC_OP(atomic_and) +ATOMIC_OP(atomic_sub) + +#undef ATOMIC_OP + +#define ATOMIC_FETCH_OP(name, op) \ +static inline int arch_##op##name(int i, atomic_t *v) \ +{ \ + return __lse_ll_sc_body(op##name, i, v); \ +} + +#define ATOMIC_FETCH_OPS(op) \ + ATOMIC_FETCH_OP(_relaxed, op) \ + ATOMIC_FETCH_OP(_acquire, op) \ + ATOMIC_FETCH_OP(_release, op) \ + ATOMIC_FETCH_OP( , op) + +ATOMIC_FETCH_OPS(atomic_fetch_andnot) +ATOMIC_FETCH_OPS(atomic_fetch_or) +ATOMIC_FETCH_OPS(atomic_fetch_xor) +ATOMIC_FETCH_OPS(atomic_fetch_add) +ATOMIC_FETCH_OPS(atomic_fetch_and) +ATOMIC_FETCH_OPS(atomic_fetch_sub) +ATOMIC_FETCH_OPS(atomic_add_return) +ATOMIC_FETCH_OPS(atomic_sub_return) + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_FETCH_OPS + +#define ATOMIC64_OP(op) \ +static inline void arch_##op(long i, atomic64_t *v) \ +{ \ + __lse_ll_sc_body(op, i, v); \ +} + +ATOMIC64_OP(atomic64_andnot) +ATOMIC64_OP(atomic64_or) +ATOMIC64_OP(atomic64_xor) +ATOMIC64_OP(atomic64_add) +ATOMIC64_OP(atomic64_and) +ATOMIC64_OP(atomic64_sub) + +#undef ATOMIC64_OP + +#define ATOMIC64_FETCH_OP(name, op) \ +static inline long arch_##op##name(long i, atomic64_t *v) \ +{ \ + return __lse_ll_sc_body(op##name, i, v); \ +} + +#define ATOMIC64_FETCH_OPS(op) \ + ATOMIC64_FETCH_OP(_relaxed, op) \ + ATOMIC64_FETCH_OP(_acquire, op) \ + ATOMIC64_FETCH_OP(_release, op) \ + ATOMIC64_FETCH_OP( , op) + +ATOMIC64_FETCH_OPS(atomic64_fetch_andnot) +ATOMIC64_FETCH_OPS(atomic64_fetch_or) +ATOMIC64_FETCH_OPS(atomic64_fetch_xor) +ATOMIC64_FETCH_OPS(atomic64_fetch_add) +ATOMIC64_FETCH_OPS(atomic64_fetch_and) +ATOMIC64_FETCH_OPS(atomic64_fetch_sub) +ATOMIC64_FETCH_OPS(atomic64_add_return) +ATOMIC64_FETCH_OPS(atomic64_sub_return) + +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_FETCH_OPS + +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + return __lse_ll_sc_body(atomic64_dec_if_positive, v); +} #define ATOMIC_INIT(i) { (i) } @@ -157,5 +227,4 @@ #include <asm-generic/atomic-instrumented.h> -#endif -#endif +#endif /* __ASM_ATOMIC_H */ diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index c8c850bc3dfb..13869b76b58c 100644 --- 
a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -10,83 +10,92 @@ #ifndef __ASM_ATOMIC_LL_SC_H #define __ASM_ATOMIC_LL_SC_H -#ifndef __ARM64_IN_ATOMIC_IMPL -#error "please don't include this file directly" +#include <linux/stringify.h> + +#ifdef CONFIG_ARM64_LSE_ATOMICS +#define __LL_SC_FALLBACK(asm_ops) \ +" b 3f\n" \ +" .subsection 1\n" \ +"3:\n" \ +asm_ops "\n" \ +" b 4f\n" \ +" .previous\n" \ +"4:\n" +#else +#define __LL_SC_FALLBACK(asm_ops) asm_ops +#endif + +#ifndef CONFIG_CC_HAS_K_CONSTRAINT +#define K #endif /* * AArch64 UP and SMP safe atomic ops. We use load exclusive and * store exclusive to ensure that these are atomic. We may loop * to ensure that the update happens. - * - * NOTE: these functions do *not* follow the PCS and must explicitly - * save any clobbered registers other than x0 (regardless of return - * value). This is achieved through -fcall-saved-* compiler flags for - * this file, which unfortunately don't work on a per-function basis - * (the optimize attribute silently ignores these options). */ -#define ATOMIC_OP(op, asm_op) \ -__LL_SC_INLINE void \ -__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ +#define ATOMIC_OP(op, asm_op, constraint) \ +static inline void \ +__ll_sc_atomic_##op(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result; \ \ asm volatile("// atomic_" #op "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxr %w0, %2\n" \ " " #asm_op " %w0, %w0, %w3\n" \ " stxr %w1, %w0, %2\n" \ -" cbnz %w1, 1b" \ +" cbnz %w1, 1b\n") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i)); \ -} \ -__LL_SC_EXPORT(arch_atomic_##op); + : __stringify(constraint) "r" (i)); \ +} -#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ -__LL_SC_INLINE int \ -__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ +#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ +static inline int \ +__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result; \ \ asm volatile("// atomic_" #op "_return" #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ld" #acq "xr %w0, %2\n" \ " " #asm_op " %w0, %w0, %w3\n" \ " st" #rel "xr %w1, %w0, %2\n" \ " cbnz %w1, 1b\n" \ -" " #mb \ +" " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : __stringify(constraint) "r" (i) \ : cl); \ \ return result; \ -} \ -__LL_SC_EXPORT(arch_atomic_##op##_return##name); +} -#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ -__LL_SC_INLINE int \ -__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ +#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ +static inline int \ +__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ unsigned long tmp; \ int val, result; \ \ asm volatile("// atomic_fetch_" #op #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %3\n" \ "1: ld" #acq "xr %w0, %3\n" \ " " #asm_op " %w1, %w0, %w4\n" \ " st" #rel "xr %w2, %w1, %3\n" \ " cbnz %w2, 1b\n" \ -" " #mb \ +" " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : __stringify(constraint) "r" (i) \ : cl); \ \ return result; \ -} \ -__LL_SC_EXPORT(arch_atomic_fetch_##op##name); +} #define ATOMIC_OPS(...) 
\ ATOMIC_OP(__VA_ARGS__) \ @@ -99,8 +108,8 @@ __LL_SC_EXPORT(arch_atomic_fetch_##op##name); ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) -ATOMIC_OPS(add, add) -ATOMIC_OPS(sub, sub) +ATOMIC_OPS(add, add, I) +ATOMIC_OPS(sub, sub, J) #undef ATOMIC_OPS #define ATOMIC_OPS(...) \ @@ -110,77 +119,82 @@ ATOMIC_OPS(sub, sub) ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) -ATOMIC_OPS(and, and) -ATOMIC_OPS(andnot, bic) -ATOMIC_OPS(or, orr) -ATOMIC_OPS(xor, eor) +ATOMIC_OPS(and, and, K) +ATOMIC_OPS(or, orr, K) +ATOMIC_OPS(xor, eor, K) +/* + * GAS converts the mysterious and undocumented BIC (immediate) alias to + * an AND (immediate) instruction with the immediate inverted. We don't + * have a constraint for this, so fall back to register. + */ +ATOMIC_OPS(andnot, bic, ) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define ATOMIC64_OP(op, asm_op) \ -__LL_SC_INLINE void \ -__LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \ +#define ATOMIC64_OP(op, asm_op, constraint) \ +static inline void \ +__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ { \ s64 result; \ unsigned long tmp; \ \ asm volatile("// atomic64_" #op "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxr %0, %2\n" \ " " #asm_op " %0, %0, %3\n" \ " stxr %w1, %0, %2\n" \ -" cbnz %w1, 1b" \ +" cbnz %w1, 1b") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i)); \ -} \ -__LL_SC_EXPORT(arch_atomic64_##op); + : __stringify(constraint) "r" (i)); \ +} -#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ -__LL_SC_INLINE s64 \ -__LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\ +#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ +static inline long \ +__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ { \ s64 result; \ unsigned long tmp; \ \ asm volatile("// atomic64_" #op "_return" #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ld" #acq "xr %0, %2\n" \ " " #asm_op " %0, %0, %3\n" \ " st" #rel "xr %w1, %0, %2\n" \ " cbnz %w1, 1b\n" \ -" " #mb \ +" " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : __stringify(constraint) "r" (i) \ : cl); \ \ return result; \ -} \ -__LL_SC_EXPORT(arch_atomic64_##op##_return##name); +} -#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ -__LL_SC_INLINE s64 \ -__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \ +#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ +static inline long \ +__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ { \ s64 result, val; \ unsigned long tmp; \ \ asm volatile("// atomic64_fetch_" #op #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %3\n" \ "1: ld" #acq "xr %0, %3\n" \ " " #asm_op " %1, %0, %4\n" \ " st" #rel "xr %w2, %1, %3\n" \ " cbnz %w2, 1b\n" \ -" " #mb \ +" " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : __stringify(constraint) "r" (i) \ : cl); \ \ return result; \ -} \ -__LL_SC_EXPORT(arch_atomic64_fetch_##op##name); +} #define ATOMIC64_OPS(...) 
\ ATOMIC64_OP(__VA_ARGS__) \ @@ -193,8 +207,8 @@ __LL_SC_EXPORT(arch_atomic64_fetch_##op##name); ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) -ATOMIC64_OPS(add, add) -ATOMIC64_OPS(sub, sub) +ATOMIC64_OPS(add, add, I) +ATOMIC64_OPS(sub, sub, J) #undef ATOMIC64_OPS #define ATOMIC64_OPS(...) \ @@ -204,23 +218,29 @@ ATOMIC64_OPS(sub, sub) ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) -ATOMIC64_OPS(and, and) -ATOMIC64_OPS(andnot, bic) -ATOMIC64_OPS(or, orr) -ATOMIC64_OPS(xor, eor) +ATOMIC64_OPS(and, and, L) +ATOMIC64_OPS(or, orr, L) +ATOMIC64_OPS(xor, eor, L) +/* + * GAS converts the mysterious and undocumented BIC (immediate) alias to + * an AND (immediate) instruction with the immediate inverted. We don't + * have a constraint for this, so fall back to register. + */ +ATOMIC64_OPS(andnot, bic, ) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -__LL_SC_INLINE s64 -__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) +static inline s64 +__ll_sc_atomic64_dec_if_positive(atomic64_t *v) { s64 result; unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" + __LL_SC_FALLBACK( " prfm pstl1strm, %2\n" "1: ldxr %0, %2\n" " subs %0, %0, #1\n" @@ -228,20 +248,19 @@ __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) " stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" " dmb ish\n" -"2:" +"2:") : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : : "cc", "memory"); return result; } -__LL_SC_EXPORT(arch_atomic64_dec_if_positive); -#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ -__LL_SC_INLINE u##sz \ -__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \ +static inline u##sz \ +__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ unsigned long old, \ - u##sz new)) \ + u##sz new) \ { \ unsigned long tmp; \ u##sz oldval; \ @@ -255,6 +274,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ old = (u##sz)old; \ \ asm volatile( \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %[v]\n" \ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ @@ -262,46 +282,51 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ " cbnz %w[tmp], 1b\n" \ " " #mb "\n" \ - "2:" \ + "2:") \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ [v] "+Q" (*(u##sz *)ptr) \ - : [old] "Kr" (old), [new] "r" (new) \ + : [old] __stringify(constraint) "r" (old), [new] "r" (new) \ : cl); \ \ return oldval; \ -} \ -__LL_SC_EXPORT(__cmpxchg_case_##name##sz); +} -__CMPXCHG_CASE(w, b, , 8, , , , ) -__CMPXCHG_CASE(w, h, , 16, , , , ) -__CMPXCHG_CASE(w, , , 32, , , , ) -__CMPXCHG_CASE( , , , 64, , , , ) -__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory") -__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory") -__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory") -__CMPXCHG_CASE( , , acq_, 64, , a, , "memory") -__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory") -__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory") -__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory") -__CMPXCHG_CASE( , , rel_, 64, , , l, "memory") -__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory") -__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory") -__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory") -__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory") +/* + * Earlier versions of GCC (no later 
than 8.1.0) appear to incorrectly + * handle the 'K' constraint for the value 4294967295 - thus we use no + * constraint for 32 bit operations. + */ +__CMPXCHG_CASE(w, b, , 8, , , , , K) +__CMPXCHG_CASE(w, h, , 16, , , , , K) +__CMPXCHG_CASE(w, , , 32, , , , , K) +__CMPXCHG_CASE( , , , 64, , , , , L) +__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", K) +__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K) +__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", K) +__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L) +__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K) +__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K) +__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K) +__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L) +__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K) +__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K) +__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K) +__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) #undef __CMPXCHG_CASE #define __CMPXCHG_DBL(name, mb, rel, cl) \ -__LL_SC_INLINE long \ -__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \ +static inline long \ +__ll_sc__cmpxchg_double##name(unsigned long old1, \ unsigned long old2, \ unsigned long new1, \ unsigned long new2, \ - volatile void *ptr)) \ + volatile void *ptr) \ { \ unsigned long tmp, ret; \ \ asm volatile("// __cmpxchg_double" #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxp %0, %1, %2\n" \ " eor %0, %0, %3\n" \ @@ -311,18 +336,18 @@ __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \ " st" #rel "xp %w0, %5, %6, %2\n" \ " cbnz %w0, 1b\n" \ " " #mb "\n" \ - "2:" \ + "2:") \ : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ : cl); \ \ return ret; \ -} \ -__LL_SC_EXPORT(__cmpxchg_double##name); +} __CMPXCHG_DBL( , , , ) __CMPXCHG_DBL(_mb, dmb ish, l, "memory") #undef __CMPXCHG_DBL +#undef K #endif /* __ASM_ATOMIC_LL_SC_H */ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 69acb1c19a15..da3280f639cd 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -10,22 +10,14 @@ #ifndef __ASM_ATOMIC_LSE_H #define __ASM_ATOMIC_LSE_H -#ifndef __ARM64_IN_ATOMIC_IMPL -#error "please don't include this file directly" -#endif - -#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op) #define ATOMIC_OP(op, asm_op) \ -static inline void arch_atomic_##op(int i, atomic_t *v) \ +static inline void __lse_atomic_##op(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \ -" " #asm_op " %w[i], %[v]\n") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS); \ + asm volatile( \ + __LSE_PREAMBLE \ +" " #asm_op " %w[i], %[v]\n" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v)); \ } ATOMIC_OP(andnot, stclr) @@ -36,21 +28,16 @@ ATOMIC_OP(add, stadd) #undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) 
\ -static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \ +static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(fetch_##op##name), \ - /* LSE atomics */ \ -" " #asm_op #mb " %w[i], %w[i], %[v]") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + asm volatile( \ + __LSE_PREAMBLE \ +" " #asm_op #mb " %w[i], %w[i], %[v]" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return w0; \ + return i; \ } #define ATOMIC_FETCH_OPS(op, asm_op) \ @@ -68,23 +55,19 @@ ATOMIC_FETCH_OPS(add, ldadd) #undef ATOMIC_FETCH_OPS #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \ -static inline int arch_atomic_add_return##name(int i, atomic_t *v) \ +static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ + u32 tmp; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(add_return##name) \ - __nops(1), \ - /* LSE atomics */ \ - " ldadd" #mb " %w[i], w30, %[v]\n" \ - " add %w[i], %w[i], w30") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + asm volatile( \ + __LSE_PREAMBLE \ + " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ + " add %w[i], %w[i], %w[tmp]" \ + : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ + : "r" (v) \ + : cl); \ \ - return w0; \ + return i; \ } ATOMIC_OP_ADD_RETURN(_relaxed, ) @@ -94,41 +77,28 @@ ATOMIC_OP_ADD_RETURN( , al, "memory") #undef ATOMIC_OP_ADD_RETURN -static inline void arch_atomic_and(int i, atomic_t *v) +static inline void __lse_atomic_and(int i, atomic_t *v) { - register int w0 asm ("w0") = i; - register atomic_t *x1 asm ("x1") = v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC(and) - __nops(1), - /* LSE atomics */ + asm volatile( + __LSE_PREAMBLE " mvn %w[i], %w[i]\n" - " stclr %w[i], %[v]") - : [i] "+&r" (w0), [v] "+Q" (v->counter) - : "r" (x1) - : __LL_SC_CLOBBERS); + " stclr %w[i], %[v]" + : [i] "+&r" (i), [v] "+Q" (v->counter) + : "r" (v)); } #define ATOMIC_FETCH_OP_AND(name, mb, cl...) 
\ -static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \ +static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(fetch_and##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ + __LSE_PREAMBLE \ " mvn %w[i], %w[i]\n" \ - " ldclr" #mb " %w[i], %w[i], %[v]") \ - : [i] "+&r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldclr" #mb " %w[i], %w[i], %[v]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return w0; \ + return i; \ } ATOMIC_FETCH_OP_AND(_relaxed, ) @@ -138,42 +108,31 @@ ATOMIC_FETCH_OP_AND( , al, "memory") #undef ATOMIC_FETCH_OP_AND -static inline void arch_atomic_sub(int i, atomic_t *v) +static inline void __lse_atomic_sub(int i, atomic_t *v) { - register int w0 asm ("w0") = i; - register atomic_t *x1 asm ("x1") = v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC(sub) - __nops(1), - /* LSE atomics */ + asm volatile( + __LSE_PREAMBLE " neg %w[i], %w[i]\n" - " stadd %w[i], %[v]") - : [i] "+&r" (w0), [v] "+Q" (v->counter) - : "r" (x1) - : __LL_SC_CLOBBERS); + " stadd %w[i], %[v]" + : [i] "+&r" (i), [v] "+Q" (v->counter) + : "r" (v)); } #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ -static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \ +static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ + u32 tmp; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(sub_return##name) \ - __nops(2), \ - /* LSE atomics */ \ + asm volatile( \ + __LSE_PREAMBLE \ " neg %w[i], %w[i]\n" \ - " ldadd" #mb " %w[i], w30, %[v]\n" \ - " add %w[i], %w[i], w30") \ - : [i] "+&r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS , ##cl); \ + " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ + " add %w[i], %w[i], %w[tmp]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ + : "r" (v) \ + : cl); \ \ - return w0; \ + return i; \ } ATOMIC_OP_SUB_RETURN(_relaxed, ) @@ -184,23 +143,17 @@ ATOMIC_OP_SUB_RETURN( , al, "memory") #undef ATOMIC_OP_SUB_RETURN #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) 
\ -static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \ +static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(fetch_sub##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ + __LSE_PREAMBLE \ " neg %w[i], %w[i]\n" \ - " ldadd" #mb " %w[i], %w[i], %[v]") \ - : [i] "+&r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldadd" #mb " %w[i], %w[i], %[v]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return w0; \ + return i; \ } ATOMIC_FETCH_OP_SUB(_relaxed, ) @@ -209,20 +162,15 @@ ATOMIC_FETCH_OP_SUB(_release, l, "memory") ATOMIC_FETCH_OP_SUB( , al, "memory") #undef ATOMIC_FETCH_OP_SUB -#undef __LL_SC_ATOMIC -#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op) #define ATOMIC64_OP(op, asm_op) \ -static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \ +static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \ -" " #asm_op " %[i], %[v]\n") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS); \ + asm volatile( \ + __LSE_PREAMBLE \ +" " #asm_op " %[i], %[v]\n" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v)); \ } ATOMIC64_OP(andnot, stclr) @@ -233,21 +181,16 @@ ATOMIC64_OP(add, stadd) #undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ -static inline s64 arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(fetch_##op##name), \ - /* LSE atomics */ \ -" " #asm_op #mb " %[i], %[i], %[v]") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + asm volatile( \ + __LSE_PREAMBLE \ +" " #asm_op #mb " %[i], %[i], %[v]" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return x0; \ + return i; \ } #define ATOMIC64_FETCH_OPS(op, asm_op) \ @@ -265,23 +208,19 @@ ATOMIC64_FETCH_OPS(add, ldadd) #undef ATOMIC64_FETCH_OPS #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) 
\ -static inline s64 arch_atomic64_add_return##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ + unsigned long tmp; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(add_return##name) \ - __nops(1), \ - /* LSE atomics */ \ - " ldadd" #mb " %[i], x30, %[v]\n" \ - " add %[i], %[i], x30") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + asm volatile( \ + __LSE_PREAMBLE \ + " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ + " add %[i], %[i], %x[tmp]" \ + : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ + : "r" (v) \ + : cl); \ \ - return x0; \ + return i; \ } ATOMIC64_OP_ADD_RETURN(_relaxed, ) @@ -291,41 +230,28 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory") #undef ATOMIC64_OP_ADD_RETURN -static inline void arch_atomic64_and(s64 i, atomic64_t *v) +static inline void __lse_atomic64_and(s64 i, atomic64_t *v) { - register s64 x0 asm ("x0") = i; - register atomic64_t *x1 asm ("x1") = v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC64(and) - __nops(1), - /* LSE atomics */ + asm volatile( + __LSE_PREAMBLE " mvn %[i], %[i]\n" - " stclr %[i], %[v]") - : [i] "+&r" (x0), [v] "+Q" (v->counter) - : "r" (x1) - : __LL_SC_CLOBBERS); + " stclr %[i], %[v]" + : [i] "+&r" (i), [v] "+Q" (v->counter) + : "r" (v)); } #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \ -static inline s64 arch_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(fetch_and##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ + __LSE_PREAMBLE \ " mvn %[i], %[i]\n" \ - " ldclr" #mb " %[i], %[i], %[v]") \ - : [i] "+&r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldclr" #mb " %[i], %[i], %[v]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return x0; \ + return i; \ } ATOMIC64_FETCH_OP_AND(_relaxed, ) @@ -335,42 +261,31 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") #undef ATOMIC64_FETCH_OP_AND -static inline void arch_atomic64_sub(s64 i, atomic64_t *v) +static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) { - register s64 x0 asm ("x0") = i; - register atomic64_t *x1 asm ("x1") = v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC64(sub) - __nops(1), - /* LSE atomics */ + asm volatile( + __LSE_PREAMBLE " neg %[i], %[i]\n" - " stadd %[i], %[v]") - : [i] "+&r" (x0), [v] "+Q" (v->counter) - : "r" (x1) - : __LL_SC_CLOBBERS); + " stadd %[i], %[v]" + : [i] "+&r" (i), [v] "+Q" (v->counter) + : "r" (v)); } #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) 
\ -static inline s64 arch_atomic64_sub_return##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ + unsigned long tmp; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(sub_return##name) \ - __nops(2), \ - /* LSE atomics */ \ + asm volatile( \ + __LSE_PREAMBLE \ " neg %[i], %[i]\n" \ - " ldadd" #mb " %[i], x30, %[v]\n" \ - " add %[i], %[i], x30") \ - : [i] "+&r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ + " add %[i], %[i], %x[tmp]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ + : "r" (v) \ + : cl); \ \ - return x0; \ + return i; \ } ATOMIC64_OP_SUB_RETURN(_relaxed, ) @@ -381,23 +296,17 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory") #undef ATOMIC64_OP_SUB_RETURN #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ -static inline s64 arch_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(fetch_sub##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ + __LSE_PREAMBLE \ " neg %[i], %[i]\n" \ - " ldadd" #mb " %[i], %[i], %[v]") \ - : [i] "+&r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldadd" #mb " %[i], %[i], %[v]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return x0; \ + return i; \ } ATOMIC64_FETCH_OP_SUB(_relaxed, ) @@ -407,54 +316,47 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory") #undef ATOMIC64_FETCH_OP_SUB -static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) +static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) { - register long x0 asm ("x0") = (long)v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC64(dec_if_positive) - __nops(6), - /* LSE atomics */ - "1: ldr x30, %[v]\n" - " subs %[ret], x30, #1\n" + unsigned long tmp; + + asm volatile( + __LSE_PREAMBLE + "1: ldr %x[tmp], %[v]\n" + " subs %[ret], %x[tmp], #1\n" " b.lt 2f\n" - " casal x30, %[ret], %[v]\n" - " sub x30, x30, #1\n" - " sub x30, x30, %[ret]\n" - " cbnz x30, 1b\n" - "2:") - : [ret] "+&r" (x0), [v] "+Q" (v->counter) + " casal %x[tmp], %[ret], %[v]\n" + " sub %x[tmp], %x[tmp], #1\n" + " sub %x[tmp], %x[tmp], %[ret]\n" + " cbnz %x[tmp], 1b\n" + "2:" + : [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) : - : __LL_SC_CLOBBERS, "cc", "memory"); + : "cc", "memory"); - return x0; + return (long)v; } -#undef __LL_SC_ATOMIC64 - -#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op) - #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) 
\ -static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ +static __always_inline u##sz \ +__lse__cmpxchg_case_##name##sz(volatile void *ptr, \ u##sz old, \ u##sz new) \ { \ register unsigned long x0 asm ("x0") = (unsigned long)ptr; \ register u##sz x1 asm ("x1") = old; \ register u##sz x2 asm ("x2") = new; \ + unsigned long tmp; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_CMPXCHG(name##sz) \ - __nops(2), \ - /* LSE atomics */ \ - " mov " #w "30, %" #w "[old]\n" \ - " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \ - " mov %" #w "[ret], " #w "30") \ - : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \ + asm volatile( \ + __LSE_PREAMBLE \ + " mov %" #w "[tmp], %" #w "[old]\n" \ + " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \ + " mov %" #w "[ret], %" #w "[tmp]" \ + : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr), \ + [tmp] "=&r" (tmp) \ : [old] "r" (x1), [new] "r" (x2) \ - : __LL_SC_CLOBBERS, ##cl); \ + : cl); \ \ return x0; \ } @@ -476,13 +378,11 @@ __CMPXCHG_CASE(w, h, mb_, 16, al, "memory") __CMPXCHG_CASE(w, , mb_, 32, al, "memory") __CMPXCHG_CASE(x, , mb_, 64, al, "memory") -#undef __LL_SC_CMPXCHG #undef __CMPXCHG_CASE -#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op) - #define __CMPXCHG_DBL(name, mb, cl...) \ -static inline long __cmpxchg_double##name(unsigned long old1, \ +static __always_inline long \ +__lse__cmpxchg_double##name(unsigned long old1, \ unsigned long old2, \ unsigned long new1, \ unsigned long new2, \ @@ -496,20 +396,17 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ register unsigned long x3 asm ("x3") = new2; \ register unsigned long x4 asm ("x4") = (unsigned long)ptr; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_CMPXCHG_DBL(name) \ - __nops(3), \ - /* LSE atomics */ \ + asm volatile( \ + __LSE_PREAMBLE \ " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\ " eor %[old1], %[old1], %[oldval1]\n" \ " eor %[old2], %[old2], %[oldval2]\n" \ - " orr %[old1], %[old1], %[old2]") \ + " orr %[old1], %[old1], %[old2]" \ : [old1] "+&r" (x0), [old2] "+&r" (x1), \ [v] "+Q" (*(unsigned long *)ptr) \ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ - : __LL_SC_CLOBBERS, ##cl); \ + : cl); \ \ return x0; \ } @@ -517,7 +414,6 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ __CMPXCHG_DBL( , ) __CMPXCHG_DBL(_mb, al, "memory") -#undef __LL_SC_CMPXCHG_DBL #undef __CMPXCHG_DBL #endif /* __ASM_ATOMIC_LSE_H */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index e0e2b1946f42..7d9cc5ec4971 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -29,6 +29,18 @@ SB_BARRIER_INSN"nop\n", \ ARM64_HAS_SB)) +#ifdef CONFIG_ARM64_PSEUDO_NMI +#define pmr_sync() \ + do { \ + extern struct static_key_false gic_pmr_sync; \ + \ + if (static_branch_unlikely(&gic_pmr_sync)) \ + dsb(sy); \ + } while(0) +#else +#define pmr_sync() do {} while (0) +#endif + #define mb() dsb(sy) #define rmb() dsb(ld) #define wmb() dsb(st) diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 64eeaa41e7ca..806e9dc2a852 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -11,6 +11,7 @@ #define CTR_L1IP_MASK 3 #define CTR_DMINLINE_SHIFT 16 #define CTR_IMINLINE_SHIFT 0 +#define CTR_IMINLINE_MASK 0xf #define CTR_ERG_SHIFT 20 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 @@ -18,7 +19,7 @@ #define CTR_DIC_SHIFT 
29 #define CTR_CACHE_MINLINE_MASK \ - (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT) + (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT) #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) @@ -78,7 +79,7 @@ static inline u32 cache_type_cwg(void) return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK; } -#define __read_mostly __attribute__((__section__(".data..read_mostly"))) +#define __read_mostly __section(.data..read_mostly) static inline int cache_line_size_of_cpu(void) { diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index d064a50deb5f..8d2a7de39744 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -35,6 +35,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } #define ip_fast_csum ip_fast_csum +extern unsigned int do_csum(const unsigned char *buff, int len); +#define do_csum do_csum + #include <asm-generic/checksum.h> #endif /* __ASM_CHECKSUM_H */ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 7a299a20f6dc..f9bef42c1411 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -10,7 +10,6 @@ #include <linux/build_bug.h> #include <linux/compiler.h> -#include <asm/atomic.h> #include <asm/barrier.h> #include <asm/lse.h> @@ -63,7 +62,7 @@ __XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory") #undef __XCHG_CASE #define __XCHG_GEN(sfx) \ -static inline unsigned long __xchg##sfx(unsigned long x, \ +static __always_inline unsigned long __xchg##sfx(unsigned long x, \ volatile void *ptr, \ int size) \ { \ @@ -104,8 +103,52 @@ __XCHG_GEN(_mb) #define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__) #define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) +#define __CMPXCHG_CASE(name, sz) \ +static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ + u##sz old, \ + u##sz new) \ +{ \ + return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \ + ptr, old, new); \ +} + +__CMPXCHG_CASE( , 8) +__CMPXCHG_CASE( , 16) +__CMPXCHG_CASE( , 32) +__CMPXCHG_CASE( , 64) +__CMPXCHG_CASE(acq_, 8) +__CMPXCHG_CASE(acq_, 16) +__CMPXCHG_CASE(acq_, 32) +__CMPXCHG_CASE(acq_, 64) +__CMPXCHG_CASE(rel_, 8) +__CMPXCHG_CASE(rel_, 16) +__CMPXCHG_CASE(rel_, 32) +__CMPXCHG_CASE(rel_, 64) +__CMPXCHG_CASE(mb_, 8) +__CMPXCHG_CASE(mb_, 16) +__CMPXCHG_CASE(mb_, 32) +__CMPXCHG_CASE(mb_, 64) + +#undef __CMPXCHG_CASE + +#define __CMPXCHG_DBL(name) \ +static inline long __cmpxchg_double##name(unsigned long old1, \ + unsigned long old2, \ + unsigned long new1, \ + unsigned long new2, \ + volatile void *ptr) \ +{ \ + return __lse_ll_sc_body(_cmpxchg_double##name, \ + old1, old2, new1, new2, ptr); \ +} + +__CMPXCHG_DBL( ) +__CMPXCHG_DBL(_mb) + +#undef __CMPXCHG_DBL + #define __CMPXCHG_GEN(sfx) \ -static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ +static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ unsigned long old, \ unsigned long new, \ int size) \ @@ -212,7 +255,7 @@ __CMPWAIT_CASE( , , 64); #undef __CMPWAIT_CASE #define __CMPWAIT_GEN(sfx) \ -static inline void __cmpwait##sfx(volatile void *ptr, \ +static __always_inline void __cmpwait##sfx(volatile void *ptr, \ unsigned long val, \ int size) \ { \ diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index fb8ad4616b3b..935d2aa231bf 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -4,7 +4,9 @@ */ #ifndef __ASM_COMPAT_H #define __ASM_COMPAT_H -#ifdef 
__KERNEL__ + +#include <asm-generic/compat.h> + #ifdef CONFIG_COMPAT /* @@ -14,8 +16,6 @@ #include <linux/sched.h> #include <linux/sched/task_stack.h> -#include <asm-generic/compat.h> - #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ #define COMPAT_UTS_MACHINE "armv8b\0\0" @@ -114,23 +114,6 @@ typedef u32 compat_sigset_word; #define COMPAT_OFF_T_MAX 0x7fffffff -/* - * A pointer passed in from user mode. This should not - * be used for syscall parameters, just declare them - * as pointers because the syscall entry code will have - * appropriately converted them already. - */ - -static inline void __user *compat_ptr(compat_uptr_t uptr) -{ - return (void __user *)(unsigned long)uptr; -} - -static inline compat_uptr_t ptr_to_compat(void __user *uptr) -{ - return (u32)(unsigned long)uptr; -} - #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current))) #define COMPAT_MINSIGSTKSZ 2048 @@ -215,5 +198,4 @@ static inline int is_compat_thread(struct thread_info *thread) } #endif /* CONFIG_COMPAT */ -#endif /* __KERNEL__ */ #endif /* __ASM_COMPAT_H */ diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index d72d995b7e25..b4a40535a3d8 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -39,6 +39,7 @@ struct cpuinfo_arm64 { u32 reg_id_isar3; u32 reg_id_isar4; u32 reg_id_isar5; + u32 reg_id_isar6; u32 reg_id_mmfr0; u32 reg_id_mmfr1; u32 reg_id_mmfr2; diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index c09d633c3109..86aabf1e0199 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -23,6 +23,8 @@ * @cpu_boot: Boots a cpu into the kernel. * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary * synchronisation. Called from the cpu being booted. + * @cpu_can_disable: Determines whether a CPU can be disabled based on + * mechanism-specific information. * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific * reason, which will cause the hot unplug to be aborted. Called * from the cpu to be killed. 
@@ -42,6 +44,7 @@ struct cpu_operations { int (*cpu_boot)(unsigned int); void (*cpu_postboot)(void); #ifdef CONFIG_HOTPLUG_CPU + bool (*cpu_can_disable)(unsigned int cpu); int (*cpu_disable)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); int (*cpu_kill)(unsigned int cpu); diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index f19fe4b9acc4..865e0253fc1e 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -44,7 +44,7 @@ #define ARM64_SSBS 34 #define ARM64_WORKAROUND_1418040 35 #define ARM64_HAS_SB 36 -#define ARM64_WORKAROUND_1165522 37 +#define ARM64_WORKAROUND_SPECULATIVE_AT_VHE 37 #define ARM64_HAS_ADDRESS_AUTH_ARCH 38 #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39 #define ARM64_HAS_GENERIC_AUTH_ARCH 40 @@ -52,7 +52,13 @@ #define ARM64_HAS_IRQ_PRIO_MASKING 42 #define ARM64_HAS_DCPODP 43 #define ARM64_WORKAROUND_1463225 44 +#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45 +#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46 +#define ARM64_WORKAROUND_1542419 47 +#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48 +#define ARM64_HAS_E0PD 49 +#define ARM64_HAS_RNG 50 -#define ARM64_NCAPS 45 +#define ARM64_NCAPS 51 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index c96ffa4722d3..92ef9539874a 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -289,9 +289,16 @@ struct arm64_cpu_capabilities { u16 type; bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope); /* - * Take the appropriate actions to enable this capability for this CPU. - * For each successfully booted CPU, this method is called for each - * globally detected capability. + * Take the appropriate actions to configure this capability + * for this CPU. If the capability is detected by the kernel + * this will be called on all the CPUs in the system, + * including the hotplugged CPUs, regardless of whether the + * capability is available on that specific CPU. This is + * useful for some capabilities (e.g, working around CPU + * errata), where all the CPUs must take some action (e.g, + * changing system control/configuration). Thus, if an action + * is required only if the CPU has the capability, then the + * routine must check it before taking any action. */ void (*cpu_enable)(const struct arm64_cpu_capabilities *cap); union { @@ -363,21 +370,6 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, return false; } -/* - * Take appropriate action for all matching entries in the shared capability - * entry. 
- */ -static inline void -cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry) -{ - const struct arm64_cpu_capabilities *caps; - - for (caps = entry->match_list; caps->matches; caps++) - if (caps->matches(caps, SCOPE_LOCAL_CPU) && - caps->cpu_enable) - caps->cpu_enable(caps); -} - extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; @@ -621,6 +613,11 @@ static inline bool system_has_prio_mask_debugging(void) system_uses_irq_prio_masking(); } +static inline bool system_capabilities_finalized(void) +{ + return static_branch_likely(&arm64_const_caps_ready); +} + #define ARM64_BP_HARDEN_UNKNOWN -1 #define ARM64_BP_HARDEN_WA_NEEDED 0 #define ARM64_BP_HARDEN_NOT_REQUIRED 1 @@ -667,6 +664,20 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) default: return CONFIG_ARM64_PA_BITS; } } + +/* Check whether hardware update of the Access flag is supported */ +static inline bool cpu_has_hw_af(void) +{ + u64 mmfr1; + + if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM)) + return false; + + mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); + return cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_HADBS_SHIFT); +} + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index e7d46631cc42..a87a93f67671 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -51,14 +51,6 @@ #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ MIDR_ARCHITECTURE_MASK) -#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \ -({ \ - u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \ - u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \ - \ - _model == (model) && rv >= (rv_min) && rv <= (rv_max); \ - }) - #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 @@ -87,11 +79,14 @@ #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3 #define CAVIUM_CPU_PART_THUNDERX2 0x0AF +#define BRCM_CPU_PART_BRAHMA_B53 0x100 #define BRCM_CPU_PART_VULCAN 0x516 #define QCOM_CPU_PART_FALKOR_V1 0x800 #define QCOM_CPU_PART_FALKOR 0xC00 #define QCOM_CPU_PART_KRYO 0x200 +#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803 +#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805 #define NVIDIA_CPU_PART_DENVER 0x003 #define NVIDIA_CPU_PART_CARMEL 0x004 @@ -113,10 +108,13 @@ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) #define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2) +#define MIDR_BRAHMA_B53 MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_BRAHMA_B53) #define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN) #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) +#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER) +#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER) #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, 
FUJITSU_CPU_PART_A64FX) @@ -159,10 +157,19 @@ struct midr_range { #define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r) #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) +static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min, + u32 rv_max) +{ + u32 _model = midr & MIDR_CPU_MODEL_MASK; + u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); + + return _model == model && rv >= rv_min && rv <= rv_max; +} + static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) { - return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, - range->rv_min, range->rv_max); + return midr_is_cpu_model_range(midr, range->model, + range->rv_min, range->rv_max); } static inline bool diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 063c964af705..ec213b4a1650 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -8,7 +8,9 @@ #include <linux/irqflags.h> #include <asm/arch_gicv3.h> +#include <asm/barrier.h> #include <asm/cpufeature.h> +#include <asm/ptrace.h> #define DAIF_PROCCTX 0 #define DAIF_PROCCTX_NOIRQ PSR_I_BIT @@ -36,7 +38,7 @@ static inline void local_daif_mask(void) trace_hardirqs_off(); } -static inline unsigned long local_daif_save(void) +static inline unsigned long local_daif_save_flags(void) { unsigned long flags; @@ -48,6 +50,15 @@ static inline unsigned long local_daif_save(void) flags |= PSR_I_BIT; } + return flags; +} + +static inline unsigned long local_daif_save(void) +{ + unsigned long flags; + + flags = local_daif_save_flags(); + local_daif_mask(); return flags; @@ -65,7 +76,7 @@ static inline void local_daif_restore(unsigned long flags) if (system_uses_irq_prio_masking()) { gic_write_pmr(GIC_PRIO_IRQON); - dsb(sy); + pmr_sync(); } } else if (system_uses_irq_prio_masking()) { u64 pmr; @@ -109,4 +120,19 @@ static inline void local_daif_restore(unsigned long flags) trace_hardirqs_off(); } +/* + * Called by synchronous exception handlers to restore the DAIF bits that were + * modified by taking an exception. + */ +static inline void local_daif_inherit(struct pt_regs *regs) +{ + unsigned long flags = regs->pstate & DAIF_MASK; + + /* + * We can't use local_daif_restore(regs->pstate) here as + * system_has_prio_mask_debugging() won't restore the I bit if it can + * use the pmr instead. + */ + write_sysreg(flags, daif); +} #endif diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index d8ec5bb881c2..7619f473155f 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -5,8 +5,6 @@ #ifndef __ASM_DEBUG_MONITORS_H #define __ASM_DEBUG_MONITORS_H -#ifdef __KERNEL__ - #include <linux/errno.h> #include <linux/types.h> #include <asm/brk-imm.h> @@ -128,5 +126,4 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs) int aarch32_break_handler(struct pt_regs *regs); #endif /* __ASSEMBLY */ -#endif /* __KERNEL__ */ #endif /* __ASM_DEBUG_MONITORS_H */ diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h deleted file mode 100644 index bdcb0922a40c..000000000000 --- a/arch/arm64/include/asm/dma-mapping.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 ARM Ltd. 
- */ -#ifndef __ASM_DMA_MAPPING_H -#define __ASM_DMA_MAPPING_H - -#ifdef __KERNEL__ - -#include <linux/types.h> -#include <linux/vmalloc.h> - -#include <xen/xen.h> -#include <asm/xen/hypervisor.h> - -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ - return NULL; -} - -/* - * Do not use this function in a driver, it is only provided for - * arch/arm/mm/xen.c, which is used by arm64 as well. - */ -static inline bool is_device_dma_coherent(struct device *dev) -{ - return dev->dma_coherent; -} - -#endif /* __KERNEL__ */ -#endif /* __ASM_DMA_MAPPING_H */ diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 76a144702586..44531a69d32b 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -79,7 +79,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base) /* * On arm64, we have to ensure that the initrd ends up in the linear region, - * which is a 1 GB aligned region of size '1UL << (VA_BITS - 1)' that is + * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is * guaranteed to cover the kernel Image. * * Since the EFI stub is part of the kernel Image, we can relax the @@ -90,24 +90,20 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base) static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, unsigned long image_addr) { - return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS - 1)); + return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1)); } -#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) -#define __efi_call_early(f, ...) f(__VA_ARGS__) -#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__) -#define efi_is_64bit() (true) +#define efi_bs_call(func, ...) efi_system_table()->boottime->func(__VA_ARGS__) +#define efi_rt_call(func, ...) efi_system_table()->runtime->func(__VA_ARGS__) +#define efi_is_native() (true) -#define efi_table_attr(table, attr, instance) \ - ((table##_t *)instance)->attr +#define efi_table_attr(inst, attr) (inst->attr) -#define efi_call_proto(protocol, f, instance, ...) \ - ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) +#define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__) #define alloc_screen_info(x...) 
&screen_info -static inline void free_screen_info(efi_system_table_t *sys_table_arg, - struct screen_info *si) +static inline void free_screen_info(struct screen_info *si) { } diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 65ac18400979..cb29253ae86b 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -34,7 +34,8 @@ #define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ #define ESR_ELx_EC_SYS64 (0x18) #define ESR_ELx_EC_SVE (0x19) -/* Unallocated EC: 0x1A - 0x1E */ +#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ +/* Unallocated EC: 0x1b - 0x1E */ #define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ #define ESR_ELx_EC_IABT_LOW (0x20) #define ESR_ELx_EC_IABT_CUR (0x21) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index ed57b760f38c..7a6e81ca23a8 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -8,14 +8,15 @@ #define __ASM_EXCEPTION_H #include <asm/esr.h> +#include <asm/kprobes.h> +#include <asm/ptrace.h> #include <linux/interrupt.h> -#define __exception __attribute__((section(".exception.text"))) #ifdef CONFIG_FUNCTION_GRAPH_TRACER #define __exception_irq_entry __irq_entry #else -#define __exception_irq_entry __exception +#define __exception_irq_entry __kprobes #endif static inline u32 disr_to_esr(u64 disr) @@ -30,4 +31,19 @@ static inline u32 disr_to_esr(u64 disr) return esr; } +asmlinkage void enter_from_user_mode(void); +void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); +void do_undefinstr(struct pt_regs *regs); +asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr); +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, + struct pt_regs *regs); +void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); +void do_sve_acc(unsigned int esr, struct pt_regs *regs); +void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs); +void do_sysinstr(unsigned int esr, struct pt_regs *regs); +void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); +void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); +void do_cp15instr(unsigned int esr, struct pt_regs *regs); +void do_el0_svc(struct pt_regs *regs); +void do_el0_svc_compat(struct pt_regs *regs); #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index b6a2c352f4c3..59f10dd13f12 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -21,7 +21,7 @@ #include <linux/stddef.h> #include <linux/types.h> -#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +#ifdef CONFIG_COMPAT /* Masks for extracting the FPSR and FPCR from the FPSCR */ #define VFP_FPSCR_STAT_MASK 0xf800009f #define VFP_FPSCR_CTRL_MASK 0x07f79f00 diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index 5ab5200b2bdc..91fa4baa1a93 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -11,9 +11,33 @@ #include <asm/insn.h> #define HAVE_FUNCTION_GRAPH_FP_TEST + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#define ARCH_SUPPORTS_FTRACE_OPS 1 +#else #define MCOUNT_ADDR ((unsigned long)_mcount) +#endif + +/* The BL at the callsite's adjusted rec->ip */ #define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE +#define FTRACE_PLT_IDX 0 +#define FTRACE_REGS_PLT_IDX 1 +#define NR_FTRACE_PLTS 2 + +/* + * Currently, gcc tends to save the link register after the local variables + * on the stack. 
This causes the max stack tracer to report the function + * frame sizes for the wrong functions. By defining + * ARCH_FTRACE_SHIFT_STACK_TRACER, it will tell the stack tracer to expect + * to find the return address on the stack after the local variables have + * been set up. + * + * Note, this may change in the future, and we will need to deal with that + * if it were to happen. + */ +#define ARCH_FTRACE_SHIFT_STACK_TRACER 1 + #ifndef __ASSEMBLY__ #include <linux/compat.h> @@ -31,12 +55,24 @@ extern void return_to_handler(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* + * Adjust addr to point at the BL in the callsite. + * See ftrace_init_nop() for the callsite sequence. + */ + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) + return addr + AARCH64_INSN_SIZE; + /* * addr is the address of the mcount call instruction. * recordmcount does the necessary offset calculation. */ return addr; } +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +struct dyn_ftrace; +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); +#define ftrace_init_nop ftrace_init_nop +#endif + #define ftrace_return_address(n) return_address(n) /* diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 6211e3105491..6cc26a127819 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -5,8 +5,6 @@ #ifndef __ASM_FUTEX_H #define __ASM_FUTEX_H -#ifdef __KERNEL__ - #include <linux/futex.h> #include <linux/uaccess.h> @@ -129,5 +127,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, return ret; } -#endif /* __KERNEL__ */ #endif /* __ASM_FUTEX_H */ diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index db9ab760e6fd..bc7aaed4b34e 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -10,8 +10,6 @@ #include <asm/sysreg.h> #include <asm/virt.h> -#ifdef __KERNEL__ - struct arch_hw_breakpoint_ctrl { u32 __reserved : 19, len : 8, @@ -156,5 +154,4 @@ static inline int get_num_wrps(void) ID_AA64DFR0_WRPS_SHIFT); } -#endif /* __KERNEL__ */ #endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 3d2f2472a36c..0f00265248b5 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -86,6 +86,14 @@ #define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4) #define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2) #define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT) +#define KERNEL_HWCAP_SVEI8MM __khwcap2_feature(SVEI8MM) +#define KERNEL_HWCAP_SVEF32MM __khwcap2_feature(SVEF32MM) +#define KERNEL_HWCAP_SVEF64MM __khwcap2_feature(SVEF64MM) +#define KERNEL_HWCAP_SVEBF16 __khwcap2_feature(SVEBF16) +#define KERNEL_HWCAP_I8MM __khwcap2_feature(I8MM) +#define KERNEL_HWCAP_BF16 __khwcap2_feature(BF16) +#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH) +#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 39e7780bedd6..bb313dde58a4 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -440,6 +440,9 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, int shift, enum aarch64_insn_variant variant, enum aarch64_insn_logic_type type); +u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + enum aarch64_insn_variant variant); u32 
aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type, enum aarch64_insn_variant variant, enum aarch64_insn_register Rn, diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 7ed92626949d..4e531f57147d 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -8,8 +8,6 @@ #ifndef __ASM_IO_H #define __ASM_IO_H -#ifdef __KERNEL__ - #include <linux/types.h> #include <asm/byteorder.h> @@ -97,7 +95,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) ({ \ unsigned long tmp; \ \ - rmb(); \ + dma_rmb(); \ \ /* \ * Create a dummy control dependency from the IO read to any \ @@ -111,7 +109,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) }) #define __io_par(v) __iormb(v) -#define __iowmb() wmb() +#define __iowmb() dma_wmb() /* * Relaxed I/O memory access primitives. These follow the Device memory @@ -165,14 +163,11 @@ extern void __memset_io(volatile void __iomem *, int, size_t); * I/O memory mapping functions. */ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot); -extern void __iounmap(volatile void __iomem *addr); +extern void iounmap(volatile void __iomem *addr); extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) -#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) -#define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) -#define iounmap __iounmap /* * PCI configuration space mapping function. @@ -207,5 +202,4 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); extern int devmem_is_allowed(unsigned long pfn); -#endif /* __KERNEL__ */ #endif /* __ASM_IO_H */ diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 7872f260c9ee..aa4b6521ef14 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -5,9 +5,8 @@ #ifndef __ASM_IRQFLAGS_H #define __ASM_IRQFLAGS_H -#ifdef __KERNEL__ - #include <asm/alternative.h> +#include <asm/barrier.h> #include <asm/ptrace.h> #include <asm/sysreg.h> @@ -36,14 +35,14 @@ static inline void arch_local_irq_enable(void) } asm volatile(ALTERNATIVE( - "msr daifclr, #2 // arch_local_irq_enable\n" - "nop", - __msr_s(SYS_ICC_PMR_EL1, "%0") - "dsb sy", + "msr daifclr, #2 // arch_local_irq_enable", + __msr_s(SYS_ICC_PMR_EL1, "%0"), ARM64_HAS_IRQ_PRIO_MASKING) : : "r" ((unsigned long) GIC_PRIO_IRQON) : "memory"); + + pmr_sync(); } static inline void arch_local_irq_disable(void) @@ -118,15 +117,14 @@ static inline unsigned long arch_local_irq_save(void) static inline void arch_local_irq_restore(unsigned long flags) { asm volatile(ALTERNATIVE( - "msr daif, %0\n" - "nop", - __msr_s(SYS_ICC_PMR_EL1, "%0") - "dsb sy", - ARM64_HAS_IRQ_PRIO_MASKING) + "msr daif, %0", + __msr_s(SYS_ICC_PMR_EL1, "%0"), + ARM64_HAS_IRQ_PRIO_MASKING) : : "r" (flags) : "memory"); + + pmr_sync(); } -#endif -#endif +#endif /* __ASM_IRQFLAGS_H */ diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index b52aacd2c526..b0dc4abc3589 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -18,11 +18,8 @@ * KASAN_SHADOW_START: beginning of the kernel virtual addresses. * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, * where N = (1 << KASAN_SHADOW_SCALE_SHIFT). 
- */ -#define KASAN_SHADOW_START (VA_START) -#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) - -/* + * + * KASAN_SHADOW_OFFSET: * This value is used to map an address to the corresponding shadow * address by the following formula: * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET @@ -33,8 +30,8 @@ * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) */ -#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \ - (64 - KASAN_SHADOW_SCALE_SHIFT))) +#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT))) +#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual) void kasan_init(void); void kasan_copy_shadow(pgd_t *pgdir); diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 12a561a54128..d24b527e8c00 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -96,6 +96,10 @@ static inline void crash_post_resume(void) {} struct kimage_arch { void *dtb; unsigned long dtb_mem; + /* Core ELF header buffer */ + void *elf_headers; + unsigned long elf_headers_mem; + unsigned long elf_headers_sz; }; extern const struct kexec_file_ops kexec_image_ops; diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index ddf9d762ac62..6e5d839f42b5 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -61,7 +61,6 @@ * RW: 64bit by default, can be overridden for 32bit VMs * TAC: Trap ACTLR * TSC: Trap SMC - * TVM: Trap VM ops (until M+C set in SCTLR_EL1) * TSW: Trap cache operations by set/way * TWE: Trap WFE * TWI: Trap WFI @@ -74,7 +73,7 @@ * SWIO: Turn set/way invalidates into set/way clean+invalidate */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ - HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ + HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ HCR_FMO | HCR_IMO) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index d69c1efc63e7..688c63412cc2 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -17,7 +17,6 @@ #include <asm/esr.h> #include <asm/kvm_arm.h> #include <asm/kvm_hyp.h> -#include <asm/kvm_mmio.h> #include <asm/ptrace.h> #include <asm/cputype.h> #include <asm/virt.h> @@ -53,8 +52,18 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) /* trap error record accesses */ vcpu->arch.hcr_el2 |= HCR_TERR; } - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + + if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) { vcpu->arch.hcr_el2 |= HCR_FWB; + } else { + /* + * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C + * get set in SCTLR_EL1 such that we can detect when the guest + * MMU gets turned on and do the necessary cache maintenance + * then. 
+ */ + vcpu->arch.hcr_el2 |= HCR_TVM; + } if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) vcpu->arch.hcr_el2 &= ~HCR_RW; @@ -77,14 +86,19 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu) return (unsigned long *)&vcpu->arch.hcr_el2; } -static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu) +static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 &= ~HCR_TWE; + if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count)) + vcpu->arch.hcr_el2 &= ~HCR_TWI; + else + vcpu->arch.hcr_el2 |= HCR_TWI; } -static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu) +static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 |= HCR_TWE; + vcpu->arch.hcr_el2 |= HCR_TWI; } static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu) @@ -204,6 +218,38 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v) vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v; } +/* + * The layout of SPSR for an AArch32 state is different when observed from an + * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32 + * view given an AArch64 view. + * + * In ARM DDI 0487E.a see: + * + * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426 + * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256 + * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280 + * + * Which show the following differences: + * + * | Bit | AA64 | AA32 | Notes | + * +-----+------+------+-----------------------------| + * | 24 | DIT | J | J is RES0 in ARMv8 | + * | 21 | SS | DIT | SS doesn't exist in AArch32 | + * + * ... and all other bits are (currently) common. + */ +static inline unsigned long host_spsr_to_spsr32(unsigned long spsr) +{ + const unsigned long overlap = BIT(24) | BIT(21); + unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT); + + spsr &= ~overlap; + + spsr |= dit << 21; + + return spsr; +} + static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) { u32 mode; @@ -258,11 +304,21 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV); } +static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC); +} + static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE); } +static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF); +} + static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) { return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; @@ -284,7 +340,7 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM); } -static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) +static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) { return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f656169db8c3..d87aa609d2b6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -24,7 +24,6 @@ #include <asm/fpsimd.h> #include <asm/kvm.h> #include <asm/kvm_asm.h> -#include <asm/kvm_mmio.h> #include <asm/thread_info.h> #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -44,6 +43,7 @@ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define 
KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) +#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -52,7 +52,7 @@ int kvm_arm_init_sve(void); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); +void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu); int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); @@ -83,6 +83,14 @@ struct kvm_arch { /* Mandated version of PSCI */ u32 psci_version; + + /* + * If we encounter a data abort without valid instruction syndrome + * information, report this to user space. User space can (and + * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is + * supported. + */ + bool return_nisv_io_abort_to_user; }; #define KVM_NR_MEM_OBJS 40 @@ -316,9 +324,6 @@ struct kvm_vcpu_arch { /* Don't run the guest (internal implementation need) */ bool pause; - /* IO related fields */ - struct kvm_decode mmio_decode; - /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; @@ -338,6 +343,13 @@ struct kvm_vcpu_arch { /* True when deferrable sysregs are loaded on the physical CPU, * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ bool sysregs_loaded_on_cpu; + + /* Guest PV state */ + struct { + u64 steal; + u64 last_steal; + gpa_t base; + } steal; }; /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ @@ -430,8 +442,6 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -struct kvm_vcpu *kvm_arm_get_running_vcpu(void); -struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); @@ -475,9 +485,38 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run, int exception_index); +/* MMIO helpers */ +void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); +unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len); + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + phys_addr_t fault_ipa); + int kvm_perf_init(void); int kvm_perf_teardown(void); +long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu); +gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu); +void kvm_update_stolen_time(struct kvm_vcpu *vcpu); + +int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); + +static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) +{ + vcpu_arch->steal.base = GPA_INVALID; +} + +static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch) +{ + return (vcpu_arch->steal.base != GPA_INVALID); +} + void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); @@ -510,7 +549,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, * wrong, and hyp will crash and burn when it uses any * cpus_have_const_cap() wrapper. 
*/ - BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); + BUG_ON(!system_capabilities_finalized()); __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2); /* @@ -534,7 +573,7 @@ static inline bool kvm_arch_requires_vhe(void) return true; /* Some implementations have defects that confine them to VHE */ - if (cpus_have_cap(ARM64_WORKAROUND_1165522)) + if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) return true; return false; @@ -600,8 +639,7 @@ static inline void kvm_arm_vhe_guest_enter(void) * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU. */ - if (system_uses_irq_prio_masking()) - dsb(sy); + pmr_sync(); } static inline void kvm_arm_vhe_guest_exit(void) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 86825aa20852..a3a6a2ba9a63 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -47,30 +47,6 @@ #define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1) #define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1) -/** - * hyp_alternate_select - Generates patchable code sequences that are - * used to switch between two implementations of a function, depending - * on the availability of a feature. - * - * @fname: a symbol name that will be defined as a function returning a - * function pointer whose type will match @orig and @alt - * @orig: A pointer to the default function, as returned by @fname when - * @cond doesn't hold - * @alt: A pointer to the alternate function, as returned by @fname - * when @cond holds - * @cond: a CPU feature (as described in asm/cpufeature.h) - */ -#define hyp_alternate_select(fname, orig, alt, cond) \ -typeof(orig) * __hyp_text fname(void) \ -{ \ - typeof(alt) *val = orig; \ - asm volatile(ALTERNATIVE("nop \n", \ - "mov %0, %1 \n", \ - cond) \ - : "+r" (val) : "r" (alt)); \ - return val; \ -} - int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu); void __vgic_v3_save_state(struct kvm_vcpu *vcpu); @@ -115,11 +91,11 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) write_sysreg(kvm_get_vttbr(kvm), vttbr_el2); /* - * ARM erratum 1165522 requires the actual execution of the above - * before we can switch to the EL1/EL0 translation regime used by + * ARM errata 1165522 and 1530923 require the actual execution of the + * above before we can switch to the EL1/EL0 translation regime used by * the guest. */ - asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522)); + asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE)); } #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h deleted file mode 100644 index 02b5c48fd467..000000000000 --- a/arch/arm64/include/asm/kvm_mmio.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall <c.dall@virtualopensystems.com> - */ - -#ifndef __ARM64_KVM_MMIO_H__ -#define __ARM64_KVM_MMIO_H__ - -#include <linux/kvm_host.h> -#include <asm/kvm_arm.h> - -/* - * This is annoying. The mmio code requires this, even if we don't - * need any decoding. To be fixed. 
- */ -struct kvm_decode { - unsigned long rt; - bool sign_extend; -}; - -void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); -unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len); - -int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); -int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, - phys_addr_t fault_ipa); - -#endif /* __ARM64_KVM_MMIO_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index befe37d4bc0e..53d846f1bfe7 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -91,6 +91,7 @@ alternative_cb_end void kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); +void kvm_compute_layout(void); static inline unsigned long __kern_hyp_va(unsigned long v) { diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 1b266292f0be..ebee3113a62f 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -4,4 +4,20 @@ #define __ALIGN .align 2 #define __ALIGN_STR ".align 2" +/* + * Annotate a function as position independent, i.e., safe to be called before + * the kernel virtual mapping is activated. + */ +#define SYM_FUNC_START_PI(x) \ + SYM_FUNC_START_ALIAS(__pi_##x); \ + SYM_FUNC_START(x) + +#define SYM_FUNC_START_WEAK_PI(x) \ + SYM_FUNC_START_ALIAS(__pi_##x); \ + SYM_FUNC_START_WEAK(x) + +#define SYM_FUNC_END_PI(x) \ + SYM_FUNC_END(x); \ + SYM_FUNC_END_ALIAS(__pi_##x) + #endif diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 8262325e2fc6..d429f7701c36 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -2,56 +2,47 @@ #ifndef __ASM_LSE_H #define __ASM_LSE_H -#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +#include <asm/atomic_ll_sc.h> + +#ifdef CONFIG_ARM64_LSE_ATOMICS + +#define __LSE_PREAMBLE ".arch armv8-a+lse\n" #include <linux/compiler_types.h> #include <linux/export.h> +#include <linux/jump_label.h> #include <linux/stringify.h> #include <asm/alternative.h> +#include <asm/atomic_lse.h> #include <asm/cpucaps.h> -#ifdef __ASSEMBLER__ - -.arch_extension lse - -.macro alt_lse, llsc, lse - alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS -.endm +extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; +extern struct static_key_false arm64_const_caps_ready; -#else /* __ASSEMBLER__ */ +static inline bool system_uses_lse_atomics(void) +{ + return (static_branch_likely(&arm64_const_caps_ready)) && + static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]); +} -__asm__(".arch_extension lse"); - -/* Move the ll/sc atomics out-of-line */ -#define __LL_SC_INLINE notrace -#define __LL_SC_PREFIX(x) __ll_sc_##x -#define __LL_SC_EXPORT(x) EXPORT_SYMBOL(__LL_SC_PREFIX(x)) - -/* Macro for constructing calls to out-of-line ll/sc atomics */ -#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n" -#define __LL_SC_CLOBBERS "x16", "x17", "x30" +#define __lse_ll_sc_body(op, ...) \ +({ \ + system_uses_lse_atomics() ? 
\ + __lse_##op(__VA_ARGS__) : \ + __ll_sc_##op(__VA_ARGS__); \ +}) /* In-line patching at runtime */ #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ - ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) - -#endif /* __ASSEMBLER__ */ -#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ - -#ifdef __ASSEMBLER__ + ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS) -.macro alt_lse, llsc, lse - \llsc -.endm +#else /* CONFIG_ARM64_LSE_ATOMICS */ -#else /* __ASSEMBLER__ */ +static inline bool system_uses_lse_atomics(void) { return false; } -#define __LL_SC_INLINE static inline -#define __LL_SC_PREFIX(x) x -#define __LL_SC_EXPORT(x) +#define __lse_ll_sc_body(op, ...) __ll_sc_##op(__VA_ARGS__) #define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc -#endif /* __ASSEMBLER__ */ -#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ +#endif /* CONFIG_ARM64_LSE_ATOMICS */ #endif /* __ASM_LSE_H */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index fb04f10a78ab..a4f9ca5479b0 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -12,10 +12,10 @@ #include <linux/compiler.h> #include <linux/const.h> +#include <linux/sizes.h> #include <linux/types.h> #include <asm/bug.h> #include <asm/page-def.h> -#include <linux/sizes.h> /* * Size of the PCI I/O space. This must remain a power of two so that @@ -26,54 +26,63 @@ /* * VMEMMAP_SIZE - allows the whole linear region to be covered by * a struct page array + * + * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE + * needs to cover the memory region from the beginning of the 52-bit + * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to + * keep a constant PAGE_OFFSET and "fallback" to using the higher end + * of the VMEMMAP where 52-bit support is not available in hardware. */ -#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) +#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \ + >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)) /* - * PAGE_OFFSET - the virtual address of the start of the linear map (top - * (VA_BITS - 1)) - * KIMAGE_VADDR - the virtual address of the start of the kernel image + * PAGE_OFFSET - the virtual address of the start of the linear map, at the + * start of the TTBR1 address space. + * PAGE_END - the end of the linear map, where all other kernel mappings begin. + * KIMAGE_VADDR - the virtual address of the start of the kernel image. * VA_BITS - the maximum number of bits for virtual addresses. - * VA_START - the first kernel virtual address. 
*/ #define VA_BITS (CONFIG_ARM64_VA_BITS) -#define VA_START (UL(0xffffffffffffffff) - \ - (UL(1) << VA_BITS) + 1) -#define PAGE_OFFSET (UL(0xffffffffffffffff) - \ - (UL(1) << (VA_BITS - 1)) + 1) +#define _PAGE_OFFSET(va) (-(UL(1) << (va))) +#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) #define KIMAGE_VADDR (MODULES_END) -#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE) +#define BPF_JIT_REGION_START (KASAN_SHADOW_END) #define BPF_JIT_REGION_SIZE (SZ_128M) #define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) #define MODULES_VADDR (BPF_JIT_REGION_END) #define MODULES_VSIZE (SZ_128M) -#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE) +#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M) #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) -#define KERNEL_START _text -#define KERNEL_END _end - -#ifdef CONFIG_ARM64_USER_VA_BITS_52 -#define MAX_USER_VA_BITS 52 +#if VA_BITS > 48 +#define VA_BITS_MIN (48) #else -#define MAX_USER_VA_BITS VA_BITS +#define VA_BITS_MIN (VA_BITS) #endif +#define _PAGE_END(va) (-(UL(1) << ((va) - 1))) + +#define KERNEL_START _text +#define KERNEL_END _end + /* * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual * address space for the shadow region respectively. They can bloat the stack * significantly, so double the (minimum) stack size when they are in use. */ #ifdef CONFIG_KASAN -#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) +#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \ + + KASAN_SHADOW_OFFSET) #define KASAN_THREAD_SHIFT 1 #else -#define KASAN_SHADOW_SIZE (0) #define KASAN_THREAD_SHIFT 0 -#endif +#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN)) +#endif /* CONFIG_KASAN */ #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) @@ -117,14 +126,14 @@ * 16 KB granule: 128 level 3 entries, with contiguous bit * 64 KB granule: 32 level 3 entries, with contiguous bit */ -#define SEGMENT_ALIGN SZ_2M +#define SEGMENT_ALIGN SZ_2M #else /* * 4 KB granule: 16 level 3 entries, with contiguous bit * 16 KB granule: 4 level 3 entries, without contiguous bit * 64 KB granule: 1 level 3 entry */ -#define SEGMENT_ALIGN SZ_64K +#define SEGMENT_ALIGN SZ_64K #endif /* @@ -157,10 +166,13 @@ #endif #ifndef __ASSEMBLY__ +extern u64 vabits_actual; +#define PAGE_END (_PAGE_END(vabits_actual)) #include <linux/bitops.h> #include <linux/mmdebug.h> +extern s64 physvirt_offset; extern s64 memstart_addr; /* PHYS_OFFSET - the physical address of the start of memory. */ #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; }) @@ -176,9 +188,6 @@ static inline unsigned long kaslr_offset(void) return kimage_vaddr - KIMAGE_VADDR; } -/* the actual size of a user virtual address */ -extern u64 vabits_user; - /* * Allow all memory at the discovery stage. We will clip it later. */ @@ -200,25 +209,31 @@ extern u64 vabits_user; * up with a tagged userland pointer. Clear the tag to get a sane pointer to * pass on to access_ok(), for instance. 
*/ -#define untagged_addr(addr) \ - ((__typeof__(addr))sign_extend64((u64)(addr), 55)) +#define __untagged_addr(addr) \ + ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55)) + +#define untagged_addr(addr) ({ \ + u64 __addr = (__force u64)addr; \ + __addr &= __untagged_addr(__addr); \ + (__force __typeof__(addr))__addr; \ +}) #ifdef CONFIG_KASAN_SW_TAGS #define __tag_shifted(tag) ((u64)(tag) << 56) -#define __tag_set(addr, tag) (__typeof__(addr))( \ - ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag)) -#define __tag_reset(addr) untagged_addr(addr) +#define __tag_reset(addr) __untagged_addr(addr) #define __tag_get(addr) (__u8)((u64)(addr) >> 56) #else +#define __tag_shifted(tag) 0UL +#define __tag_reset(addr) (addr) +#define __tag_get(addr) 0 +#endif /* CONFIG_KASAN_SW_TAGS */ + static inline const void *__tag_set(const void *addr, u8 tag) { - return addr; + u64 __addr = (u64)addr & ~__tag_shifted(0xff); + return (const void *)(__addr | __tag_shifted(tag)); } -#define __tag_reset(addr) (addr) -#define __tag_get(addr) 0 -#endif - /* * Physical vs virtual RAM address space conversion. These are * private definitions which should NOT be used outside memory.h @@ -227,19 +242,18 @@ static inline const void *__tag_set(const void *addr, u8 tag) /* - * The linear kernel range starts in the middle of the virtual adddress + * The linear kernel range starts at the bottom of the virtual address * space. Testing the top bit for the start of the region is a - * sufficient check. + * sufficient check and avoids having to worry about the tag. */ -#define __is_lm_address(addr) (!!((addr) & BIT(VA_BITS - 1))) +#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1))) -#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) +#define __lm_to_phys(addr) (((addr) + physvirt_offset)) #define __kimg_to_phys(addr) ((addr) - kimage_voffset) #define __virt_to_phys_nodebug(x) ({ \ - phys_addr_t __x = (phys_addr_t)(x); \ - __is_lm_address(__x) ? __lm_to_phys(__x) : \ - __kimg_to_phys(__x); \ + phys_addr_t __x = (phys_addr_t)(__tag_reset(x)); \ + __is_lm_address(__x) ? 
__lm_to_phys(__x) : __kimg_to_phys(__x); \ }) #define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x)) @@ -250,9 +264,9 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); #else #define __virt_to_phys(x) __virt_to_phys_nodebug(x) #define __phys_addr_symbol(x) __pa_symbol_nodebug(x) -#endif +#endif /* CONFIG_DEBUG_VIRTUAL */ -#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET) +#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset)) #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset)) /* @@ -286,41 +300,38 @@ static inline void *phys_to_virt(phys_addr_t x) #define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x)) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x))) -#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) +#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x))) +#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) /* - * virt_to_page(k) convert a _valid_ virtual address to struct page * - * virt_addr_valid(k) indicates whether a virtual address is valid + * virt_to_page(x) convert a _valid_ virtual address to struct page * + * virt_addr_valid(x) indicates whether a virtual address is valid */ #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define virt_to_page(x) pfn_to_page(virt_to_pfn(x)) #else -#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) -#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) - -#define page_to_virt(page) ({ \ - unsigned long __addr = \ - ((__page_to_voff(page)) | PAGE_OFFSET); \ - const void *__addr_tag = \ - __tag_set((void *)__addr, page_kasan_tag(page)); \ - ((void *)__addr_tag); \ +#define page_to_virt(x) ({ \ + __typeof__(x) __page = x; \ + u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\ + u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \ + (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\ }) -#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) +#define virt_to_page(x) ({ \ + u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \ + u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \ + (struct page *)__addr; \ +}) +#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ -#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \ - + PHYS_OFFSET) >> PAGE_SHIFT) -#endif -#endif +#define virt_addr_valid(addr) ({ \ + __typeof__(addr) __addr = addr; \ + __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \ +}) -#define _virt_addr_is_linear(kaddr) \ - (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET) -#define virt_addr_valid(kaddr) \ - (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) +#endif /* !ASSEMBLY */ /* * Given that the GIC architecture permits ITS implementations that can only be @@ -335,4 +346,4 @@ static inline void *phys_to_virt(phys_addr_t x) #include <asm-generic/memory_model.h> -#endif +#endif /* __ASM_MEMORY_H */ diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index fd6161336653..e4d862420bb4 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -29,52 
+29,11 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -static inline bool arm64_kernel_unmapped_at_el0(void) -{ - return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && - cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); -} +extern bool arm64_use_ng_mappings; -static inline bool arm64_kernel_use_ng_mappings(void) +static inline bool arm64_kernel_unmapped_at_el0(void) { - bool tx1_bug; - - /* What's a kpti? Use global mappings if we don't know. */ - if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) - return false; - - /* - * Note: this function is called before the CPU capabilities have - * been configured, so our early mappings will be global. If we - * later determine that kpti is required, then - * kpti_install_ng_mappings() will make them non-global. - */ - if (arm64_kernel_unmapped_at_el0()) - return true; - - if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) - return false; - - /* - * KASLR is enabled so we're going to be enabling kpti on non-broken - * CPUs regardless of their susceptibility to Meltdown. Rather - * than force everybody to go through the G -> nG dance later on, - * just put down non-global mappings from the beginning. - */ - if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { - tx1_bug = false; -#ifndef MODULE - } else if (!static_branch_likely(&arm64_const_caps_ready)) { - extern const struct midr_range cavium_erratum_27456_cpus[]; - - tx1_bug = is_midr_in_range_list(read_cpuid_id(), - cavium_erratum_27456_cpus); -#endif - } else { - tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); - } - - return !tx1_bug && kaslr_offset() > 0; + return arm64_use_ng_mappings; } typedef void (*bp_hardening_cb_t)(void); @@ -126,8 +85,9 @@ extern void init_mem_pgprot(void); extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot, bool page_mappings_only); -extern void *fixmap_remap_fdt(phys_addr_t dt_phys); +extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); +extern bool kaslr_requires_kpti(void); #define INIT_MM_CONTEXT(name) \ .pgd = init_pg_dir, diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 7ed0adb187a8..3827ff4040a3 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -63,7 +63,7 @@ extern u64 idmap_ptrs_per_pgd; static inline bool __cpu_uses_extended_idmap(void) { - if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52)) + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52)) return false; return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)); @@ -95,7 +95,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz) isb(); } -#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)) +#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual)) #define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz) /* diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index f80e13cbf8ec..1e93de68c044 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -21,7 +21,7 @@ struct mod_arch_specific { struct mod_plt_sec init; /* for CONFIG_DYNAMIC_FTRACE */ - struct plt_entry *ftrace_trampoline; + struct plt_entry *ftrace_trampolines; }; #endif diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h index 799d9dd6f7cc..cf3a0fd7c1a7 100644 --- a/arch/arm64/include/asm/paravirt.h +++ b/arch/arm64/include/asm/paravirt.h @@ -21,6 +21,13 @@ static inline u64 
paravirt_steal_clock(int cpu) { return pv_ops.time.steal_clock(cpu); } -#endif + +int __init pv_time_init(void); + +#else + +#define pv_time_init() do {} while (0) + +#endif // CONFIG_PARAVIRT #endif diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index 9e690686e8aa..70b323cf8300 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_PCI_H #define __ASM_PCI_H -#ifdef __KERNEL__ #include <linux/types.h> #include <linux/slab.h> @@ -35,5 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus) } #endif /* CONFIG_PCI */ -#endif /* __KERNEL__ */ #endif /* __ASM_PCI_H */ diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 14d0bc44d451..172d76fa0245 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -15,8 +15,6 @@ #include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */ -#define check_pgt_cache() do { } while (0) - #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) #if CONFIG_PGTABLE_LEVELS > 2 diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index db92950bb1a0..6bf5e650da78 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -69,7 +69,7 @@ #define PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS) #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define PTRS_PER_PGD (1 << (MAX_USER_VA_BITS - PGDIR_SHIFT)) +#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT)) /* * Section address mask and size definitions. @@ -110,6 +110,7 @@ #define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1) #define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0) #define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0) +#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */ /* * Level 2 descriptor (PMD). @@ -292,6 +293,8 @@ #define TCR_HD (UL(1) << 40) #define TCR_NFD0 (UL(1) << 53) #define TCR_NFD1 (UL(1) << 54) +#define TCR_E0PD0 (UL(1) << 55) +#define TCR_E0PD1 (UL(1) << 56) /* * TTBR. @@ -304,7 +307,7 @@ #define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2) #endif -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */ #define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \ (UL(1) << (48 - PGDIR_SHIFT))) * 8) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 92d2e9f28f28..6f87839f0249 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -26,17 +26,17 @@ #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0) -#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0) +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? 
PMD_SECT_NG : 0) #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) -#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) -#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) -#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) -#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) -#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) +#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) +#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) +#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) +#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) @@ -77,20 +77,20 @@ }) #define PAGE_S2 __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(NORMAL) | PTE_S2_RDONLY | PAGE_S2_XN) -#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PAGE_S2_XN) +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN) #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) -#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) -#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) +/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */ +#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) +#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) -#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_READONLY #define __P011 PAGE_READONLY -#define __P100 PAGE_EXECONLY +#define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_READONLY_EXEC #define __P111 PAGE_READONLY_EXEC @@ -99,7 +99,7 @@ #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_EXECONLY +#define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index e09760ece844..538c85e62f86 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -17,13 +17,11 @@ * VMALLOC range. 
* * VMALLOC_START: beginning of the kernel vmalloc space - * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space + * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space * and fixed mappings */ #define VMALLOC_START (MODULES_END) -#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) - -#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) +#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) #define FIRST_USER_ADDRESS 0UL @@ -35,6 +33,8 @@ #include <linux/mm_types.h> #include <linux/sched.h> +extern struct page *vmemmap; + extern void __pte_error(const char *file, int line, unsigned long val); extern void __pmd_error(const char *file, int line, unsigned long val); extern void __pud_error(const char *file, int line, unsigned long val); @@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) -/* - * Execute-only user mappings do not have the PTE_USER bit set. All valid - * kernel mappings have the PTE_UXN bit set. - */ #define pte_valid_not_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) #define pte_valid_user(pte) \ @@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; /* * p??_access_permitted() is true for valid user mappings (subject to the - * write permission check) other than user execute-only which do not have the - * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + * write permission check). PROT_NONE mappings do not have the PTE_VALID bit + * set. */ #define pte_access_permitted(pte, write) \ (pte_valid_user(pte) && (!(write) || pte_write(pte))) @@ -220,8 +216,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte) * Only if the new pte is valid and kernel, otherwise TLB maintenance * or update_mmu_cache() have the necessary barriers. */ - if (pte_valid_not_user(pte)) + if (pte_valid_not_user(pte)) { dsb(ishst); + isb(); + } } extern void __sync_icache_dcache(pte_t pteval); @@ -281,23 +279,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pte); } -#define __HAVE_ARCH_PTE_SAME -static inline int pte_same(pte_t pte_a, pte_t pte_b) -{ - pteval_t lhs, rhs; - - lhs = pte_val(pte_a); - rhs = pte_val(pte_b); - - if (pte_present(pte_a)) - lhs &= ~PTE_RDONLY; - - if (pte_present(pte_b)) - rhs &= ~PTE_RDONLY; - - return (lhs == rhs); -} - /* * Huge pte definitions. */ @@ -435,6 +416,18 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) #define pgprot_device(prot) \ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) +/* + * DMA allocations for non-coherent devices use what the Arm architecture calls + * "Normal non-cacheable" memory, which permits speculation, unaligned accesses + * and merging of writes. This is different from "Device-nGnR[nE]" memory which + * is intended for MMIO and thus forbids speculation, preserves access size, + * requires strict alignment and can also force write responses to come from the + * endpoint. 
+ */ +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, PTE_ATTRINDX_MASK, \ + PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) + #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, @@ -448,6 +441,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, PMD_TYPE_TABLE) #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_SECT) +#define pmd_leaf(pmd) pmd_sect(pmd) #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 static inline bool pud_sect(pud_t pud) { return false; } @@ -484,8 +478,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) WRITE_ONCE(*pmdp, pmd); - if (pmd_valid(pmd)) + if (pmd_valid(pmd)) { dsb(ishst); + isb(); + } } static inline void pmd_clear(pmd_t *pmdp) @@ -530,6 +526,7 @@ static inline void pte_unmap(pte_t *pte) { } #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT)) #define pud_present(pud) pte_present(pud_pte(pud)) +#define pud_leaf(pud) pud_sect(pud) #define pud_valid(pud) pte_valid(pud_pte(pud)) static inline void set_pud(pud_t *pudp, pud_t pud) @@ -543,8 +540,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud) WRITE_ONCE(*pudp, pud); - if (pud_valid(pud)) + if (pud_valid(pud)) { dsb(ishst); + isb(); + } } static inline void pud_clear(pud_t *pudp) @@ -602,6 +601,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) WRITE_ONCE(*pgdp, pgd); dsb(ishst); + isb(); } static inline void pgd_clear(pgd_t *pgdp) @@ -842,8 +842,6 @@ extern int kern_addr_valid(unsigned long addr); #include <asm-generic/pgtable.h> -static inline void pgtable_cache_init(void) { } - /* * On AArch64, the cache coherency is handled via the set_pte_at() function. */ @@ -859,15 +857,26 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) -#define kc_vaddr_to_offset(v) ((v) & ~VA_START) -#define kc_offset_to_vaddr(o) ((o) | VA_START) - #ifdef CONFIG_ARM64_PA_BITS_52 #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) #else #define phys_to_ttbr(addr) (addr) #endif +/* + * On arm64 without hardware Access Flag, copying from user will fail because + * the pte is old and cannot be marked young. So we always end up with zeroed + * page after fork() + CoW for pfn mappings. We don't always have a + * hardware-managed access flag on arm64. + */ +static inline bool arch_faults_on_old_pte(void) +{ + WARN_ON(preemptible()); + + return !cpu_has_hw_af(); +} +#define arch_faults_on_old_pte arch_faults_on_old_pte + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PGTABLE_H */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index d328540cb85e..7a24bad1a58b 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -69,7 +69,7 @@ extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); * The EL0 pointer bits used by a pointer authentication code. * This is dependent on TBI0 being enabled, or bits 63:56 would also apply. 
*/ -#define ptrauth_user_pac_mask() GENMASK(54, vabits_user) +#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual) /* Only valid for EL0 TTBR0 instruction pointers */ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h index d49951647014..80e946b2abee 100644 --- a/arch/arm64/include/asm/preempt.h +++ b/arch/arm64/include/asm/preempt.h @@ -79,11 +79,11 @@ static inline bool should_resched(int preempt_offset) return pc == preempt_offset; } -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 368d90a9d0e5..a2ce65a0c1fa 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -9,7 +9,6 @@ #ifndef __ASM_PROCFNS_H #define __ASM_PROCFNS_H -#ifdef __KERNEL__ #ifndef __ASSEMBLY__ #include <asm/page.h> @@ -25,5 +24,4 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include <asm/memory.h> #endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ #endif /* __ASM_PROCFNS_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 844e2964b0f5..5ba63204d078 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -9,7 +9,7 @@ #define __ASM_PROCESSOR_H #define KERNEL_DS UL(-1) -#define USER_DS ((UL(1) << MAX_USER_VA_BITS) - 1) +#define USER_DS ((UL(1) << VA_BITS) - 1) /* * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is @@ -20,17 +20,18 @@ #define NET_IP_ALIGN 0 #ifndef __ASSEMBLY__ -#ifdef __KERNEL__ #include <linux/build_bug.h> #include <linux/cache.h> #include <linux/init.h> #include <linux/stddef.h> #include <linux/string.h> +#include <linux/thread_info.h> #include <asm/alternative.h> #include <asm/cpufeature.h> #include <asm/hw_breakpoint.h> +#include <asm/kasan.h> #include <asm/lse.h> #include <asm/pgtable-hwdef.h> #include <asm/pointer_auth.h> @@ -42,8 +43,8 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. 
*/ -#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS) -#define TASK_SIZE_64 (UL(1) << vabits_user) +#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN) +#define TASK_SIZE_64 (UL(1) << vabits_actual) #ifdef CONFIG_COMPAT #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS) @@ -215,6 +216,18 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc, regs->sp = sp; } +static inline bool is_ttbr0_addr(unsigned long addr) +{ + /* entry assembly clears tags for TTBR0 addrs */ + return addr < TASK_SIZE; +} + +static inline bool is_ttbr1_addr(unsigned long addr) +{ + /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ + return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; +} + #ifdef CONFIG_COMPAT static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) @@ -281,10 +294,6 @@ static inline void spin_lock_prefetch(const void *ptr) "nop") : : "p" (ptr)); } -#define HAVE_ARCH_PICK_MMAP_LAYOUT - -#endif - extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ extern void __init minsigstksz_setup(void); @@ -306,6 +315,14 @@ extern void __init minsigstksz_setup(void); /* PR_PAC_RESET_KEYS prctl */ #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ +long set_tagged_addr_ctrl(unsigned long arg); +long get_tagged_addr_ctrl(void); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl() +#endif + /* * For CONFIG_GCC_PLUGIN_STACKLEAK * diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h index 0b8e7269ec82..38187f74e089 100644 --- a/arch/arm64/include/asm/ptdump.h +++ b/arch/arm64/include/asm/ptdump.h @@ -5,7 +5,7 @@ #ifndef __ASM_PTDUMP_H #define __ASM_PTDUMP_H -#ifdef CONFIG_ARM64_PTDUMP_CORE +#ifdef CONFIG_PTDUMP_CORE #include <linux/mm_types.h> #include <linux/seq_file.h> @@ -21,15 +21,15 @@ struct ptdump_info { unsigned long base_addr; }; -void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info); -#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS +void ptdump_walk(struct seq_file *s, struct ptdump_info *info); +#ifdef CONFIG_PTDUMP_DEBUGFS void ptdump_debugfs_register(struct ptdump_info *info, const char *name); #else static inline void ptdump_debugfs_register(struct ptdump_info *info, const char *name) { } #endif void ptdump_check_wx(void); -#endif /* CONFIG_ARM64_PTDUMP_CORE */ +#endif /* CONFIG_PTDUMP_CORE */ #ifdef CONFIG_DEBUG_WX #define debug_checkwx() ptdump_check_wx() diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 1dcf63a9ac1f..bf57308fcd63 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -62,6 +62,7 @@ #define PSR_AA32_I_BIT 0x00000080 #define PSR_AA32_A_BIT 0x00000100 #define PSR_AA32_E_BIT 0x00000200 +#define PSR_AA32_PAN_BIT 0x00400000 #define PSR_AA32_SSBS_BIT 0x00800000 #define PSR_AA32_DIT_BIT 0x01000000 #define PSR_AA32_Q_BIT 0x08000000 @@ -301,6 +302,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) return regs->regs[0]; } +static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) +{ + regs->regs[0] = rc; +} + /** * regs_get_kernel_argument() - get Nth function argument in kernel * @regs: pt_regs of that context diff --git a/arch/arm64/include/asm/pvclock-abi.h b/arch/arm64/include/asm/pvclock-abi.h new file mode 100644 index 000000000000..c4f1c0a0789c --- /dev/null +++ 
b/arch/arm64/include/asm/pvclock-abi.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019 Arm Ltd. */ + +#ifndef __ASM_PVCLOCK_ABI_H +#define __ASM_PVCLOCK_ABI_H + +/* The below structure is defined in ARM DEN0057A */ + +struct pvclock_vcpu_stolen_time { + __le32 revision; + __le32 attributes; + __le64 stolen_time; + /* Structure must be 64 byte aligned, pad to that size */ + u8 padding[48]; +} __packed; + +#endif diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 788ae971f11c..3994169985ef 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -8,13 +8,13 @@ #include <asm-generic/sections.h> extern char __alt_instructions[], __alt_instructions_end[]; -extern char __exception_text_start[], __exception_text_end[]; extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_text_start[], __hyp_text_end[]; extern char __idmap_text_start[], __idmap_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __inittext_begin[], __inittext_end[]; +extern char __exittext_begin[], __exittext_end[]; extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __mmuoff_data_start[], __mmuoff_data_end[]; extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h index bd43d1cf724b..7e9f163d02ec 100644 --- a/arch/arm64/include/asm/signal32.h +++ b/arch/arm64/include/asm/signal32.h @@ -5,7 +5,6 @@ #ifndef __ASM_SIGNAL32_H #define __ASM_SIGNAL32_H -#ifdef __KERNEL__ #ifdef CONFIG_COMPAT #include <linux/compat.h> @@ -79,5 +78,4 @@ static inline void compat_setup_restart_syscall(struct pt_regs *regs) { } #endif /* CONFIG_COMPAT */ -#endif /* __KERNEL__ */ #endif /* __ASM_SIGNAL32_H */ diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h index 7434844036d3..89cba2622b79 100644 --- a/arch/arm64/include/asm/simd.h +++ b/arch/arm64/include/asm/simd.h @@ -26,6 +26,8 @@ DECLARE_PER_CPU(bool, fpsimd_context_busy); static __must_check inline bool may_use_simd(void) { /* + * We must make sure that the SVE has been initialized properly + * before using the SIMD in kernel. * fpsimd_context_busy is only set while preemption is disabled, * and is clear whenever preemption is enabled. Since * this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy @@ -33,8 +35,10 @@ static __must_check inline bool may_use_simd(void) * migrated, and if it's clear we cannot be migrated to a CPU * where it is set. */ - return !in_irq() && !irqs_disabled() && !in_nmi() && - !this_cpu_read(fpsimd_context_busy); + return !WARN_ON(!system_capabilities_finalized()) && + system_supports_fpsimd() && + !in_irq() && !irqs_disabled() && !in_nmi() && + !this_cpu_read(fpsimd_context_busy); } #else /* ! CONFIG_KERNEL_MODE_NEON */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index b093b287babf..9083d6992603 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -11,4 +11,17 @@ /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() +/* + * Changing this will break osq_lock() thanks to the call inside + * smp_cond_load_relaxed(). 
+ * + * See: + * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net + */ +#define vcpu_is_preempted vcpu_is_preempted +static inline bool vcpu_is_preempted(int cpu) +{ + return false; +} + #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index 507d0ee6bc69..b383b4802a7b 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -8,6 +8,8 @@ #ifndef __ASM_SYSCALL_WRAPPER_H #define __ASM_SYSCALL_WRAPPER_H +struct pt_regs; + #define SC_ARM64_REGS_TO_ARGS(x, ...) \ __MAP(x,__SC_ARGS \ ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \ @@ -35,8 +37,11 @@ ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused) -#define COND_SYSCALL_COMPAT(name) \ - cond_syscall(__arm64_compat_sys_##name); +#define COND_SYSCALL_COMPAT(name) \ + asmlinkage long __weak __arm64_compat_sys_##name(const struct pt_regs *regs) \ + { \ + return sys_ni_syscall(); \ + } #define COMPAT_SYS_NI(name) \ SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers); @@ -61,20 +66,18 @@ } \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) -#ifndef SYSCALL_DEFINE0 #define SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \ ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused) -#endif -#ifndef COND_SYSCALL -#define COND_SYSCALL(name) cond_syscall(__arm64_sys_##name) -#endif +#define COND_SYSCALL(name) \ + asmlinkage long __weak __arm64_sys_##name(const struct pt_regs *regs) \ + { \ + return sys_ni_syscall(); \ + } -#ifndef SYS_NI #define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers); -#endif #endif /* __ASM_SYSCALL_WRAPPER_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 06ebcfef73df..b91570ff9db1 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -146,6 +146,7 @@ #define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4) #define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5) #define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6) +#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7) #define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0) #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) @@ -212,6 +213,9 @@ #define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) +#define SYS_PAR_EL1_F BIT(0) +#define SYS_PAR_EL1_FST GENMASK(6, 1) + /*** Statistical Profiling Extension ***/ /* ID registers */ #define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) @@ -362,6 +366,9 @@ #define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) #define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7) +#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) +#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) + #define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0) #define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1) #define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) @@ -499,28 +506,11 @@ #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ (BIT(29))) -#define SCTLR_EL2_RES0 ((BIT(6)) | (BIT(7)) | (BIT(8)) | (BIT(9)) | \ - (BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \ - (BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \ - (BIT(27)) | (BIT(30)) | (BIT(31)) | \ - (0xffffefffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL2 SCTLR_ELx_EE -#define ENDIAN_CLEAR_EL2 0 #else 
#define ENDIAN_SET_EL2 0 -#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE -#endif - -/* SCTLR_EL2 value used for the hyp-stub */ -#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1) -#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ - SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ - SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) - -#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL -#error "Inconsistent SCTLR_EL2 set/clear bits" #endif /* SCTLR_EL1 specific flags. */ @@ -539,16 +529,11 @@ #define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ (BIT(29))) -#define SCTLR_EL1_RES0 ((BIT(6)) | (BIT(10)) | (BIT(13)) | (BIT(17)) | \ - (BIT(27)) | (BIT(30)) | (BIT(31)) | \ - (0xffffefffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) -#define ENDIAN_CLEAR_EL1 0 #else #define ENDIAN_SET_EL1 0 -#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) #endif #define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ @@ -556,15 +541,21 @@ SCTLR_EL1_DZE | SCTLR_EL1_UCT |\ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) -#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\ - SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ - SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) -#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL -#error "Inconsistent SCTLR_EL1 set/clear bits" -#endif +/* MAIR_ELx memory attributes (used by Linux) */ +#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) +#define MAIR_ATTR_DEVICE_nGnRE UL(0x04) +#define MAIR_ATTR_DEVICE_GRE UL(0x0c) +#define MAIR_ATTR_NORMAL_NC UL(0x44) +#define MAIR_ATTR_NORMAL_WT UL(0xbb) +#define MAIR_ATTR_NORMAL UL(0xff) +#define MAIR_ATTR_MASK UL(0xff) + +/* Position the attr at the correct index */ +#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) /* id_aa64isar0 */ +#define ID_AA64ISAR0_RNDR_SHIFT 60 #define ID_AA64ISAR0_TS_SHIFT 52 #define ID_AA64ISAR0_FHM_SHIFT 48 #define ID_AA64ISAR0_DP_SHIFT 44 @@ -579,6 +570,10 @@ #define ID_AA64ISAR0_AES_SHIFT 4 /* id_aa64isar1 */ +#define ID_AA64ISAR1_I8MM_SHIFT 52 +#define ID_AA64ISAR1_DGH_SHIFT 48 +#define ID_AA64ISAR1_BF16_SHIFT 44 +#define ID_AA64ISAR1_SPECRES_SHIFT 40 #define ID_AA64ISAR1_SB_SHIFT 36 #define ID_AA64ISAR1_FRINTTS_SHIFT 32 #define ID_AA64ISAR1_GPI_SHIFT 28 @@ -631,12 +626,20 @@ #define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 /* id_aa64zfr0 */ +#define ID_AA64ZFR0_F64MM_SHIFT 56 +#define ID_AA64ZFR0_F32MM_SHIFT 52 +#define ID_AA64ZFR0_I8MM_SHIFT 44 #define ID_AA64ZFR0_SM4_SHIFT 40 #define ID_AA64ZFR0_SHA3_SHIFT 32 +#define ID_AA64ZFR0_BF16_SHIFT 20 #define ID_AA64ZFR0_BITPERM_SHIFT 16 #define ID_AA64ZFR0_AES_SHIFT 4 #define ID_AA64ZFR0_SVEVER_SHIFT 0 +#define ID_AA64ZFR0_F64MM 0x1 +#define ID_AA64ZFR0_F32MM 0x1 +#define ID_AA64ZFR0_I8MM 0x1 +#define ID_AA64ZFR0_BF16 0x1 #define ID_AA64ZFR0_SM4 0x1 #define ID_AA64ZFR0_SHA3 0x1 #define ID_AA64ZFR0_BITPERM 0x1 @@ -681,6 +684,7 @@ #define ID_AA64MMFR1_VMIDBITS_16 2 /* id_aa64mmfr2 */ +#define ID_AA64MMFR2_E0PD_SHIFT 60 #define ID_AA64MMFR2_FWB_SHIFT 40 #define ID_AA64MMFR2_AT_SHIFT 32 #define ID_AA64MMFR2_LVA_SHIFT 16 @@ -705,6 +709,14 @@ #define ID_ISAR5_AES_SHIFT 4 #define ID_ISAR5_SEVL_SHIFT 0 +#define ID_ISAR6_I8MM_SHIFT 24 +#define ID_ISAR6_BF16_SHIFT 20 +#define ID_ISAR6_SPECRES_SHIFT 16 +#define ID_ISAR6_SB_SHIFT 12 +#define ID_ISAR6_FHM_SHIFT 8 +#define ID_ISAR6_DP_SHIFT 4 +#define ID_ISAR6_JSCVT_SHIFT 0 + #define MVFR0_FPROUND_SHIFT 28 #define 
MVFR0_FPSHVEC_SHIFT 24 #define MVFR0_FPSQRT_SHIFT 20 diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 180b34ec5965..f0cec4160136 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -8,8 +8,6 @@ #ifndef __ASM_THREAD_INFO_H #define __ASM_THREAD_INFO_H -#ifdef __KERNEL__ - #include <linux/compiler.h> #ifndef __ASSEMBLY__ @@ -59,29 +57,18 @@ void arch_release_task_struct(struct task_struct *tsk); #endif -/* - * thread information flags: - * TIF_SYSCALL_TRACE - syscall trace active - * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace - * TIF_SYSCALL_AUDIT - syscall auditing - * TIF_SECCOMP - syscall secure computing - * TIF_SYSCALL_EMU - syscall emulation active - * TIF_SIGPENDING - signal pending - * TIF_NEED_RESCHED - rescheduling necessary - * TIF_NOTIFY_RESUME - callback before returning to user - */ -#define TIF_SIGPENDING 0 -#define TIF_NEED_RESCHED 1 +#define TIF_SIGPENDING 0 /* signal pending */ +#define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ #define TIF_NOHZ 7 -#define TIF_SYSCALL_TRACE 8 -#define TIF_SYSCALL_AUDIT 9 -#define TIF_SYSCALL_TRACEPOINT 10 -#define TIF_SECCOMP 11 -#define TIF_SYSCALL_EMU 12 +#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ +#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ +#define TIF_SECCOMP 11 /* syscall secure computing */ +#define TIF_SYSCALL_EMU 12 /* syscall emulation active */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_FREEZE 19 #define TIF_RESTORE_SIGMASK 20 @@ -90,6 +77,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_SVE 23 /* Scalable Vector Extension in use */ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB mitigation */ +#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) @@ -121,5 +109,4 @@ void arch_release_task_struct(struct task_struct *tsk); .addr_limit = KERNEL_DS, \ } -#endif /* __KERNEL__ */ #endif /* __ASM_THREAD_INFO_H */ diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index a95d1fcb7e21..b76df828e6b7 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -44,7 +44,7 @@ static inline void tlb_flush(struct mmu_gather *tlb) static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { - pgtable_page_dtor(pte); + pgtable_pte_page_dtor(pte); tlb_remove_table(tlb, pte); } diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 8af7a85f76bd..bc3949064725 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ishst); __tlbi(vaae1is, addr); dsb(ish); + isb(); } #endif diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 0524f2438649..a4d945db95a2 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -4,29 +4,6 @@ #include <linux/cpumask.h> -struct cpu_topology { - int thread_id; - int core_id; - int 
package_id; - int llc_id; - cpumask_t thread_sibling; - cpumask_t core_sibling; - cpumask_t llc_sibling; -}; - -extern struct cpu_topology cpu_topology[NR_CPUS]; - -#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) -#define topology_core_id(cpu) (cpu_topology[cpu].core_id) -#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) -#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) -#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) - -void init_cpu_topology(void); -void store_cpu_topology(unsigned int cpuid); -void remove_cpu_topology(unsigned int cpuid); -const struct cpumask *cpu_coregroup_mask(int cpu); - #ifdef CONFIG_NUMA struct pci_bus; diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 59690613ac31..cee5928e1b7d 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -42,16 +42,6 @@ static inline int __in_irqentry_text(unsigned long ptr) ptr < (unsigned long)&__irqentry_text_end; } -static inline int in_exception_text(unsigned long ptr) -{ - int in; - - in = ptr >= (unsigned long)&__exception_text_start && - ptr < (unsigned long)&__exception_text_end; - - return in ? : __in_irqentry_text(ptr); -} - static inline int in_entry_text(unsigned long ptr) { return ptr >= (unsigned long)&__entry_text_start && diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 5a1c32260c1f..32fc8061aa76 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -62,6 +62,15 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si { unsigned long ret, limit = current_thread_info()->addr_limit; + /* + * Asynchronous I/O running in a kernel thread does not have the + * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag + * the user address before checking. + */ + if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) && + (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR))) + addr = untagged_addr(addr); + __chk_user_ptr(addr); asm volatile( // A + B <= C + 1 for all A,B,C, in four easy steps: @@ -215,7 +224,8 @@ static inline void uaccess_enable_not_uao(void) /* * Sanitise a uaccess pointer such that it becomes NULL if above the - * current addr_limit. + * current addr_limit. In case the pointer is tagged (has the top byte set), + * untag the pointer before checking. 
*/ #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) static inline void __user *__uaccess_mask_ptr(const void __user *ptr) @@ -223,10 +233,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) void __user *safe_ptr; asm volatile( - " bics xzr, %1, %2\n" + " bics xzr, %3, %2\n" " csel %0, %1, xzr, eq\n" : "=&r" (safe_ptr) - : "r" (ptr), "r" (current_thread_info()->addr_limit) + : "r" (ptr), "r" (current_thread_info()->addr_limit), + "r" (untagged_addr(ptr)) : "cc"); csdb(); @@ -372,20 +383,34 @@ do { \ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); #define raw_copy_from_user(to, from, n) \ ({ \ - __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ + unsigned long __acfu_ret; \ + uaccess_enable_not_uao(); \ + __acfu_ret = __arch_copy_from_user((to), \ + __uaccess_mask_ptr(from), (n)); \ + uaccess_disable_not_uao(); \ + __acfu_ret; \ }) extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); #define raw_copy_to_user(to, from, n) \ ({ \ - __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ + unsigned long __actu_ret; \ + uaccess_enable_not_uao(); \ + __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \ + (from), (n)); \ + uaccess_disable_not_uao(); \ + __actu_ret; \ }) extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); #define raw_copy_in_user(to, from, n) \ ({ \ - __arch_copy_in_user(__uaccess_mask_ptr(to), \ - __uaccess_mask_ptr(from), (n)); \ + unsigned long __aciu_ret; \ + uaccess_enable_not_uao(); \ + __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \ + __uaccess_mask_ptr(from), (n)); \ + uaccess_disable_not_uao(); \ + __aciu_ret; \ }) #define INLINE_COPY_TO_USER @@ -394,8 +419,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { - if (access_ok(to, n)) + if (access_ok(to, n)) { + uaccess_enable_not_uao(); n = __arch_clear_user(__uaccess_mask_ptr(to), n); + uaccess_disable_not_uao(); + } return n; } #define clear_user __clear_user diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 2629a68b8724..1dd22da1c3a9 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -38,11 +38,10 @@ #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -#define __NR_compat_syscalls 436 +#define __NR_compat_syscalls 439 #endif #define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 #ifndef __COMPAT_SYSCALL_NR #include <uapi/asm/unistd.h> diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 94ab29cf4f00..c1c61635f89c 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -879,6 +879,10 @@ __SYSCALL(__NR_fspick, sys_fspick) __SYSCALL(__NR_pidfd_open, sys_pidfd_open) #define __NR_clone3 435 __SYSCALL(__NR_clone3, sys_clone3) +#define __NR_openat2 437 +__SYSCALL(__NR_openat2, sys_openat2) +#define __NR_pidfd_getfd 438 +__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h index 9c15e0a06301..07468428fd29 100644 --- 
a/arch/arm64/include/asm/vdso.h +++ b/arch/arm64/include/asm/vdso.h @@ -5,8 +5,6 @@ #ifndef __ASM_VDSO_H #define __ASM_VDSO_H -#ifdef __KERNEL__ - /* * Default link address for the vDSO. * Since we randomise the VDSO mapping, there's little point in trying @@ -28,6 +26,4 @@ #endif /* !__ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* __ASM_VDSO_H */ diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h index fb60a88b5ed4..3fd8fd6d8fc2 100644 --- a/arch/arm64/include/asm/vdso/compat_barrier.h +++ b/arch/arm64/include/asm/vdso/compat_barrier.h @@ -20,7 +20,7 @@ #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") -#if __LINUX_ARM_ARCH__ >= 8 +#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD) #define aarch32_smp_mb() dmb(ish) #define aarch32_smp_rmb() dmb(ishld) #define aarch32_smp_wmb() dmb(ishst) diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index c50ee1b7d5cd..537b1e695365 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -16,7 +16,7 @@ #define VDSO_HAS_CLOCK_GETRES 1 -#define VDSO_HAS_32BIT_FALLBACK 1 +#define BUILD_VDSO32 1 static __always_inline int gettimeofday_fallback(struct __kernel_old_timeval *_tv, diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h index 0c731bfc7c8c..0c20a7c1bee5 100644 --- a/arch/arm64/include/asm/vdso/vsyscall.h +++ b/arch/arm64/include/asm/vdso/vsyscall.h @@ -31,13 +31,6 @@ int __arm64_get_clock_mode(struct timekeeper *tk) #define __arch_get_clock_mode __arm64_get_clock_mode static __always_inline -int __arm64_use_vsyscall(struct vdso_data *vdata) -{ - return !vdata[CS_HRES_COARSE].clock_mode; -} -#define __arch_use_vsyscall __arm64_use_vsyscall - -static __always_inline void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk) { vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK; diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h deleted file mode 100644 index ba6dbc3de864..000000000000 --- a/arch/arm64/include/asm/vdso_datapage.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 ARM Limited - */ -#ifndef __ASM_VDSO_DATAPAGE_H -#define __ASM_VDSO_DATAPAGE_H - -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ - -struct vdso_data { - __u64 cs_cycle_last; /* Timebase at clocksource init */ - __u64 raw_time_sec; /* Raw time */ - __u64 raw_time_nsec; - __u64 xtime_clock_sec; /* Kernel time */ - __u64 xtime_clock_nsec; - __u64 xtime_coarse_sec; /* Coarse time */ - __u64 xtime_coarse_nsec; - __u64 wtm_clock_sec; /* Wall to monotonic time */ - __u64 wtm_clock_nsec; - __u32 tb_seq_count; /* Timebase sequence counter */ - /* cs_* members must be adjacent and in this order (ldp accesses) */ - __u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */ - __u32 cs_shift; /* Clocksource shift (mono = raw) */ - __u32 cs_raw_mult; /* Raw clocksource multiplier */ - __u32 tz_minuteswest; /* Whacky timezone stuff */ - __u32 tz_dsttime; - __u32 use_syscall; - __u32 hrtimer_res; -}; - -#endif /* !__ASSEMBLY__ */ - -#endif /* __KERNEL__ */ - -#endif /* __ASM_VDSO_DATAPAGE_H */ diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h new file mode 100644 index 000000000000..2ca708ab9b20 --- /dev/null +++ b/arch/arm64/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_ARM64_VMALLOC_H 
+#define _ASM_ARM64_VMALLOC_H + +#endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h index d88e56b90b93..27e984977402 100644 --- a/arch/arm64/include/asm/xen/page-coherent.h +++ b/arch/arm64/include/asm/xen/page-coherent.h @@ -1,77 +1,2 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H -#define _ASM_ARM64_XEN_PAGE_COHERENT_H - -#include <linux/dma-mapping.h> -#include <asm/page.h> #include <xen/arm/page-coherent.h> - -static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) -{ - return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs); -} - -static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) -{ - dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs); -} - -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - unsigned long pfn = PFN_DOWN(handle); - - if (pfn_valid(pfn)) - dma_direct_sync_single_for_cpu(hwdev, handle, size, dir); - else - __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); -} - -static inline void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - unsigned long pfn = PFN_DOWN(handle); - if (pfn_valid(pfn)) - dma_direct_sync_single_for_device(hwdev, handle, size, dir); - else - __xen_dma_sync_single_for_device(hwdev, handle, size, dir); -} - -static inline void xen_dma_map_page(struct device *hwdev, struct page *page, - dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - unsigned long page_pfn = page_to_xen_pfn(page); - unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); - unsigned long compound_pages = - (1<<compound_order(page)) * XEN_PFN_PER_PAGE; - bool local = (page_pfn <= dev_pfn) && - (dev_pfn - page_pfn < compound_pages); - - if (local) - dma_direct_map_page(hwdev, page, offset, size, dir, attrs); - else - __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); -} - -static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - unsigned long pfn = PFN_DOWN(handle); - /* - * Dom0 is mapped 1:1, while the Linux page can be spanned accross - * multiple Xen page, it's not possible to have a mix of local and - * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a - * foreign mfn will always return false. If the page is local we can - * safely call the native dma_ops function, otherwise we call the xen - * specific function. 
- */ - if (pfn_valid(pfn)) - dma_direct_unmap_page(hwdev, handle, size, dir, attrs); - else - __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); -} - -#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h deleted file mode 100644 index e6e784051932..000000000000 --- a/arch/arm64/include/asm/xen/xen-ops.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_XEN_OPS_H -#define _ASM_XEN_OPS_H - -void xen_efi_runtime_setup(void); - -#endif /* _ASM_XEN_OPS_H */ diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index a1e72886b30c..7752d93bb50f 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -65,5 +65,13 @@ #define HWCAP2_SVESM4 (1 << 6) #define HWCAP2_FLAGM2 (1 << 7) #define HWCAP2_FRINT (1 << 8) +#define HWCAP2_SVEI8MM (1 << 9) +#define HWCAP2_SVEF32MM (1 << 10) +#define HWCAP2_SVEF64MM (1 << 11) +#define HWCAP2_SVEBF16 (1 << 12) +#define HWCAP2_I8MM (1 << 13) +#define HWCAP2_BF16 (1 << 14) +#define HWCAP2_DGH (1 << 15) +#define HWCAP2_RNG (1 << 16) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 9a507716ae2f..ba85bb23f060 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -164,8 +164,9 @@ struct kvm_vcpu_events { struct { __u8 serror_pending; __u8 serror_has_esr; + __u8 ext_dabt_pending; /* Align it to 8 bytes */ - __u8 pad[6]; + __u8 pad[5]; __u64 serror_esr; } exception; __u32 reserved[12]; @@ -219,10 +220,18 @@ struct kvm_vcpu_events { #define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2) #define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1) -/* EL0 Virtual Timer Registers */ +/* + * EL0 Virtual Timer Registers + * + * WARNING: + * KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined + * with the appropriate register encodings. Their values have been + * accidentally swapped. As this is set API, the definitions here + * must be used, rather than ones derived from the encodings. 
+ */
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
@@ -323,10 +332,14 @@ struct kvm_vcpu_events {
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
+#define KVM_ARM_VCPU_PVTIME_CTRL 2
+#define KVM_ARM_VCPU_PVTIME_IPA 0
/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_VCPU2_SHIFT 28
+#define KVM_ARM_IRQ_VCPU2_MASK 0xf
#define KVM_ARM_IRQ_TYPE_SHIFT 24
-#define KVM_ARM_IRQ_TYPE_MASK 0xff
+#define KVM_ARM_IRQ_TYPE_MASK 0xf
#define KVM_ARM_IRQ_VCPU_SHIFT 16
#define KVM_ARM_IRQ_VCPU_MASK 0xff
#define KVM_ARM_IRQ_NUM_SHIFT 0
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 7ed9294e2004..d1bb5b69f1ce 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -49,6 +49,7 @@
#define PSR_SSBS_BIT 0x00001000
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
+#define PSR_DIT_BIT 0x01000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
diff --git a/arch/arm64/include/uapi/asm/stat.h b/arch/arm64/include/uapi/asm/stat.h
deleted file mode 100644
index 313325fa22fa..000000000000
--- a/arch/arm64/include/uapi/asm/stat.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <asm-generic/stat.h>
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index 4703d218663a..f83a70e07df8 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -19,5 +19,6 @@
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
+#define __ARCH_WANT_SYS_CLONE3
#include <asm-generic/unistd.h>
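
The uaccess.h, processor.h and thread_info.h hunks above together expose the arm64 tagged address ABI: a task opts in through the PR_SET_TAGGED_ADDR_CTRL prctl (which sets TIF_TAGGED_ADDR), after which __range_ok()/__uaccess_mask_ptr() strip the top byte from user pointers before the addr_limit check. The sketch below is not part of the diff; it is a minimal userspace illustration of that opt-in, assuming an arm64 kernel built with CONFIG_ARM64_TAGGED_ADDR_ABI. The tag_ptr() helper, the 0x2a tag value and the PR_* fallback definitions (mirroring include/uapi/linux/prctl.h) are illustrative assumptions, not something taken from the kernel changes themselves.

/*
 * Userspace sketch, assuming an arm64 kernel with
 * CONFIG_ARM64_TAGGED_ADDR_ABI: opt in to the tagged address ABI and
 * pass a top-byte-tagged heap pointer to a syscall.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55		/* fallback for older headers */
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

/* Place an arbitrary tag in bits 63:56; TBI makes the CPU ignore it. */
static void *tag_ptr(void *p, uint8_t tag)
{
	uintptr_t addr = (uintptr_t)p & ~(0xffULL << 56);

	return (void *)(addr | ((uintptr_t)tag << 56));
}

int main(void)
{
	const char msg[] = "tagged pointer reached the kernel\n";
	char *buf = malloc(sizeof(msg));
	char *tagged;

	if (!buf)
		return 1;
	memcpy(buf, msg, sizeof(msg));

	/* Opt in: sets TIF_TAGGED_ADDR, so syscalls untag our pointers. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
		perror("PR_SET_TAGGED_ADDR_CTRL");
		return 1;
	}

	tagged = tag_ptr(buf, 0x2a);

	/*
	 * __range_ok()/__uaccess_mask_ptr() strip the tag before the
	 * access_ok() check, so this write() succeeds even though bits
	 * 63:56 of the buffer address are non-zero.
	 */
	if (write(STDOUT_FILENO, tagged, sizeof(msg) - 1) < 0)
		perror("write");

	free(buf);
	return 0;
}

Without the prctl() opt-in the same write() would be rejected (typically -EFAULT), since __range_ok() only untags the address once TIF_TAGGED_ADDR is set or the caller is a kernel thread (PF_KTHREAD), the latter being what keeps asynchronous I/O issued on behalf of an opted-in task working, as the comment in the uaccess.h hunk explains.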