From 621516789ee6e285cb2088fe4706eedd030d38bf Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:17 -0500 Subject: arm64: kexec: make dtb_mem always enabled Currently, dtb_mem is enabled only when CONFIG_KEXEC_FILE is enabled. This adds ugly ifdefs to c files. Always enabled dtb_mem, when it is not used, it is NULL. Change the dtb_mem to phys_addr_t, as it is a physical address. Signed-off-by: Pavel Tatashin Signed-off-by: Will Deacon --- arch/arm64/include/asm/kexec.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 12a561a54128..ad6afed69078 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -90,14 +90,14 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif -#ifdef CONFIG_KEXEC_FILE #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { void *dtb; - unsigned long dtb_mem; + phys_addr_t dtb_mem; }; +#ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_image_ops; struct kimage; -- cgit v1.2.1 From 7ea4088938b793d9a8d4e205b2fe54fba4cf9739 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 4 Dec 2019 10:59:22 -0500 Subject: arm64: hibernate: add PUD_SECT_RDONLY There is PMD_SECT_RDONLY that is used in pud_* function which is confusing. Signed-off-by: Pavel Tatashin Acked-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable-hwdef.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index d9fbd433cc17..9961c7cee9c5 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -110,6 +110,7 @@ #define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1) #define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0) #define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0) +#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */ /* * Level 2 descriptor (PMD). 
-- cgit v1.2.1 From 3751e728cef2908c15974a5ae44627fd41ef3362 Mon Sep 17 00:00:00 2001 From: AKASHI Takahiro Date: Mon, 16 Dec 2019 11:12:47 +0900 Subject: arm64: kexec_file: add crash dump support Enabling crash dump (kdump) includes * prepare contents of ELF header of a core dump file, /proc/vmcore, using crash_prepare_elf64_headers(), and * add two device tree properties, "linux,usable-memory-range" and "linux,elfcorehdr", which represent respectively a memory range to be used by crash dump kernel and the header's location Signed-off-by: AKASHI Takahiro Cc: Catalin Marinas Cc: Will Deacon Reviewed-by: James Morse Tested-and-reviewed-by: Bhupesh Sharma Signed-off-by: Will Deacon --- arch/arm64/include/asm/kexec.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 12a561a54128..d24b527e8c00 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -96,6 +96,10 @@ static inline void crash_post_resume(void) {} struct kimage_arch { void *dtb; unsigned long dtb_mem; + /* Core ELF header buffer */ + void *elf_headers; + unsigned long elf_headers_mem; + unsigned long elf_headers_sz; }; extern const struct kexec_file_ops kexec_image_ops; -- cgit v1.2.1 From 4e410ef96ce6c8d2e2b69d09ab5d44dc8d5352e4 Mon Sep 17 00:00:00 2001 From: Prabhakar Kushwaha Date: Sat, 21 Dec 2019 08:32:38 +0000 Subject: arm64: Remove __exception_text_start and __exception_text_end from asm/section.h Linux commit b6e43c0e3129 ("arm64: remove __exception annotations") has removed __exception_text_start and __exception_text_end sections. So removing reference of __exception_text_start and __exception_text_end from from asm/section.h. Cc: James Morse Reviewed-by: Anshuman Khandual Signed-off-by: Prabhakar Kushwaha Signed-off-by: Will Deacon --- arch/arm64/include/asm/sections.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 25a73aab438f..3994169985ef 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -8,7 +8,6 @@ #include extern char __alt_instructions[], __alt_instructions_end[]; -extern char __exception_text_start[], __exception_text_end[]; extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_text_start[], __hyp_text_end[]; -- cgit v1.2.1 From 1595fe299eb5a664c754eaf48bc178c0d664e1cf Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 10 Jan 2020 16:00:50 +0000 Subject: Revert "arm64: kexec: make dtb_mem always enabled" Adding crash dump support to 'kexec_file' is going to extend 'struct kimage_arch' with more 'kexec_file'-specific members. The cleanup here then starts to get in the way, so revert it. This reverts commit 621516789ee6e285cb2088fe4706eedd030d38bf. 
Reported-by: AKASHI Takahiro Signed-off-by: Will Deacon --- arch/arm64/include/asm/kexec.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index ad6afed69078..12a561a54128 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -90,14 +90,14 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif +#ifdef CONFIG_KEXEC_FILE #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { void *dtb; - phys_addr_t dtb_mem; + unsigned long dtb_mem; }; -#ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_image_ops; struct kimage; -- cgit v1.2.1 From b51c6ac220f77eb246e940442d970b4065c197b0 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:17 +0000 Subject: arm64: Introduce system_capabilities_finalized() marker We finalize the system wide capabilities after the SMP CPUs are booted by the kernel. This is used as a marker for deciding various checks in the kernel. e.g, sanity check the hotplugged CPUs for missing mandatory features. However there is no explicit helper available for this in the kernel. There is sys_caps_initialised, which is not exposed. The other closest one we have is the jump_label arm64_const_caps_ready which denotes that the capabilities are set and the capability checks could use the individual jump_labels for fast path. This is performed before setting the ELF Hwcaps, which must be checked against the new CPUs. We also perform some of the other initialization e.g, SVE setup, which is important for the use of FP/SIMD where SVE is supported. Normally userspace doesn't get to run before we finish this. However the in-kernel users may potentially start using the neon mode. So, we need to reject uses of neon mode before we are set. Instead of defining a new marker for the completion of SVE setup, we could simply reuse the arm64_const_caps_ready and enable it once we have finished all the setup. Also we could expose this to the various users as "system_capabilities_finalized()" to make it more meaningful than "const_caps_ready". Cc: Ard Biesheuvel Cc: Will Deacon Cc: Mark Rutland Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 5 +++++ arch/arm64/include/asm/kvm_host.h | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 4261d55e8506..92ef9539874a 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -613,6 +613,11 @@ static inline bool system_has_prio_mask_debugging(void) system_uses_irq_prio_masking(); } +static inline bool system_capabilities_finalized(void) +{ + return static_branch_likely(&arm64_const_caps_ready); +} + #define ARM64_BP_HARDEN_UNKNOWN -1 #define ARM64_BP_HARDEN_WA_NEEDED 0 #define ARM64_BP_HARDEN_NOT_REQUIRED 1 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index c61260cf63c5..48ce54639eb5 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -547,7 +547,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, * wrong, and hyp will crash and burn when it uses any * cpus_have_const_cap() wrapper. 
*/ - BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); + BUG_ON(!system_capabilities_finalized()); __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2); /* -- cgit v1.2.1 From 0cd82feb017e282b29e725801b7c90da69316734 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Mon, 13 Jan 2020 23:30:18 +0000 Subject: arm64: fpsimd: Make sure SVE setup is complete before SIMD is used In-kernel users of NEON rely on may_use_simd() to check if the SIMD can be used. However, we must initialize the SVE before SIMD can be used. Add a sanity check to make sure that we have completed the SVE setup before anyone uses the SIMD. Cc: Ard Biesheuvel Cc: Mark Rutland Cc: Will Deacon Reviewed-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/simd.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h index 7434844036d3..89cba2622b79 100644 --- a/arch/arm64/include/asm/simd.h +++ b/arch/arm64/include/asm/simd.h @@ -26,6 +26,8 @@ DECLARE_PER_CPU(bool, fpsimd_context_busy); static __must_check inline bool may_use_simd(void) { /* + * We must make sure that the SVE has been initialized properly + * before using the SIMD in kernel. * fpsimd_context_busy is only set while preemption is disabled, * and is clear whenever preemption is enabled. Since * this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy @@ -33,8 +35,10 @@ static __must_check inline bool may_use_simd(void) * migrated, and if it's clear we cannot be migrated to a CPU * where it is set. */ - return !in_irq() && !irqs_disabled() && !in_nmi() && - !this_cpu_read(fpsimd_context_busy); + return !WARN_ON(!system_capabilities_finalized()) && + system_supports_fpsimd() && + !in_irq() && !irqs_disabled() && !in_nmi() && + !this_cpu_read(fpsimd_context_busy); } #else /* ! CONFIG_KERNEL_MODE_NEON */ -- cgit v1.2.1 From d4209d8b717311d114b5d47ba7f8249fd44e97c2 Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 16 Dec 2019 11:33:37 +0000 Subject: arm64: cpufeature: Export matrix and other features to userspace Export the features introduced as part of ARMv8.6 exposed in the ID_AA64ISAR1_EL1 and ID_AA64ZFR0_EL1 registers. This introduces the Matrix features (ARMv8.2-I8MM, ARMv8.2-F64MM and ARMv8.2-F32MM) along with BFloat16 (Armv8.2-BF16), speculation invalidation (SPECRES) and Data Gathering Hint (ARMv8.0-DGH). 
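[Illustration, not part of this patch: once these capabilities are exported, userspace can query them through the ELF auxiliary vector. The HWCAP2_* names below assume the uapi counterparts of the KERNEL_HWCAP_* additions in this series.]

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>          /* userspace copy of the HWCAP2_* bits */

int main(void)
{
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        if (hwcap2 & HWCAP2_I8MM)
                printf("Int8 matrix multiply (I8MM) supported\n");
        if (hwcap2 & HWCAP2_BF16)
                printf("BFloat16 (BF16) supported\n");
        if (hwcap2 & HWCAP2_DGH)
                printf("Data Gathering Hint (DGH) supported\n");
        return 0;
}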
Signed-off-by: Julien Grall [Added other features in those registers] Signed-off-by: Steven Price [will: Don't advertise SPECRES to userspace] Signed-off-by: Will Deacon --- arch/arm64/include/asm/hwcap.h | 7 +++++++ arch/arm64/include/asm/sysreg.h | 12 ++++++++++++ 2 files changed, 19 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 3d2f2472a36c..fcb390ea29ea 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -86,6 +86,13 @@ #define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4) #define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2) #define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT) +#define KERNEL_HWCAP_SVEI8MM __khwcap2_feature(SVEI8MM) +#define KERNEL_HWCAP_SVEF32MM __khwcap2_feature(SVEF32MM) +#define KERNEL_HWCAP_SVEF64MM __khwcap2_feature(SVEF64MM) +#define KERNEL_HWCAP_SVEBF16 __khwcap2_feature(SVEBF16) +#define KERNEL_HWCAP_I8MM __khwcap2_feature(I8MM) +#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH) +#define KERNEL_HWCAP_BF16 __khwcap2_feature(BF16) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43d..f56c4a02a127 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -553,6 +553,10 @@ #define ID_AA64ISAR0_AES_SHIFT 4 /* id_aa64isar1 */ +#define ID_AA64ISAR1_I8MM_SHIFT 52 +#define ID_AA64ISAR1_DGH_SHIFT 48 +#define ID_AA64ISAR1_BF16_SHIFT 44 +#define ID_AA64ISAR1_SPECRES_SHIFT 40 #define ID_AA64ISAR1_SB_SHIFT 36 #define ID_AA64ISAR1_FRINTTS_SHIFT 32 #define ID_AA64ISAR1_GPI_SHIFT 28 @@ -605,12 +609,20 @@ #define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 /* id_aa64zfr0 */ +#define ID_AA64ZFR0_F64MM_SHIFT 56 +#define ID_AA64ZFR0_F32MM_SHIFT 52 +#define ID_AA64ZFR0_I8MM_SHIFT 44 #define ID_AA64ZFR0_SM4_SHIFT 40 #define ID_AA64ZFR0_SHA3_SHIFT 32 +#define ID_AA64ZFR0_BF16_SHIFT 20 #define ID_AA64ZFR0_BITPERM_SHIFT 16 #define ID_AA64ZFR0_AES_SHIFT 4 #define ID_AA64ZFR0_SVEVER_SHIFT 0 +#define ID_AA64ZFR0_F64MM 0x1 +#define ID_AA64ZFR0_F32MM 0x1 +#define ID_AA64ZFR0_I8MM 0x1 +#define ID_AA64ZFR0_BF16 0x1 #define ID_AA64ZFR0_SM4 0x1 #define ID_AA64ZFR0_SHA3 0x1 #define ID_AA64ZFR0_BITPERM 0x1 -- cgit v1.2.1 From 8e3747beff8c4533bf4f1a61b53e061266ef57db Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 17 Dec 2019 20:17:32 +0530 Subject: arm64: Introduce ID_ISAR6 CPU register This adds basic building blocks required for ID_ISAR6 CPU register which identifies support for various instruction implementation on AArch32 state. 
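[Illustration, not part of this patch: a sketch of how the new definitions might be consumed. The helper name aarch32_has_bf16() is hypothetical; cpuid_feature_extract_unsigned_field() is the existing cpufeature helper, and reg_id_isar6 / ID_ISAR6_BF16_SHIFT come from the hunks below.]

#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* Hypothetical helper: does this CPU's AArch32 ID_ISAR6 advertise BF16? */
static bool aarch32_has_bf16(const struct cpuinfo_arm64 *info)
{
        return cpuid_feature_extract_unsigned_field(info->reg_id_isar6,
                                                    ID_ISAR6_BF16_SHIFT) > 0;
}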
Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: James Morse Cc: Suzuki K Poulose Cc: Mark Rutland Cc: linux-kernel@vger.kernel.org Cc: kvmarm@lists.cs.columbia.edu Acked-by: Marc Zyngier Signed-off-by: Anshuman Khandual [will: Ensure SPECRES is treated the same as on A64] Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpu.h | 1 + arch/arm64/include/asm/sysreg.h | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index d72d995b7e25..b4a40535a3d8 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -39,6 +39,7 @@ struct cpuinfo_arm64 { u32 reg_id_isar3; u32 reg_id_isar4; u32 reg_id_isar5; + u32 reg_id_isar6; u32 reg_id_mmfr0; u32 reg_id_mmfr1; u32 reg_id_mmfr2; diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f56c4a02a127..6ad2db18d110 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -146,6 +146,7 @@ #define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4) #define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5) #define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6) +#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7) #define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0) #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) @@ -691,6 +692,14 @@ #define ID_ISAR5_AES_SHIFT 4 #define ID_ISAR5_SEVL_SHIFT 0 +#define ID_ISAR6_I8MM_SHIFT 24 +#define ID_ISAR6_BF16_SHIFT 20 +#define ID_ISAR6_SPECRES_SHIFT 16 +#define ID_ISAR6_SB_SHIFT 12 +#define ID_ISAR6_FHM_SHIFT 8 +#define ID_ISAR6_DP_SHIFT 4 +#define ID_ISAR6_JSCVT_SHIFT 0 + #define MVFR0_FPROUND_SHIFT 28 #define MVFR0_FPSHVEC_SHIFT 24 #define MVFR0_FPSQRT_SHIFT 20 -- cgit v1.2.1 From 395af861377d14616c221831430f58e5786b92f1 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 15 Jan 2020 11:30:08 +0000 Subject: arm64: Move the LSE gas support detection to Kconfig As the Kconfig syntax gained support for $(as-instr) tests, move the LSE gas support detection from Makefile to the main arm64 Kconfig and remove the additional CONFIG_AS_LSE definition and check. 
Cc: Will Deacon Reviewed-by: Vladimir Murzin Tested-by: Vladimir Murzin Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic_ll_sc.h | 2 +- arch/arm64/include/asm/lse.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index 7b012148bfd6..13869b76b58c 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -12,7 +12,7 @@ #include -#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE) +#ifdef CONFIG_ARM64_LSE_ATOMICS #define __LL_SC_FALLBACK(asm_ops) \ " b 3f\n" \ " .subsection 1\n" \ diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 80b388278149..4e1009fff686 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -4,7 +4,7 @@ #include -#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +#ifdef CONFIG_ARM64_LSE_ATOMICS #include #include @@ -36,7 +36,7 @@ static inline bool system_uses_lse_atomics(void) #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) -#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ +#else /* CONFIG_ARM64_LSE_ATOMICS */ static inline bool system_uses_lse_atomics(void) { return false; } @@ -44,5 +44,5 @@ static inline bool system_uses_lse_atomics(void) { return false; } #define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc -#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ +#endif /* CONFIG_ARM64_LSE_ATOMICS */ #endif /* __ASM_LSE_H */ -- cgit v1.2.1 From 3e6c69a058deaa50d33c3dac36cde80b4ce590e8 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 9 Dec 2019 18:12:14 +0000 Subject: arm64: Add initial support for E0PD Kernel Page Table Isolation (KPTI) is used to mitigate some speculation based security issues by ensuring that the kernel is not mapped when userspace is running but this approach is expensive and is incompatible with SPE. E0PD, introduced in the ARMv8.5 extensions, provides an alternative to this which ensures that accesses from userspace to the kernel's half of the memory map to always fault with constant time, preventing timing attacks without requiring constant unmapping and remapping or preventing legitimate accesses. Currently this feature will only be enabled if all CPUs in the system support E0PD, if some CPUs do not support the feature at boot time then the feature will not be enabled and in the unlikely event that a late CPU is the first CPU to lack the feature then we will reject that CPU. This initial patch does not yet integrate with KPTI, this will be dealt with in followup patches. Ideally we could ensure that by default we don't use KPTI on CPUs where E0PD is present. 
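[Illustration, not part of this patch: a hedged sketch of how the new TCR_E0PD1 and ID_AA64MMFR2_E0PD_SHIFT definitions could be used to detect and enable E0PD for the kernel half of the address space. The function name is hypothetical; the real enable path lives in the cpufeature code.]

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>

static void e0pd_enable_sketch(void)
{
        u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

        /* Only act if this CPU's ID register advertises E0PD. */
        if (!cpuid_feature_extract_unsigned_field(mmfr2,
                                                  ID_AA64MMFR2_E0PD_SHIFT))
                return;

        /* EL0 accesses to the TTBR1 (kernel) VA range now fault in constant time. */
        sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
        isb();
}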
Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose [will: Fixed typo in Kconfig text] Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/include/asm/pgtable-hwdef.h | 2 ++ arch/arm64/include/asm/sysreg.h | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index b92683871119..33ff25c1ab1b 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -56,7 +56,8 @@ #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46 #define ARM64_WORKAROUND_1542419 47 #define ARM64_WORKAROUND_1319367 48 +#define ARM64_HAS_E0PD 49 -#define ARM64_NCAPS 49 +#define ARM64_NCAPS 50 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index d9fbd433cc17..378566f4882e 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -292,6 +292,8 @@ #define TCR_HD (UL(1) << 40) #define TCR_NFD0 (UL(1) << 53) #define TCR_NFD1 (UL(1) << 54) +#define TCR_E0PD0 (UL(1) << 55) +#define TCR_E0PD1 (UL(1) << 56) /* * TTBR. diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43d..b085258cfe4e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -655,6 +655,7 @@ #define ID_AA64MMFR1_VMIDBITS_16 2 /* id_aa64mmfr2 */ +#define ID_AA64MMFR2_E0PD_SHIFT 60 #define ID_AA64MMFR2_FWB_SHIFT 40 #define ID_AA64MMFR2_AT_SHIFT 32 #define ID_AA64MMFR2_LVA_SHIFT 16 -- cgit v1.2.1 From c2d92353b28f8542c6b3150900b38c94b1d61480 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 9 Dec 2019 18:12:15 +0000 Subject: arm64: Factor out checks for KASLR in KPTI code into separate function In preparation for integrating E0PD support with KASLR factor out the checks for interaction between KASLR and KPTI done in boot context into a new function kaslr_requires_kpti(), in the process clarifying the distinction between what we do in boot context and what we do at runtime. Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 62 ++++++++++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 17 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index f217e3292919..2a93d34cc0ca 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -35,10 +35,46 @@ static inline bool arm64_kernel_unmapped_at_el0(void) cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); } -static inline bool arm64_kernel_use_ng_mappings(void) +/* + * This check is triggered during the early boot before the cpufeature + * is initialised. Checking the status on the local CPU allows the boot + * CPU to detect the need for non-global mappings and thus avoiding a + * pagetable re-write after all the CPUs are booted. This check will be + * anyway run on individual CPUs, allowing us to get the consistent + * state once the SMP CPUs are up and thus make the switch to non-global + * mappings if required. + */ +static inline bool kaslr_requires_kpti(void) { bool tx1_bug; + if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return false; + + /* + * Systems affected by Cavium erratum 24756 are incompatible + * with KPTI. 
+ */ + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { + tx1_bug = false; +#ifndef MODULE + } else if (!static_branch_likely(&arm64_const_caps_ready)) { + extern const struct midr_range cavium_erratum_27456_cpus[]; + + tx1_bug = is_midr_in_range_list(read_cpuid_id(), + cavium_erratum_27456_cpus); +#endif + } else { + tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); + } + if (tx1_bug) + return false; + + return kaslr_offset() > 0; +} + +static inline bool arm64_kernel_use_ng_mappings(void) +{ /* What's a kpti? Use global mappings if we don't know. */ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) return false; @@ -52,29 +88,21 @@ static inline bool arm64_kernel_use_ng_mappings(void) if (arm64_kernel_unmapped_at_el0()) return true; - if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + /* + * Once we are far enough into boot for capabilities to be + * ready we will have confirmed if we are using non-global + * mappings so don't need to consider anything else here. + */ + if (static_branch_likely(&arm64_const_caps_ready)) return false; /* * KASLR is enabled so we're going to be enabling kpti on non-broken * CPUs regardless of their susceptibility to Meltdown. Rather * than force everybody to go through the G -> nG dance later on, - * just put down non-global mappings from the beginning. + * just put down non-global mappings from the beginning */ - if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { - tx1_bug = false; -#ifndef MODULE - } else if (!static_branch_likely(&arm64_const_caps_ready)) { - extern const struct midr_range cavium_erratum_27456_cpus[]; - - tx1_bug = is_midr_in_range_list(read_cpuid_id(), - cavium_erratum_27456_cpus); -#endif - } else { - tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); - } - - return !tx1_bug && kaslr_offset() > 0; + return kaslr_requires_kpti(); } typedef void (*bp_hardening_cb_t)(void); -- cgit v1.2.1 From 92ac6fd162b42628ebe50cc2f08d6a77759e7911 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 9 Dec 2019 18:12:16 +0000 Subject: arm64: Don't use KPTI where we have E0PD Since E0PD is intended to fulfil the same role as KPTI we don't need to use KPTI on CPUs where E0PD is available, we can rely on E0PD instead. Change the check that forces KPTI on when KASLR is enabled to check for E0PD before doing so, CPUs with E0PD are not expected to be affected by meltdown so should not need to enable KPTI for other reasons. Since E0PD is a system capability we will still enable KPTI if any of the CPUs in the system lacks E0PD, this will rewrite any global mappings that were established in systems where some but not all CPUs support E0PD. We may transiently have a mix of global and non-global mappings while booting since we use the local CPU when deciding if KPTI will be required prior to completing CPU enumeration but any global mappings will be converted to non-global ones when KPTI is applied. KPTI can still be forced on from the command line if required. 
Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 2a93d34cc0ca..1eec3971f0a9 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -47,10 +47,21 @@ static inline bool arm64_kernel_unmapped_at_el0(void) static inline bool kaslr_requires_kpti(void) { bool tx1_bug; + u64 ftr; if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) return false; + /* + * E0PD does a similar job to KPTI so can be used instead + * where available. + */ + if (IS_ENABLED(CONFIG_ARM64_E0PD)) { + ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); + if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf) + return false; + } + /* * Systems affected by Cavium erratum 24756 are incompatible * with KPTI. -- cgit v1.2.1 From 09e3c22a86f6889db0e93fb29d9255081a126f64 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 9 Dec 2019 18:12:17 +0000 Subject: arm64: Use a variable to store non-global mappings decision Refactor the code which checks to see if we need to use non-global mappings to use a variable instead of checking with the CPU capabilities each time, doing the initial check for KPTI early in boot before we start allocating memory so we still avoid transitioning to non-global mappings in common cases. Since this variable always matches our decision about non-global mappings this means we can also combine arm64_kernel_use_ng_mappings() and arm64_unmap_kernel_at_el0() into a single function, the variable simply stores the result and the decision code is elsewhere. We could just have the users check the variable directly but having a function makes it clear that these uses are read-only. The result is that we simplify the code a bit and reduces the amount of code executed at runtime. Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 87 ++--------------------------------- arch/arm64/include/asm/pgtable-prot.h | 4 +- 2 files changed, 6 insertions(+), 85 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 1eec3971f0a9..e4d862420bb4 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -29,91 +29,11 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) -static inline bool arm64_kernel_unmapped_at_el0(void) -{ - return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && - cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); -} +extern bool arm64_use_ng_mappings; -/* - * This check is triggered during the early boot before the cpufeature - * is initialised. Checking the status on the local CPU allows the boot - * CPU to detect the need for non-global mappings and thus avoiding a - * pagetable re-write after all the CPUs are booted. This check will be - * anyway run on individual CPUs, allowing us to get the consistent - * state once the SMP CPUs are up and thus make the switch to non-global - * mappings if required. - */ -static inline bool kaslr_requires_kpti(void) -{ - bool tx1_bug; - u64 ftr; - - if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) - return false; - - /* - * E0PD does a similar job to KPTI so can be used instead - * where available. 
- */ - if (IS_ENABLED(CONFIG_ARM64_E0PD)) { - ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); - if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf) - return false; - } - - /* - * Systems affected by Cavium erratum 24756 are incompatible - * with KPTI. - */ - if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { - tx1_bug = false; -#ifndef MODULE - } else if (!static_branch_likely(&arm64_const_caps_ready)) { - extern const struct midr_range cavium_erratum_27456_cpus[]; - - tx1_bug = is_midr_in_range_list(read_cpuid_id(), - cavium_erratum_27456_cpus); -#endif - } else { - tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456); - } - if (tx1_bug) - return false; - - return kaslr_offset() > 0; -} - -static inline bool arm64_kernel_use_ng_mappings(void) +static inline bool arm64_kernel_unmapped_at_el0(void) { - /* What's a kpti? Use global mappings if we don't know. */ - if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) - return false; - - /* - * Note: this function is called before the CPU capabilities have - * been configured, so our early mappings will be global. If we - * later determine that kpti is required, then - * kpti_install_ng_mappings() will make them non-global. - */ - if (arm64_kernel_unmapped_at_el0()) - return true; - - /* - * Once we are far enough into boot for capabilities to be - * ready we will have confirmed if we are using non-global - * mappings so don't need to consider anything else here. - */ - if (static_branch_likely(&arm64_const_caps_ready)) - return false; - - /* - * KASLR is enabled so we're going to be enabling kpti on non-broken - * CPUs regardless of their susceptibility to Meltdown. Rather - * than force everybody to go through the G -> nG dance later on, - * just put down non-global mappings from the beginning - */ - return kaslr_requires_kpti(); + return arm64_use_ng_mappings; } typedef void (*bp_hardening_cb_t)(void); @@ -167,6 +87,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, pgprot_t prot, bool page_mappings_only); extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); +extern bool kaslr_requires_kpti(void); #define INIT_MM_CONTEXT(name) \ .pgd = init_pg_dir, diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 8dc6c5cdabe6..0a1fd95a8972 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -26,8 +26,8 @@ #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0) -#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0) +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) -- cgit v1.2.1 From 5777eaed566a1d63e344d3dd8f2b5e33be20643e Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 15 Jan 2020 16:42:39 +0000 Subject: arm64: Implement optimised checksum routine Apparently there exist certain workloads which rely heavily on software checksumming, for which the generic do_csum() implementation becomes a significant bottleneck. 
Therefore let's give arm64 its own optimised version - for ease of maintenance this foregoes assembly or intrisics, and is thus not actually arm64-specific, but does rely heavily on C idioms that translate well to the A64 ISA and the typical load/store capabilities of most ARMv8 CPU cores. The resulting increase in checksum throughput scales nicely with buffer size, tending towards 4x for a small in-order core (Cortex-A53), and up to 6x or more for an aggressive big core (Ampere eMAG). Reported-by: Lingyan Huang Tested-by: Lingyan Huang Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- arch/arm64/include/asm/checksum.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index d064a50deb5f..8d2a7de39744 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -35,6 +35,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) } #define ip_fast_csum ip_fast_csum +extern unsigned int do_csum(const unsigned char *buff, int len); +#define do_csum do_csum + #include #endif /* __ASM_CHECKSUM_H */ -- cgit v1.2.1 From e0d5896bd356cd577f9710a02d7a474cdf58426b Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Thu, 31 Oct 2019 12:57:05 -0700 Subject: arm64: lse: fix LSE atomics with LLVM's integrated assembler Unlike gcc, clang considers each inline assembly block to be independent and therefore, when using the integrated assembler for inline assembly, any preambles that enable features must be repeated in each block. This change defines __LSE_PREAMBLE and adds it to each inline assembly block that has LSE instructions, which allows them to be compiled also with clang's assembler. Link: https://github.com/ClangBuiltLinux/linux/issues/671 Signed-off-by: Sami Tolvanen Tested-by: Andrew Murray Tested-by: Kees Cook Reviewed-by: Andrew Murray Reviewed-by: Kees Cook Reviewed-by: Nick Desaulniers Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic_lse.h | 19 +++++++++++++++++++ arch/arm64/include/asm/lse.h | 6 +++--- 2 files changed, 22 insertions(+), 3 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 574808b9df4c..da3280f639cd 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -14,6 +14,7 @@ static inline void __lse_atomic_##op(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op " %w[i], %[v]\n" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v)); \ @@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd) static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op #mb " %w[i], %w[i], %[v]" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v) \ @@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ u32 tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ " add %w[i], %w[i], %w[tmp]" \ : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ @@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory") static inline void __lse_atomic_and(int i, atomic_t *v) { asm volatile( + __LSE_PREAMBLE " mvn %w[i], %w[i]\n" " stclr %w[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v) static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " mvn %w[i], %w[i]\n" \ " 
ldclr" #mb " %w[i], %w[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory") static inline void __lse_atomic_sub(int i, atomic_t *v) { asm volatile( + __LSE_PREAMBLE " neg %w[i], %w[i]\n" " stadd %w[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ u32 tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ " add %w[i], %w[i], %w[tmp]" \ @@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory") static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB( , al, "memory") static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op " %[i], %[v]\n" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v)); \ @@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd) static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ { \ asm volatile( \ + __LSE_PREAMBLE \ " " #asm_op #mb " %[i], %[i], %[v]" \ : [i] "+r" (i), [v] "+Q" (v->counter) \ : "r" (v) \ @@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ " add %[i], %[i], %x[tmp]" \ : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ @@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory") static inline void __lse_atomic64_and(s64 i, atomic64_t *v) { asm volatile( + __LSE_PREAMBLE " mvn %[i], %[i]\n" " stclr %[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v) static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " mvn %[i], %[i]\n" \ " ldclr" #mb " %[i], %[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) { asm volatile( + __LSE_PREAMBLE " neg %[i], %[i]\n" " stadd %[i], %[v]" : [i] "+&r" (i), [v] "+Q" (v->counter) @@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ " add %[i], %[i], %x[tmp]" \ @@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory") static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ { \ asm volatile( \ + __LSE_PREAMBLE \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %[i], %[v]" \ : [i] "+&r" (i), [v] "+Q" (v->counter) \ @@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile( + __LSE_PREAMBLE "1: ldr %x[tmp], %[v]\n" " subs %[ret], %x[tmp], #1\n" " b.lt 2f\n" @@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \ unsigned long tmp; \ \ asm volatile( \ + __LSE_PREAMBLE \ " mov %" #w "[tmp], %" #w "[old]\n" \ " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \ " mov %" #w "[ret], %" #w "[tmp]" \ @@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \ register unsigned long x4 asm ("x4") = (unsigned long)ptr; \ \ asm volatile( \ + __LSE_PREAMBLE \ " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\ " eor %[old1], 
%[old1], %[oldval1]\n" \ " eor %[old2], %[old2], %[oldval2]\n" \ diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 80b388278149..73834996c4b6 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -6,6 +6,8 @@ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) +#define __LSE_PREAMBLE ".arch armv8-a+lse\n" + #include #include #include @@ -14,8 +16,6 @@ #include #include -__asm__(".arch_extension lse"); - extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; @@ -34,7 +34,7 @@ static inline bool system_uses_lse_atomics(void) /* In-line patching at runtime */ #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ - ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) + ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS) #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ -- cgit v1.2.1 From c54f90c2627cc316d365e3073614731e17dbc631 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Thu, 31 Oct 2019 12:46:52 -0700 Subject: arm64: fix alternatives with LLVM's integrated assembler LLVM's integrated assembler fails with the following error when building KVM: :12:6: error: expected absolute expression .if kvm_update_va_mask == 0 ^ :21:6: error: expected absolute expression .if kvm_update_va_mask == 0 ^ :24:2: error: unrecognized instruction mnemonic NOT_AN_INSTRUCTION ^ LLVM ERROR: Error parsing inline asm These errors come from ALTERNATIVE_CB and __ALTERNATIVE_CFG, which test for the existence of the callback parameter in inline assembly using the following expression: " .if " __stringify(cb) " == 0\n" This works with GNU as, but isn't supported by LLVM. This change splits __ALTERNATIVE_CFG and ALTINSTR_ENTRY into separate macros to fix the LLVM build. Link: https://github.com/ClangBuiltLinux/linux/issues/472 Signed-off-by: Sami Tolvanen Tested-by: Nick Desaulniers Reviewed-by: Kees Cook Signed-off-by: Will Deacon --- arch/arm64/include/asm/alternative.h | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index b9f8d787eea9..324e7d5ab37e 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length); static inline void apply_alternatives_module(void *start, size_t length) { } #endif -#define ALTINSTR_ENTRY(feature,cb) \ +#define ALTINSTR_ENTRY(feature) \ " .word 661b - .\n" /* label */ \ - " .if " __stringify(cb) " == 0\n" \ " .word 663f - .\n" /* new instruction */ \ - " .else\n" \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +#define ALTINSTR_ENTRY_CB(feature, cb) \ + " .word 661b - .\n" /* label */ \ " .word " __stringify(cb) "- .\n" /* callback */ \ - " .endif\n" \ " .hword " __stringify(feature) "\n" /* feature bit */ \ " .byte 662b-661b\n" /* source len */ \ " .byte 664f-663f\n" /* replacement len */ @@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { } * * Alternatives with callbacks do not generate replacement instructions. 
*/ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ ".if "__stringify(cfg_enabled)" == 1\n" \ "661:\n\t" \ oldinstr "\n" \ "662:\n" \ ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature,cb) \ + ALTINSTR_ENTRY(feature) \ ".popsection\n" \ - " .if " __stringify(cb) " == 0\n" \ ".pushsection .altinstr_replacement, \"a\"\n" \ "663:\n\t" \ newinstr "\n" \ @@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { } ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ ".org . - (662b-661b) + (664b-663b)\n" \ - ".else\n\t" \ + ".endif\n" + +#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY_CB(feature, cb) \ + ".popsection\n" \ "663:\n\t" \ "664:\n\t" \ - ".endif\n" \ ".endif\n" #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) #define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb) + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) #else #include -- cgit v1.2.1 From 83b0c36b8a136042aa9209fc9ca2fab31d5fbe7c Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 17 Jan 2020 01:33:53 +0530 Subject: arm64: Add KRYO{3,4}XX CPU cores to spectre-v2 safe list The "silver" KRYO3XX and KRYO4XX CPU cores are not affected by Spectre variant 2. Add them to spectre_v2 safe list to correct the spurious ARM_SMCCC_ARCH_WORKAROUND_1 warning and vulnerability status reported under sysfs. 
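[Illustration, not part of the header hunk below: the safe-list update itself lives in cpu_errata.c. As a hedged sketch, entries built from the new MIDR macros would look something like this.]

#include <asm/cputype.h>

static const struct midr_range spectre_v2_safe_list_sketch[] = {
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        { /* sentinel */ },
};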
Reviewed-by: Stephen Boyd Tested-by: Stephen Boyd Signed-off-by: Sai Prakash Ranjan [will: tweaked commit message to remove stale mention of "gold" cores] Signed-off-by: Will Deacon --- arch/arm64/include/asm/cputype.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index aca07c2f6e6e..a87a93f67671 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -85,6 +85,8 @@ #define QCOM_CPU_PART_FALKOR_V1 0x800 #define QCOM_CPU_PART_FALKOR 0xC00 #define QCOM_CPU_PART_KRYO 0x200 +#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803 +#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805 #define NVIDIA_CPU_PART_DENVER 0x003 #define NVIDIA_CPU_PART_CARMEL 0x004 @@ -111,6 +113,8 @@ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) +#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER) +#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER) #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) -- cgit v1.2.1 From 95b3f74bec203804658e17f86fe20755bb8abcb9 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 11 Dec 2019 18:40:09 +0000 Subject: arm64: Use macros instead of hard-coded constants for MAIR_EL1 Currently, the arm64 __cpu_setup has hard-coded constants for the memory attributes that go into the MAIR_EL1 register. Define proper macros in asm/sysreg.h and make use of them in proc.S. Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6e919fafb43d..e21470337c5e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -538,6 +538,18 @@ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) +/* MAIR_ELx memory attributes (used by Linux) */ +#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) +#define MAIR_ATTR_DEVICE_nGnRE UL(0x04) +#define MAIR_ATTR_DEVICE_GRE UL(0x0c) +#define MAIR_ATTR_NORMAL_NC UL(0x44) +#define MAIR_ATTR_NORMAL_WT UL(0xbb) +#define MAIR_ATTR_NORMAL UL(0xff) +#define MAIR_ATTR_MASK UL(0xff) + +/* Position the attr at the correct index */ +#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) + /* id_aa64isar0 */ #define ID_AA64ISAR0_TS_SHIFT 52 #define ID_AA64ISAR0_FHM_SHIFT 48 -- cgit v1.2.1 From 170b25fa6aab84dc9bdba88d4c64af5fbb789b6b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 16 Jan 2020 18:35:44 +0000 Subject: arm64: assembler: remove inherit_daif macro We haven't needed the inherit_daif macro since commit: ed3768db588291dd ("arm64: entry: convert el1_sync to C") ... which converted all callers to C and the local_daif_inherit function. Remove the unused macro. 
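[Illustration, not part of this patch: a C rendering of what the removed macro did (mask the DAIF bits out of the interrupted pstate and write them back to the DAIF register). The real replacement is local_daif_inherit() in daifflags.h, which may differ in detail; the name below is hypothetical.]

#include <asm/ptrace.h>
#include <asm/sysreg.h>

/* Hypothetical sketch, mirroring the removed assembler macro. */
static inline void daif_inherit_sketch(struct pt_regs *regs)
{
        unsigned long flags = regs->pstate &
                              (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT);

        write_sysreg(flags, daif);
}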
Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: James Morse Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index b8cf7c85ffa2..5f8a2772baeb 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -40,12 +40,6 @@ msr daif, \flags .endm - /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */ - .macro inherit_daif, pstate:req, tmp:req - and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) - msr daif, \tmp - .endm - /* IRQ is the lowest priority flag, unconditionally unmask the rest. */ .macro enable_da_f msr daifclr, #(8 | 4 | 1) -- cgit v1.2.1 From ddb953f86cfb1ec450eff16a94230fc6d9901552 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 16 Jan 2020 18:35:45 +0000 Subject: arm64: assembler: remove smp_dmb macro These days arm64 kernels are always SMP, and thus smp_dmb is an overly-long way of writing dmb. Naturally, no-one uses it. Remove the unused macro. Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5f8a2772baeb..995362adaac0 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -79,13 +79,6 @@ 9990: .endm -/* - * SMP data memory barrier - */ - .macro smp_dmb, opt - dmb \opt - .endm - /* * RAS Error Synchronization barrier */ -- cgit v1.2.1 From 7a2c094464e39a54f5b9228cd78208cd43872bbd Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 16 Jan 2020 18:35:47 +0000 Subject: arm64: entry: cleanup el0 svc handler naming For most of the exception entry code, _handler() is the first C function called from the entry assembly in entry-common.c, and external functions handling the bulk of the logic are called do_(). For consistency, apply this scheme to el0_svc_handler and el0_svc_compat_handler, renaming them to do_el0_svc and do_el0_svc_compat respectively. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Anshuman Khandual Cc: Catalin Marinas Cc: James Morse Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64/include/asm') diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 4d5f3b5f50cd..b87c6e276ab1 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -45,8 +45,8 @@ void do_sysinstr(unsigned int esr, struct pt_regs *regs); void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); void do_cp15instr(unsigned int esr, struct pt_regs *regs); -void el0_svc_handler(struct pt_regs *regs); -void el0_svc_compat_handler(struct pt_regs *regs); +void do_el0_svc(struct pt_regs *regs); +void do_el0_svc_compat(struct pt_regs *regs); void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr, struct pt_regs *regs); -- cgit v1.2.1