From b36506787cf19a3eb1116d5dcdef2e2ad5b2933a Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Fri, 18 Jan 2019 14:02:27 +0000 Subject: arm64: perf: remove misleading comment The comment for the armv8pmu_set_event_filter function suggests that it only works for PMUv2 PMUs - this is incorrect. Let's remove the incorrect comment. Acked-by: Mark Rutland Signed-off-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 1620a371b1f5..4addb38bc250 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -810,7 +810,7 @@ static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc, } /* - * Add an event filter to a given event. This will only work for PMUv2 PMUs. + * Add an event filter to a given event. */ static int armv8pmu_set_event_filter(struct hw_perf_event *event, struct perf_event_attr *attr) -- cgit v1.2.1 From 846a415bf4408ccd38d7c0b2a036249737a5ee56 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 14 Jan 2019 11:41:25 +0000 Subject: arm64: default NR_CPUS to 256 There are shipping arm64 platforms with 256 hardware threads. So that we can make use of these with defconfig, bump the arm64 default NR_CPUS to 256. At the same time, drop a redundant comment. We only have one default for NR_CPUS, so there's nothing to sort. Acked-by: Will Deacon Signed-off-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a4168d366127..4cad67b9ec0a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -792,8 +792,7 @@ config SCHED_SMT config NR_CPUS int "Maximum number of CPUs (2-4096)" range 2 4096 - # These have to remain sorted largest to smallest - default "64" + default "256" config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" -- cgit v1.2.1 From 83a680dd97ad2d1ed7a6355aa5baddbc0c8ae2ae Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Fri, 18 Jan 2019 13:52:42 +0000 Subject: arm64: asm-offsets: remove unused offsets There are a number of offsets defined in asm-offsets.c which no longer have any users. Let's clean this up by removing them. All the remaining offsets are in use. 
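By way of illustration (this snippet is not part of the patch, and the structure name is invented): asm-offsets.c is never linked into the kernel. Each DEFINE() entry, via include/linux/kbuild.h, plants a "->NAME value" marker in the compiler's assembly output, and the build post-processes those markers into plain #defines in the generated asm-offsets.h for use by assembly code. In userspace miniature:

    #include <stddef.h>

    /* Same trick as include/linux/kbuild.h: emit a "->NAME value" marker
     * into the .s output; a post-processing step turns each marker into
     * "#define NAME value" in a generated header. */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    struct regs_demo {              /* illustrative stand-in for pt_regs */
            unsigned long regs[31];
            unsigned long sp;
    };

    int main(void)
    {
            DEFINE(DEMO_S_SP, offsetof(struct regs_demo, sp));
            return 0;
    }

Building with "gcc -S" and grepping the output for "->" shows the markers. This is why an unused DEFINE() costs nothing at run time; it merely clutters the generated header, hence the cleanup.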
Acked-by: Mark Rutland Signed-off-by: Andrew Murray Signed-off-by: Catalin Marinas --- arch/arm64/kernel/asm-offsets.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 65b8afc84466..0552b91d7666 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -53,13 +53,9 @@ int main(void) DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); BLANK(); DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); - DEFINE(S_X1, offsetof(struct pt_regs, regs[1])); DEFINE(S_X2, offsetof(struct pt_regs, regs[2])); - DEFINE(S_X3, offsetof(struct pt_regs, regs[3])); DEFINE(S_X4, offsetof(struct pt_regs, regs[4])); - DEFINE(S_X5, offsetof(struct pt_regs, regs[5])); DEFINE(S_X6, offsetof(struct pt_regs, regs[6])); - DEFINE(S_X7, offsetof(struct pt_regs, regs[7])); DEFINE(S_X8, offsetof(struct pt_regs, regs[8])); DEFINE(S_X10, offsetof(struct pt_regs, regs[10])); DEFINE(S_X12, offsetof(struct pt_regs, regs[12])); @@ -73,12 +69,8 @@ int main(void) DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_SP, offsetof(struct pt_regs, sp)); -#ifdef CONFIG_COMPAT - DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp)); -#endif DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_PC, offsetof(struct pt_regs, pc)); - DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); @@ -93,7 +85,6 @@ int main(void) BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); - DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); BLANK(); @@ -110,25 +101,18 @@ int main(void) BLANK(); DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last)); DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec)); - DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec)); DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec)); - DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec)); DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec)); - DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec)); DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count)); DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult)); - DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult)); DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift)); DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest)); - DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall)); BLANK(); DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec)); DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec)); BLANK(); DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); @@ -142,13 +126,9 @@ int main(void) DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, 
arch.workaround_flags)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); - DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); - DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); - DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); #endif #ifdef CONFIG_CPU_PM - DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); -- cgit v1.2.1 From 83504032e6ddcc8b0942aa24dfad5db849090c9f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Jan 2019 14:22:24 +0000 Subject: arm64: Remove asm/memblock.h The arm64 asm/memblock.h header exists only to provide a function prototype for arm64_memblock_init(), which is called only from setup_arch(). Move the declaration into mmu.h, where it can live alongside other init functions such as paging_init() and bootmem_init() without the need for its own special header file. Acked-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/memblock.h | 21 --------------------- arch/arm64/include/asm/mmu.h | 1 + arch/arm64/kernel/setup.c | 1 - arch/arm64/mm/mmu.c | 1 - 4 files changed, 1 insertion(+), 23 deletions(-) delete mode 100644 arch/arm64/include/asm/memblock.h (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/memblock.h b/arch/arm64/include/asm/memblock.h deleted file mode 100644 index 6afeed2467f1..000000000000 --- a/arch/arm64/include/asm/memblock.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -#ifndef __ASM_MEMBLOCK_H -#define __ASM_MEMBLOCK_H - -extern void arm64_memblock_init(void); - -#endif diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 3e8063f4f9d3..67ef25d037ea 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -129,6 +129,7 @@ static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) static inline void arm64_apply_bp_hardening(void) { } #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ +extern void arm64_memblock_init(void); extern void paging_init(void); extern void bootmem_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4b0e1231625c..71f5fbb12608 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -58,7 +58,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index b6f5aa52ac67..d6b6f1b169bb 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.1 From a2c801c53d1682871fba1e037c9d3b0c9fffee8a Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Wed, 9 Jan 2019 13:21:00 -0700 Subject: arm64: mm: make use of new memblocks_present() helper Cleanup the arm64_memory_present() function seeing it's very similar to other arches. memblocks_present() is a direct replacement of arm64_memory_present() Acked-by: Will Deacon Acked-by: Catalin Marinas Signed-off-by: Logan Gunthorpe Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 7205a9085b4d..2302b4093a63 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -285,24 +285,6 @@ int pfn_valid(unsigned long pfn) } EXPORT_SYMBOL(pfn_valid); -#ifndef CONFIG_SPARSEMEM -static void __init arm64_memory_present(void) -{ -} -#else -static void __init arm64_memory_present(void) -{ - struct memblock_region *reg; - - for_each_memblock(memory, reg) { - int nid = memblock_get_region_node(reg); - - memory_present(nid, memblock_region_memory_base_pfn(reg), - memblock_region_memory_end_pfn(reg)); - } -} -#endif - static phys_addr_t memory_limit = PHYS_ADDR_MAX; /* @@ -489,7 +471,7 @@ void __init bootmem_init(void) * Sparsemem tries to allocate bootmem in memory_present(), so must be * done after the fixed reservations. */ - arm64_memory_present(); + memblocks_present(); sparse_init(); zone_sizes_init(min, max); -- cgit v1.2.1 From a61326c076f25b7b808cccb827576808ec1cfa9d Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Sat, 19 Jan 2019 15:42:47 -0500 Subject: arm64: dts: qcom: pm8005: add interrupt controller properties Add interrupt controller properties now that spmi-gpio is a proper hierarchical IRQ chip. The interrupts property is no longer needed so remove it. This change was not tested on any hardware but the same change was tested on qcom-pm8941.dtsi using a LG Nexus 5 (hammerhead) phone with no issues. 
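By way of illustration (not part of this patch; the "pmic-alert" consumer name and handler are invented): with spmi-gpio acting as a hierarchical irqchip, a consumer driver can map a PMIC GPIO to a Linux IRQ on demand through gpiolib, instead of the DT statically describing every GPIO interrupt up front:

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>
    #include <linux/interrupt.h>

    static irqreturn_t pmic_alert_handler(int irq, void *data)
    {
            /* React to the PMIC GPIO firing. */
            return IRQ_HANDLED;
    }

    static int pmic_alert_setup(struct device *dev, void *priv)
    {
            struct gpio_desc *gpiod;
            int irq;

            gpiod = devm_gpiod_get(dev, "pmic-alert", GPIOD_IN);
            if (IS_ERR(gpiod))
                    return PTR_ERR(gpiod);

            /* The irq hierarchy allocates the parent SPMI interrupt here. */
            irq = gpiod_to_irq(gpiod);
            if (irq < 0)
                    return irq;

            return devm_request_threaded_irq(dev, irq, NULL,
                                             pmic_alert_handler,
                                             IRQF_ONESHOT | IRQF_TRIGGER_RISING,
                                             "pmic-alert", priv);
    }

The same reasoning applies to the pm8998, pmi8994 and pmi8998 patches that follow.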
Signed-off-by: Brian Masney Reviewed-by: Stephen Boyd Reviewed-by: Bjorn Andersson Signed-off-by: Linus Walleij --- arch/arm64/boot/dts/qcom/pm8005.dtsi | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/qcom/pm8005.dtsi b/arch/arm64/boot/dts/qcom/pm8005.dtsi index 4d5aca3eeb69..c0ddf128136c 100644 --- a/arch/arm64/boot/dts/qcom/pm8005.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8005.dtsi @@ -16,10 +16,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0 0xc0 0 IRQ_TYPE_NONE>, - <0 0xc1 0 IRQ_TYPE_NONE>, - <0 0xc2 0 IRQ_TYPE_NONE>, - <0 0xc3 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; -- cgit v1.2.1 From a1738363e41a65733a0d277d8189efa98315fdff Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Sat, 19 Jan 2019 15:42:48 -0500 Subject: arm64: dts: qcom: pm8998: add interrupt controller properties Add interrupt controller properties now that spmi-gpio is a proper hierarchical IRQ chip. The interrupts property is no longer needed so remove it. This change was not tested on any hardware but the same change was tested on qcom-pm8941.dtsi using a LG Nexus 5 (hammerhead) phone with no issues. Signed-off-by: Brian Masney Reviewed-by: Stephen Boyd Reviewed-by: Bjorn Andersson Signed-off-by: Linus Walleij --- arch/arm64/boot/dts/qcom/pm8998.dtsi | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi index f1025a50c227..43cb5ea14089 100644 --- a/arch/arm64/boot/dts/qcom/pm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi @@ -94,32 +94,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0 0xc0 0 IRQ_TYPE_NONE>, - <0 0xc1 0 IRQ_TYPE_NONE>, - <0 0xc2 0 IRQ_TYPE_NONE>, - <0 0xc3 0 IRQ_TYPE_NONE>, - <0 0xc4 0 IRQ_TYPE_NONE>, - <0 0xc5 0 IRQ_TYPE_NONE>, - <0 0xc6 0 IRQ_TYPE_NONE>, - <0 0xc7 0 IRQ_TYPE_NONE>, - <0 0xc8 0 IRQ_TYPE_NONE>, - <0 0xc9 0 IRQ_TYPE_NONE>, - <0 0xca 0 IRQ_TYPE_NONE>, - <0 0xcb 0 IRQ_TYPE_NONE>, - <0 0xcc 0 IRQ_TYPE_NONE>, - <0 0xcd 0 IRQ_TYPE_NONE>, - <0 0xce 0 IRQ_TYPE_NONE>, - <0 0xcf 0 IRQ_TYPE_NONE>, - <0 0xd0 0 IRQ_TYPE_NONE>, - <0 0xd1 0 IRQ_TYPE_NONE>, - <0 0xd2 0 IRQ_TYPE_NONE>, - <0 0xd3 0 IRQ_TYPE_NONE>, - <0 0xd4 0 IRQ_TYPE_NONE>, - <0 0xd5 0 IRQ_TYPE_NONE>, - <0 0xd6 0 IRQ_TYPE_NONE>, - <0 0xd7 0 IRQ_TYPE_NONE>, - <0 0xd8 0 IRQ_TYPE_NONE>, - <0 0xd9 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; -- cgit v1.2.1 From 8cff9c8a78810ca09fd7c4d6d5d4f86a8b113059 Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Sat, 19 Jan 2019 15:42:49 -0500 Subject: arm64: dts: qcom: pmi8994: add interrupt controller properties Add interrupt controller properties now that spmi-gpio is a proper hierarchical IRQ chip. The interrupts property is no longer needed so remove it. This change was not tested on any hardware but the same change was tested on qcom-pm8941.dtsi using a LG Nexus 5 (hammerhead) phone with no issues. 
Signed-off-by: Brian Masney Reviewed-by: Stephen Boyd Reviewed-by: Bjorn Andersson Signed-off-by: Linus Walleij --- arch/arm64/boot/dts/qcom/pmi8994.dtsi | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi index dae1cdc23f54..3aee10e3f921 100644 --- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi +++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi @@ -15,16 +15,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <2 0xc0 0 IRQ_TYPE_NONE>, - <2 0xc1 0 IRQ_TYPE_NONE>, - <2 0xc2 0 IRQ_TYPE_NONE>, - <2 0xc3 0 IRQ_TYPE_NONE>, - <2 0xc4 0 IRQ_TYPE_NONE>, - <2 0xc5 0 IRQ_TYPE_NONE>, - <2 0xc6 0 IRQ_TYPE_NONE>, - <2 0xc7 0 IRQ_TYPE_NONE>, - <2 0xc8 0 IRQ_TYPE_NONE>, - <2 0xc9 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; -- cgit v1.2.1 From f14a5e6da4a50e78a18384faf81eef6171909d3f Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Sat, 19 Jan 2019 15:42:50 -0500 Subject: arm64: dts: qcom: pmi8998: add interrupt controller properties Add interrupt controller properties now that spmi-gpio is a proper hierarchical IRQ chip. The interrupts property is no longer needed so remove it. This change was not tested on any hardware but the same change was tested on qcom-pm8941.dtsi using a LG Nexus 5 (hammerhead) phone with no issues. Signed-off-by: Brian Masney Reviewed-by: Stephen Boyd Reviewed-by: Bjorn Andersson Signed-off-by: Linus Walleij --- arch/arm64/boot/dts/qcom/pmi8998.dtsi | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi index da3285e216e2..051f57e7d6ac 100644 --- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi +++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi @@ -14,20 +14,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0 0xc0 0 IRQ_TYPE_NONE>, - <0 0xc1 0 IRQ_TYPE_NONE>, - <0 0xc2 0 IRQ_TYPE_NONE>, - <0 0xc3 0 IRQ_TYPE_NONE>, - <0 0xc4 0 IRQ_TYPE_NONE>, - <0 0xc5 0 IRQ_TYPE_NONE>, - <0 0xc6 0 IRQ_TYPE_NONE>, - <0 0xc7 0 IRQ_TYPE_NONE>, - <0 0xc8 0 IRQ_TYPE_NONE>, - <0 0xc9 0 IRQ_TYPE_NONE>, - <0 0xca 0 IRQ_TYPE_NONE>, - <0 0xcb 0 IRQ_TYPE_NONE>, - <0 0xcc 0 IRQ_TYPE_NONE>, - <0 0xcd 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; -- cgit v1.2.1 From 13e4cdd785867a632516f9cb5dfbe5ba20822820 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Tue, 15 Jan 2019 13:58:26 +0000 Subject: arm64: uaccess: Cleanup get/put_user() __get/put_user_check() macro is made to return a value but this is never used. Get rid of them and just use directly __get/put_user_error() as a statement, reducing macro indirection. Also, take this opportunity to rename __get/put_user_err() as it gets a bit confusing having them along __get/put_user_error(). 
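The distinction at the heart of the cleanup, in userspace miniature (invented names, not the kernel's): a GNU statement expression ({ ... }) yields a value, while the do { ... } while (0) idiom is a pure statement that still expands safely under an unbraced if/else. Since no caller ever consumed the value of __get/put_user_check(), the statement form suffices:

    #include <stdio.h>

    /* Expression form, shaped like the old __get_user_check(): could be
     * used as "ret = FETCH_EXPR(...)", though nothing ever did. */
    #define FETCH_EXPR(x, src, err) \
            ({ (x) = (src); (err) = 0; (void)0; })

    /* Statement form, shaped like the new __get_user_error(): yields no
     * value, but do/while(0) keeps it safe in an unbraced if/else. */
    #define FETCH_STMT(x, src, err) \
            do { (x) = (src); (err) = 0; } while (0)

    int main(void)
    {
            int v = 0, err = -1;

            if (err < 0)
                    FETCH_STMT(v, 42, err);
            else
                    FETCH_STMT(v, 7, err);

            printf("v=%d err=%d\n", v, err);
            return 0;
    }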
Signed-off-by: Julien Thierry Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 547d7a0c9d05..8e408084b8c3 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -268,7 +268,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT)) -#define __get_user_err(x, ptr, err) \ +#define __raw_get_user(x, ptr, err) \ do { \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ @@ -297,28 +297,22 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user_check(x, ptr, err) \ -({ \ +#define __get_user_error(x, ptr, err) \ +do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ __p = uaccess_mask_ptr(__p); \ - __get_user_err((x), __p, (err)); \ + __raw_get_user((x), __p, (err)); \ } else { \ (x) = 0; (err) = -EFAULT; \ } \ -}) - -#define __get_user_error(x, ptr, err) \ -({ \ - __get_user_check((x), (ptr), (err)); \ - (void)0; \ -}) +} while (0) #define __get_user(x, ptr) \ ({ \ int __gu_err = 0; \ - __get_user_check((x), (ptr), __gu_err); \ + __get_user_error((x), (ptr), __gu_err); \ __gu_err; \ }) @@ -338,7 +332,7 @@ do { \ : "+r" (err) \ : "r" (x), "r" (addr), "i" (-EFAULT)) -#define __put_user_err(x, ptr, err) \ +#define __raw_put_user(x, ptr, err) \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ @@ -366,28 +360,22 @@ do { \ uaccess_disable_not_uao(); \ } while (0) -#define __put_user_check(x, ptr, err) \ -({ \ +#define __put_user_error(x, ptr, err) \ +do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ __p = uaccess_mask_ptr(__p); \ - __put_user_err((x), __p, (err)); \ + __raw_put_user((x), __p, (err)); \ } else { \ (err) = -EFAULT; \ } \ -}) - -#define __put_user_error(x, ptr, err) \ -({ \ - __put_user_check((x), (ptr), (err)); \ - (void)0; \ -}) +} while (0) #define __put_user(x, ptr) \ ({ \ int __pu_err = 0; \ - __put_user_check((x), (ptr), __pu_err); \ + __put_user_error((x), (ptr), __pu_err); \ __pu_err; \ }) -- cgit v1.2.1 From 0bd3ef34d2a8dd4056560567073d8bfc5da92e39 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Tue, 15 Jan 2019 13:58:27 +0000 Subject: arm64: uaccess: Implement unsafe accessors Current implementation of get/put_user_unsafe default to get/put_user which toggle PAN before each access, despite having been told by the caller that multiple accesses to user memory were about to happen. Provide implementations for user_access_begin/end to turn PAN off/on and implement unsafe accessors that assume PAN was already turned off. 
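A sketch of the calling pattern these primitives enable (the function below is illustrative, not from the patch): PAN is toggled once around a whole run of accesses, and unsafe_get_user() branches to the supplied label on fault:

    /* Sum two userspace ints with a single PAN toggle. */
    static long sum_two_user_ints(const int __user *uptr)
    {
            int a, b;

            if (!user_access_begin(uptr, 2 * sizeof(int)))
                    return -EFAULT;

            unsafe_get_user(a, &uptr[0], efault);   /* goto efault on fault */
            unsafe_get_user(b, &uptr[1], efault);
            user_access_end();

            return a + b;

    efault:
            user_access_end();
            return -EFAULT;
    }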
Signed-off-by: Julien Thierry Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 79 ++++++++++++++++++++++++++++++---------- 1 file changed, 59 insertions(+), 20 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 8e408084b8c3..6a70c75ed9f4 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -270,31 +270,26 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) #define __raw_get_user(x, ptr, err) \ do { \ - unsigned long __gu_val; \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ + __get_user_asm("ldrb", "ldtrb", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ + __get_user_asm("ldrh", "ldtrh", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ + __get_user_asm("ldr", "ldtr", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ + __get_user_asm("ldr", "ldtr", "%x", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ - uaccess_disable_not_uao(); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) #define __get_user_error(x, ptr, err) \ @@ -302,8 +297,13 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ + unsigned long __gu_val; \ + __chk_user_ptr(__p); \ __p = uaccess_mask_ptr(__p); \ - __raw_get_user((x), __p, (err)); \ + uaccess_enable_not_uao(); \ + __raw_get_user(__gu_val, __p, (err)); \ + uaccess_disable_not_uao(); \ + (x) = (__force __typeof__(*__p)) __gu_val; \ } else { \ (x) = 0; (err) = -EFAULT; \ } \ @@ -334,30 +334,26 @@ do { \ #define __raw_put_user(x, ptr, err) \ do { \ - __typeof__(*(ptr)) __pu_val = (x); \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ + __put_user_asm("strb", "sttrb", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ + __put_user_asm("strh", "sttrh", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \ + __put_user_asm("str", "sttr", "%w", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \ + __put_user_asm("str", "sttr", "%x", (x), (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ - uaccess_disable_not_uao(); \ } while (0) #define __put_user_error(x, ptr, err) \ @@ -365,9 +361,13 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ + __typeof__(*(ptr)) __pu_val = (x); \ + __chk_user_ptr(__p); \ __p = uaccess_mask_ptr(__p); \ - __raw_put_user((x), __p, (err)); \ - } else { \ + uaccess_enable_not_uao(); \ + __raw_put_user(__pu_val, __p, (err)); \ + uaccess_disable_not_uao(); \ + } else { \ (err) = -EFAULT; \ } \ } while (0) @@ -381,6 +381,45 @@ do { \ #define put_user __put_user +static __must_check inline bool user_access_begin(const void __user *ptr, + size_t len) +{ + if (unlikely(!access_ok(ptr, len))) + return false; + + uaccess_enable_not_uao(); + return true; +} +#define user_access_begin(ptr, len) 
user_access_begin(ptr, len) +#define user_access_end() uaccess_disable_not_uao() + +#define unsafe_get_user(x, ptr, err) \ +do { \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + unsigned long __gu_val; \ + int __gu_err = 0; \ + might_fault(); \ + __chk_user_ptr(__p); \ + __p = uaccess_mask_ptr(__p); \ + __raw_get_user(__gu_val, __p, __gu_err); \ + (x) = (__force __typeof__(*__p)) __gu_val; \ + if (__gu_err != 0) \ + goto err; \ +} while (0) + +#define unsafe_put_user(x, ptr, err) \ +do { \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __pu_err = 0; \ + might_fault(); \ + __chk_user_ptr(__p); \ + __p = uaccess_mask_ptr(__p); \ + __raw_put_user(__pu_val, __p, __pu_err); \ + if (__pu_err != 0) \ + goto err; \ +} while (0) + extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); #define raw_copy_from_user(to, from, n) \ ({ \ -- cgit v1.2.1 From c1c9d41319c35daa099b2e6cd1325e3ae55cfda8 Mon Sep 17 00:00:00 2001 From: Bai Ping Date: Tue, 22 Jan 2019 10:17:13 +0000 Subject: dt-bindings: imx: Add pinctrl binding doc for imx8mm Add binding doc imx8mm pinctrl driver. Signed-off-by: Bai Ping Acked-by: Aisheng Dong Signed-off-by: Linus Walleij --- arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h | 629 +++++++++++++++++++++++++ 1 file changed, 629 insertions(+) create mode 100644 arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h new file mode 100644 index 000000000000..e25f7fcd7997 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -0,0 +1,629 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2017-2018 NXP + */ + +#ifndef __DTS_IMX8MM_PINFUNC_H +#define __DTS_IMX8MM_PINFUNC_H + +/* + * The pin function ID is a tuple of + * + */ + +#define MX8MM_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x028 0x290 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO00_CCMSRCGPCMIX_ENET_PHY_REF_CLK_ROOT 0x028 0x290 0x4C0 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K 0x028 0x290 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO00_CCMSRCGPCMIX_EXT_CLK1 0x028 0x290 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO00_SJC_FAIL 0x028 0x290 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_GPIO1_IO1 0x02C 0x294 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_PWM1_OUT 0x02C 0x294 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_ANAMIX_REF_CLK_24M 0x02C 0x294 0x4BC 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_CCMSRCGPCMIX_EXT_CLK2 0x02C 0x294 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_SJC_ACTIVE 0x02C 0x294 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO02_GPIO1_IO2 0x030 0x298 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0x030 0x298 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_ANY 0x030 0x298 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO02_SJC_DE_B 0x030 0x298 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x034 0x29C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_USDHC1_VSELECT 0x034 0x29C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_SDMA1_EXT_EVENT0 0x034 0x29C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_ANAMIX_XTAL_OK 0x034 0x29C 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_SJC_DONE 0x034 0x29C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x038 0x2A0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x038 0x2A0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_SDMA1_EXT_EVENT1 0x038 0x2A0 0x000 0x5 0x0 +#define 
MX8MM_IOMUXC_GPIO1_IO04_ANAMIX_XTAL_OK_LV 0x038 0x2A0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_USDHC1_TEST_TRIG 0x038 0x2A0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_GPIO1_IO5 0x03C 0x2A4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_M4_NMI 0x03C 0x2A4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_CCMSRCGPCMIX_PMIC_READY 0x03C 0x2A4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_CCMSRCGPCMIX_INT_BOOT 0x03C 0x2A4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_USDHC2_TEST_TRIG 0x03C 0x2A4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x040 0x2A8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_ENET1_MDC 0x040 0x2A8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_USDHC1_CD_B 0x040 0x2A8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_CCMSRCGPCMIX_EXT_CLK3 0x040 0x2A8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_ECSPI1_TEST_TRIG 0x040 0x2A8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x044 0x2AC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_ENET1_MDIO 0x044 0x2AC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_USDHC1_WP 0x044 0x2AC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_CCMSRCGPCMIX_EXT_CLK4 0x044 0x2AC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_ECSPI2_TEST_TRIG 0x044 0x2AC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x048 0x2B0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_ENET1_1588_EVENT0_IN 0x048 0x2B0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_USDHC2_RESET_B 0x048 0x2B0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_CCMSRCGPCMIX_WAIT 0x048 0x2B0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_QSPI_TEST_TRIG 0x048 0x2B0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x04C 0x2B4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_ENET1_1588_EVENT0_OUT 0x04C 0x2B4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_SDMA2_EXT_EVENT0 0x04C 0x2B4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_CCMSRCGPCMIX_STOP 0x04C 0x2B4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_RAWNAND_TEST_TRIG 0x04C 0x2B4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x050 0x2B8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO10_USB1_OTG_ID 0x050 0x2B8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO10_OCOTP_CTRL_WRAPPER_FUSE_LATCHED 0x050 0x2B8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x054 0x2BC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO11_USB2_OTG_ID 0x054 0x2BC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO11_CCMSRCGPCMIX_PMIC_READY 0x054 0x2BC 0x4BC 0x5 0x1 +#define MX8MM_IOMUXC_GPIO1_IO11_CCMSRCGPCMIX_OUT0 0x054 0x2BC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO11_CAAM_WRAPPER_RNG_OSC_OBS 0x054 0x2BC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x058 0x2C0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x058 0x2C0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_SDMA2_EXT_EVENT1 0x058 0x2C0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_CCMSRCGPCMIX_OUT1 0x058 0x2C0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_CSU_CSU_ALARM_AUT0 0x058 0x2C0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x05C 0x2C4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x05C 0x2C4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_PWM2_OUT 0x05C 0x2C4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_CCMSRCGPCMIX_OUT2 0x05C 0x2C4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_CSU_CSU_ALARM_AUT1 0x05C 0x2C4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x060 0x2C8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_USB2_OTG_PWR 0x060 0x2C8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_PWM3_OUT 0x060 0x2C8 
0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_CCMSRCGPCMIX_CLKO1 0x060 0x2C8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_CSU_CSU_ALARM_AUT2 0x060 0x2C8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x064 0x2CC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_USB2_OTG_OC 0x064 0x2CC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_PWM4_OUT 0x064 0x2CC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_CCMSRCGPCMIX_CLKO2 0x064 0x2CC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_CSU_CSU_INT_DEB 0x064 0x2CC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x068 0x2D0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_MDC_GPIO1_IO16 0x068 0x2D0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x06C 0x2D4 0x4C0 0x0 0x1 +#define MX8MM_IOMUXC_ENET_MDIO_GPIO1_IO17 0x06C 0x2D4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x070 0x2D8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TD3_GPIO1_IO18 0x070 0x2D8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x074 0x2DC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TD2_ENET1_TX_CLK 0x074 0x2DC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ENET_TD2_GPIO1_IO19 0x074 0x2DC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x078 0x2E0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TD1_GPIO1_IO20 0x078 0x2E0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x07C 0x2E4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TD0_GPIO1_IO21 0x07C 0x2E4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x080 0x2E8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TX_CTL_GPIO1_IO22 0x080 0x2E8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x084 0x2EC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TXC_ENET1_TX_ER 0x084 0x2EC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ENET_TXC_GPIO1_IO23 0x084 0x2EC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x088 0x2F0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RX_CTL_GPIO1_IO24 0x088 0x2F0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x08C 0x2F4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RXC_ENET1_RX_ER 0x08C 0x2F4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ENET_RXC_GPIO1_IO25 0x08C 0x2F4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x090 0x2F8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RD0_GPIO1_IO26 0x090 0x2F8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x094 0x2FC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RD1_GPIO1_IO27 0x094 0x2FC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x098 0x300 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RD2_GPIO1_IO28 0x098 0x300 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x09C 0x304 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RD3_GPIO1_IO29 0x09C 0x304 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x0A0 0x308 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_CLK_GPIO2_IO0 0x0A0 0x308 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA2_GPIO2_IO4 0x0B0 0x318 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x0B4 0x31C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA3_GPIO2_IO5 0x0B4 0x31C 0x000 0x5 0x0 +#define 
MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x0B8 0x320 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA4_GPIO2_IO6 0x0B8 0x320 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x0BC 0x324 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA5_GPIO2_IO7 0x0BC 0x324 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x0C0 0x328 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA6_GPIO2_IO8 0x0C0 0x328 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x0C4 0x32C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA7_GPIO2_IO9 0x0C4 0x32C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0x0C8 0x330 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_RESET_B_GPIO2_IO10 0x0C8 0x330 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x0CC 0x334 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_STROBE_GPIO2_IO11 0x0CC 0x334 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_CD_B_USDHC2_CD_B 0x0D0 0x338 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x0D0 0x338 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0D4 0x33C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_CLK_GPIO2_IO13 0x0D4 0x33C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_CLK_CCMSRCGPCMIX_OBSERVE0 0x0D4 0x33C 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_CLK_OBSERVE_MUX_OUT0 0x0D4 0x33C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0D8 0x340 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_CMD_GPIO2_IO14 0x0D8 0x340 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_CMD_CCMSRCGPCMIX_OBSERVE1 0x0D8 0x340 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_CMD_OBSERVE_MUX_OUT1 0x0D8 0x340 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0DC 0x344 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_DATA0_GPIO2_IO15 0x0DC 0x344 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_DATA0_CCMSRCGPCMIX_OBSERVE2 0x0DC 0x344 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_DATA0_OBSERVE_MUX_OUT2 0x0DC 0x344 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x0E0 0x348 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_DATA1_GPIO2_IO16 0x0E0 0x348 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_DATA1_CCMSRCGPCMIX_WAIT 0x0E0 0x348 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_DATA1_OBSERVE_MUX_OUT3 0x0E0 0x348 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x0E4 0x34C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_DATA2_GPIO2_IO17 0x0E4 0x34C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_DATA2_CCMSRCGPCMIX_STOP 0x0E4 0x34C 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_DATA2_OBSERVE_MUX_OUT4 0x0E4 0x34C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x0E8 0x350 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_DATA3_GPIO2_IO18 0x0E8 0x350 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_DATA3_CCMSRCGPCMIX_EARLY_RESET 0x0E8 0x350 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_RESET_B_USDHC2_RESET_B 0x0EC 0x354 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x0EC 0x354 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_RESET_B_CCMSRCGPCMIX_SYSTEM_RESET 0x0EC 0x354 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_WP_USDHC2_WP 0x0F0 0x358 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_WP_GPIO2_IO20 0x0F0 0x358 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_WP_SIM_M_HMASTLOCK 0x0F0 0x358 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_ALE_RAWNAND_ALE 0x0F4 0x35C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x0F4 0x35C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_ALE_GPIO3_IO0 0x0F4 0x35C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_ALE_SIM_M_HPROT0 0x0F4 0x35C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CE0_B_RAWNAND_CE0_B 0x0F8 0x360 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x0F8 0x360 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x0F8 0x360 
0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CE0_B_SIM_M_HPROT1 0x0F8 0x360 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_RAWNAND_CE1_B 0x0FC 0x364 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_QSPI_A_SS1_B 0x0FC 0x364 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x0FC 0x364 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x0FC 0x364 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_SIM_M_HPROT2 0x0FC 0x364 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_RAWNAND_CE2_B 0x100 0x368 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_QSPI_B_SS0_B 0x100 0x368 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x100 0x368 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_GPIO3_IO3 0x100 0x368 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_SIM_M_HPROT3 0x100 0x368 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_RAWNAND_CE3_B 0x104 0x36C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_QSPI_B_SS1_B 0x104 0x36C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x104 0x36C 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_GPIO3_IO4 0x104 0x36C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_SIM_M_HADDR0 0x104 0x36C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CLE_RAWNAND_CLE 0x108 0x370 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CLE_QSPI_B_SCLK 0x108 0x370 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x108 0x370 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x108 0x370 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CLE_SIM_M_HADDR1 0x108 0x370 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA00_RAWNAND_DATA00 0x10C 0x374 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x10C 0x374 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA00_GPIO3_IO6 0x10C 0x374 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA00_SIM_M_HADDR2 0x10C 0x374 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA01_RAWNAND_DATA01 0x110 0x378 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x110 0x378 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7 0x110 0x378 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA01_SIM_M_HADDR3 0x110 0x378 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA02_RAWNAND_DATA02 0x114 0x37C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x114 0x37C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA02_GPIO3_IO8 0x114 0x37C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA02_SIM_M_HADDR4 0x114 0x37C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA03_RAWNAND_DATA03 0x118 0x380 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x118 0x380 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA03_GPIO3_IO9 0x118 0x380 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA03_SIM_M_HADDR5 0x118 0x380 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_RAWNAND_DATA04 0x11C 0x384 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_QSPI_B_DATA0 0x11C 0x384 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x11C 0x384 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_GPIO3_IO10 0x11C 0x384 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_SIM_M_HADDR6 0x11C 0x384 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_RAWNAND_DATA05 0x120 0x388 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_QSPI_B_DATA1 0x120 0x388 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x120 0x388 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_GPIO3_IO11 0x120 0x388 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_SIM_M_HADDR7 0x120 0x388 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA06_RAWNAND_DATA06 0x124 0x38C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA06_QSPI_B_DATA2 0x124 0x38C 0x000 0x1 0x0 
+#define MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x124 0x38C 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_DATA06_GPIO3_IO12 0x124 0x38C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA06_SIM_M_HADDR8 0x124 0x38C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_RAWNAND_DATA07 0x128 0x390 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_QSPI_B_DATA3 0x128 0x390 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x128 0x390 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_GPIO3_IO13 0x128 0x390 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_SIM_M_HADDR9 0x128 0x390 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DQS_RAWNAND_DQS 0x12C 0x394 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DQS_QSPI_A_DQS 0x12C 0x394 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DQS_GPIO3_IO14 0x12C 0x394 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DQS_SIM_M_HADDR10 0x12C 0x394 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_RAWNAND_RE_B 0x130 0x398 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_QSPI_B_DQS 0x130 0x398 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x130 0x398 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x130 0x398 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_SIM_M_HADDR11 0x130 0x398 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_READY_B_RAWNAND_READY_B 0x134 0x39C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_READY_B_GPIO3_IO16 0x134 0x39C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_READY_B_SIM_M_HADDR12 0x134 0x39C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_WE_B_RAWNAND_WE_B 0x138 0x3A0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x138 0x3A0 0x000 0x12 0x0 +#define MX8MM_IOMUXC_NAND_WE_B_GPIO3_IO17 0x138 0x3A0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_WE_B_SIM_M_HADDR13 0x138 0x3A0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_WP_B_RAWNAND_WP_B 0x13C 0x3A4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x13C 0x3A4 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_WP_B_GPIO3_IO18 0x13C 0x3A4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_WP_B_SIM_M_HADDR14 0x13C 0x3A4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI5_RXFS_SAI5_RX_SYNC 0x140 0x3A8 0x4E4 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXFS_SAI1_TX_DATA0 0x140 0x3A8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXFS_GPIO3_IO19 0x140 0x3A8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXC_SAI5_RX_BCLK 0x144 0x3AC 0x4D0 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXC_SAI1_TX_DATA1 0x144 0x3AC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXC_PDM_CLK 0x144 0x3AC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x144 0x3AC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXD0_SAI5_RX_DATA0 0x148 0x3B0 0x4D4 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXD0_SAI1_TX_DATA2 0x148 0x3B0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXD0_PDM_DATA0 0x148 0x3B0 0x534 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXD0_GPIO3_IO21 0x148 0x3B0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_SAI5_RX_DATA1 0x14C 0x3B4 0x4D8 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_SAI1_TX_DATA3 0x14C 0x3B4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_SAI1_TX_SYNC 0x14C 0x3B4 0x4CC 0x2 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_SAI5_TX_SYNC 0x14C 0x3B4 0x4EC 0x3 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_PDM_DATA1 0x14C 0x3B4 0x538 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_GPIO3_IO22 0x14C 0x3B4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXD2_SAI5_RX_DATA2 0x150 0x3B8 0x4DC 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXD2_SAI1_TX_DATA4 0x150 0x3B8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXD2_SAI1_TX_SYNC 0x150 0x3B8 0x4CC 0x2 0x1 +#define MX8MM_IOMUXC_SAI5_RXD2_SAI5_TX_BCLK 0x150 0x3B8 0x4E8 0x3 0x0 +#define MX8MM_IOMUXC_SAI5_RXD2_PDM_DATA2 0x150 0x3B8 0x53c 0x4 0x0 +#define 
MX8MM_IOMUXC_SAI5_RXD2_GPIO3_IO23 0x150 0x3B8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_SAI5_RX_DATA3 0x154 0x3BC 0x4E0 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_SAI1_TX_DATA5 0x154 0x3BC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_SAI1_TX_SYNC 0x154 0x3BC 0x4CC 0x2 0x2 +#define MX8MM_IOMUXC_SAI5_RXD3_SAI5_TX_DATA0 0x154 0x3BC 0x000 0x3 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_PDM_DATA3 0x154 0x3BC 0x540 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_GPIO3_IO24 0x154 0x3BC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_SAI5_MCLK 0x158 0x3C0 0x52C 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_SAI1_TX_BCLK 0x158 0x3C0 0x4C8 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_SAI4_MCLK 0x158 0x3C0 0x000 0x2 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x158 0x3C0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_CCMSRCGPCMIX_TESTER_ACK 0x158 0x3C0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXFS_SAI1_RX_SYNC 0x15C 0x3C4 0x4C4 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXFS_SAI5_RX_SYNC 0x15C 0x3C4 0x4E4 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXFS_CORESIGHT_TRACE_CLK 0x15C 0x3C4 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x15C 0x3C4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXFS_SIM_M_HADDR15 0x15C 0x3C4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXC_SAI1_RX_BCLK 0x160 0x3C8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXC_SAI5_RX_BCLK 0x160 0x3C8 0x4D0 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXC_CORESIGHT_TRACE_CTL 0x160 0x3C8 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXC_GPIO4_IO1 0x160 0x3C8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXC_SIM_M_HADDR16 0x160 0x3C8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_SAI1_RX_DATA0 0x164 0x3CC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_SAI5_RX_DATA0 0x164 0x3CC 0x4D4 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXD0_PDM_DATA0 0x164 0x3CC 0x534 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD0_CORESIGHT_TRACE0 0x164 0x3CC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_GPIO4_IO2 0x164 0x3CC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_CCMSRCGPCMIX_BOOT_CFG0 0x164 0x3CC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_SIM_M_HADDR17 0x164 0x3CC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_SAI1_RX_DATA1 0x168 0x3D0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_SAI5_RX_DATA1 0x168 0x3D0 0x4D8 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXD1_PDM_DATA1 0x168 0x3D0 0x538 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD1_CORESIGHT_TRACE1 0x168 0x3D0 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_GPIO4_IO3 0x168 0x3D0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_CCMSRCGPCMIX_BOOT_CFG1 0x168 0x3D0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_SIM_M_HADDR18 0x168 0x3D0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_SAI1_RX_DATA2 0x16C 0x3D4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_SAI5_RX_DATA2 0x16C 0x3D4 0x4DC 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXD2_PDM_DATA2 0x16C 0x3D4 0x53C 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD2_CORESIGHT_TRACE2 0x16C 0x3D4 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_GPIO4_IO4 0x16C 0x3D4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_CCMSRCGPCMIX_BOOT_CFG2 0x16C 0x3D4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_SIM_M_HADDR19 0x16C 0x3D4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD3_SAI1_RX_DATA3 0x170 0x3D8 0x4E0 0x0 0x1 +#define MX8MM_IOMUXC_SAI1_RXD3_SAI5_RX_DATA3 0x170 0x3D8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD3_PDM_DATA3 0x170 0x3D8 0x540 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD3_CORESIGHT_TRACE3 0x170 0x3D8 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x170 0x3D8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD3_CCMSRCGPCMIX_BOOT_CFG3 0x170 0x3D8 0x000 0x6 0x0 +#define 
MX8MM_IOMUXC_SAI1_RXD3_SIM_M_HADDR20 0x170 0x3D8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_SAI1_RX_DATA4 0x174 0x3DC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_SAI6_TX_BCLK 0x174 0x3DC 0x51C 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_SAI6_RX_BCLK 0x174 0x3DC 0x510 0x2 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_CORESIGHT_TRACE4 0x174 0x3DC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_GPIO4_IO6 0x174 0x3DC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_CCMSRCGPCMIX_BOOT_CFG4 0x174 0x3DC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_SIM_M_HADDR21 0x174 0x3DC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SAI1_RX_DATA5 0x178 0x3E0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SAI6_TX_DATA0 0x178 0x3E0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SAI6_RX_DATA0 0x178 0x3E0 0x514 0x2 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SAI1_RX_SYNC 0x178 0x3E0 0x4C4 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD5_CORESIGHT_TRACE5 0x178 0x3E0 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x178 0x3E0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_CCMSRCGPCMIX_BOOT_CFG5 0x178 0x3E0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SIM_M_HADDR22 0x178 0x3E0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_SAI1_RX_DATA6 0x17C 0x3E4 0x520 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_SAI6_TX_SYNC 0x17C 0x3E4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_SAI6_RX_SYNC 0x17C 0x3E4 0x518 0x2 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_CORESIGHT_TRACE6 0x17C 0x3E4 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_GPIO4_IO8 0x17C 0x3E4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_CCMSRCGPCMIX_BOOT_CFG6 0x17C 0x3E4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_SIM_M_HADDR23 0x17C 0x3E4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_RX_DATA7 0x180 0x3E8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_SAI6_MCLK 0x180 0x3E8 0x530 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_TX_SYNC 0x180 0x3E8 0x4CC 0x2 0x4 +#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_TX_DATA4 0x180 0x3E8 0x000 0x3 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_CORESIGHT_TRACE7 0x180 0x3E8 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_GPIO4_IO9 0x180 0x3E8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_CCMSRCGPCMIX_BOOT_CFG7 0x180 0x3E8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_SIM_M_HADDR24 0x180 0x3E8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXFS_SAI1_TX_SYNC 0x184 0x3EC 0x4CC 0x0 0x3 +#define MX8MM_IOMUXC_SAI1_TXFS_SAI5_TX_SYNC 0x184 0x3EC 0x4EC 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXFS_CORESIGHT_EVENTO 0x184 0x3EC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXFS_GPIO4_IO10 0x184 0x3EC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXFS_SIM_M_HADDR25 0x184 0x3EC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXC_SAI1_TX_BCLK 0x188 0x3F0 0x4C8 0x0 0x1 +#define MX8MM_IOMUXC_SAI1_TXC_SAI5_TX_BCLK 0x188 0x3F0 0x4E8 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXC_CORESIGHT_EVENTI 0x188 0x3F0 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXC_GPIO4_IO11 0x188 0x3F0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXC_SIM_M_HADDR26 0x188 0x3F0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_SAI1_TX_DATA0 0x18C 0x3F4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_SAI5_TX_DATA0 0x18C 0x3F4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_CORESIGHT_TRACE8 0x18C 0x3F4 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_GPIO4_IO12 0x18C 0x3F4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_CCMSRCGPCMIX_BOOT_CFG8 0x18C 0x3F4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_SIM_M_HADDR27 0x18C 0x3F4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_SAI1_TX_DATA1 0x190 0x3F8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_SAI5_TX_DATA1 0x190 0x3F8 0x000 0x1 0x0 +#define 
MX8MM_IOMUXC_SAI1_TXD1_CORESIGHT_TRACE9 0x190 0x3F8 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_GPIO4_IO13 0x190 0x3F8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_CCMSRCGPCMIX_BOOT_CFG9 0x190 0x3F8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_SIM_M_HADDR28 0x190 0x3F8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_SAI1_TX_DATA2 0x194 0x3FC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_SAI5_TX_DATA2 0x194 0x3FC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_CORESIGHT_TRACE10 0x194 0x3FC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x194 0x3FC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_CCMSRCGPCMIX_BOOT_CFG10 0x194 0x3FC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_SIM_M_HADDR29 0x194 0x3FC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_SAI1_TX_DATA3 0x198 0x400 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_SAI5_TX_DATA3 0x198 0x400 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_CORESIGHT_TRACE11 0x198 0x400 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x198 0x400 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_CCMSRCGPCMIX_BOOT_CFG11 0x198 0x400 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_SIM_M_HADDR30 0x198 0x400 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_SAI1_TX_DATA4 0x19C 0x404 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_SAI6_RX_BCLK 0x19C 0x404 0x510 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXD4_SAI6_TX_BCLK 0x19C 0x404 0x51C 0x2 0x1 +#define MX8MM_IOMUXC_SAI1_TXD4_CORESIGHT_TRACE12 0x19C 0x404 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x19C 0x404 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_CCMSRCGPCMIX_BOOT_CFG12 0x19C 0x404 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_SIM_M_HADDR31 0x19C 0x404 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_SAI1_TX_DATA5 0x1A0 0x408 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_SAI6_RX_DATA0 0x1A0 0x408 0x514 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXD5_SAI6_TX_DATA0 0x1A0 0x408 0x000 0x2 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_CORESIGHT_TRACE13 0x1A0 0x408 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_GPIO4_IO17 0x1A0 0x408 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_CCMSRCGPCMIX_BOOT_CFG13 0x1A0 0x408 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_SIM_M_HBURST0 0x1A0 0x408 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_SAI1_TX_DATA6 0x1A4 0x40C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_SAI6_RX_SYNC 0x1A4 0x40C 0x518 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXD6_SAI6_TX_SYNC 0x1A4 0x40C 0x520 0x2 0x1 +#define MX8MM_IOMUXC_SAI1_TXD6_CORESIGHT_TRACE14 0x1A4 0x40C 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_GPIO4_IO18 0x1A4 0x40C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_CCMSRCGPCMIX_BOOT_CFG14 0x1A4 0x40C 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_SIM_M_HBURST1 0x1A4 0x40C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_SAI1_TX_DATA7 0x1A8 0x410 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_SAI6_MCLK 0x1A8 0x410 0x530 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXD7_PDM_CLK 0x1A8 0x410 0x000 0x3 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_CORESIGHT_TRACE15 0x1A8 0x410 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_GPIO4_IO19 0x1A8 0x410 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_CCMSRCGPCMIX_BOOT_CFG15 0x1A8 0x410 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_SIM_M_HBURST2 0x1A8 0x410 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_MCLK_SAI1_MCLK 0x1AC 0x414 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_MCLK_SAI5_MCLK 0x1AC 0x414 0x52C 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_MCLK_SAI1_TX_BCLK 0x1AC 0x414 0x4C8 0x2 0x2 +#define MX8MM_IOMUXC_SAI1_MCLK_PDM_CLK 0x1AC 0x414 0x000 0x3 0x0 +#define MX8MM_IOMUXC_SAI1_MCLK_GPIO4_IO20 0x1AC 0x414 0x000 
0x5 0x0 +#define MX8MM_IOMUXC_SAI1_MCLK_SIM_M_HRESP 0x1AC 0x414 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_RXFS_SAI2_RX_SYNC 0x1B0 0x418 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_RXFS_SAI5_TX_SYNC 0x1B0 0x418 0x4EC 0x1 0x2 +#define MX8MM_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x1B0 0x418 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_RXFS_SIM_M_HSIZE0 0x1B0 0x418 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_RXC_SAI2_RX_BCLK 0x1B4 0x41C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_RXC_SAI5_TX_BCLK 0x1B4 0x41C 0x4E8 0x1 0x2 +#define MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x1B4 0x41C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_RXC_SIM_M_HSIZE1 0x1B4 0x41C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0 0x1B8 0x420 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_RXD0_SAI5_TX_DATA0 0x1B8 0x420 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI2_RXD0_GPIO4_IO23 0x1B8 0x420 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_RXD0_SIM_M_HSIZE2 0x1B8 0x420 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0x1BC 0x424 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_TXFS_SAI5_TX_DATA1 0x1BC 0x424 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI2_TXFS_GPIO4_IO24 0x1BC 0x424 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_TXFS_SIM_M_HWRITE 0x1BC 0x424 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0x1C0 0x428 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_TXC_SAI5_TX_DATA2 0x1C0 0x428 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI2_TXC_GPIO4_IO25 0x1C0 0x428 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_TXC_SIM_M_HREADYOUT 0x1C0 0x428 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0x1C4 0x42C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_TXD0_SAI5_TX_DATA3 0x1C4 0x42C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI2_TXD0_GPIO4_IO26 0x1C4 0x42C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_TXD0_TPSMP_CLK 0x1C4 0x42C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_MCLK_SAI2_MCLK 0x1C8 0x430 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_MCLK_SAI5_MCLK 0x1C8 0x430 0x52C 0x1 0x2 +#define MX8MM_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x1C8 0x430 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_MCLK_TPSMP_HDATA_DIR 0x1C8 0x430 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_RXFS_SAI3_RX_SYNC 0x1CC 0x434 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_RXFS_GPT1_CAPTURE1 0x1CC 0x434 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_RXFS_SAI5_RX_SYNC 0x1CC 0x434 0x4E4 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0x1D4 0x43C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_RXD_GPT1_COMPARE1 0x1D4 0x43C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_RXD_SAI5_RX_DATA0 0x1D4 0x43C 0x4D4 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0x1DC 0x444 0x000 0x0 0x0 +#define 
MX8MM_IOMUXC_SAI3_TXC_GPT1_COMPARE2 0x1DC 0x444 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_TXC_SAI5_RX_DATA2 0x1DC 0x444 0x4DC 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_TXC_GPIO5_IO0 0x1DC 0x444 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_TXC_TPSMP_HDATA2 0x1DC 0x444 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0x1E0 0x448 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_TXD_GPT1_COMPARE3 0x1E0 0x448 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_TXD_SAI5_RX_DATA3 0x1E0 0x448 0x4E0 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_TXD_GPIO5_IO1 0x1E0 0x448 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_TXD_TPSMP_HDATA3 0x1E0 0x448 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_MCLK_SAI3_MCLK 0x1E4 0x44C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_MCLK_PWM4_OUT 0x1E4 0x44C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_MCLK_SAI5_MCLK 0x1E4 0x44C 0x52C 0x2 0x3 +#define MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2 0x1E4 0x44C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_MCLK_TPSMP_HDATA4 0x1E4 0x44C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SPDIF_TX_SPDIF1_OUT 0x1E8 0x450 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SPDIF_TX_PWM3_OUT 0x1E8 0x450 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SPDIF_TX_GPIO5_IO3 0x1E8 0x450 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SPDIF_TX_TPSMP_HDATA5 0x1E8 0x450 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SPDIF_RX_SPDIF1_IN 0x1EC 0x454 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SPDIF_RX_PWM2_OUT 0x1EC 0x454 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x1EC 0x454 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SPDIF_RX_TPSMP_HDATA6 0x1EC 0x454 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SPDIF_EXT_CLK_SPDIF1_EXT_CLK 0x1F0 0x458 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SPDIF_EXT_CLK_PWM1_OUT 0x1F0 0x458 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x1F0 0x458 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SPDIF_EXT_CLK_TPSMP_HDATA7 0x1F0 0x458 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x1F4 0x45C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x1F4 0x45C 0x504 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DTE_TX 0x1F4 0x45C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_GPIO5_IO6 0x1F4 0x45C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_TPSMP_HDATA8 0x1F4 0x45C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x1F8 0x460 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x1F8 0x460 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DTE_RX 0x1F8 0x460 0x504 0x1 0x1 +#define MX8MM_IOMUXC_ECSPI1_MOSI_GPIO5_IO7 0x1F8 0x460 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI1_MOSI_TPSMP_HDATA9 0x1F8 0x460 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x1FC 0x464 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x1FC 0x464 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_UART3_DTE_RTS_B 0x1FC 0x464 0x500 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_GPIO5_IO8 0x1FC 0x464 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_TPSMP_HDATA10 0x1FC 0x464 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI1_SS0_ECSPI1_SS0 0x200 0x468 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x200 0x468 0x500 0x1 0x1 +#define MX8MM_IOMUXC_ECSPI1_SS0_UART3_DTE_CTS_B 0x200 0x468 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x200 0x468 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI1_SS0_TPSMP_HDATA11 0x200 0x468 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x204 0x46C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI2_SCLK_UART4_DCE_RX 0x204 0x46C 0x50C 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_SCLK_UART4_DTE_TX 0x204 0x46C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_SCLK_GPIO5_IO10 0x204 0x46C 0x000 0x5 0x0 +#define 
MX8MM_IOMUXC_ECSPI2_SCLK_TPSMP_HDATA12 0x204 0x46C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x208 0x470 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI2_MOSI_UART4_DCE_TX 0x208 0x470 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_MOSI_UART4_DTE_RX 0x208 0x470 0x50C 0x1 0x1 +#define MX8MM_IOMUXC_ECSPI2_MOSI_GPIO5_IO11 0x208 0x470 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI2_MOSI_TPSMP_HDATA13 0x208 0x470 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x20C 0x474 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_UART4_DCE_CTS_B 0x20C 0x474 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_UART4_DTE_RTS_B 0x20C 0x474 0x508 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_GPIO5_IO12 0x20C 0x474 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_TPSMP_HDATA14 0x20C 0x474 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI2_SS0_ECSPI2_SS0 0x210 0x478 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI2_SS0_UART4_DCE_RTS_B 0x210 0x478 0x508 0x1 0x1 +#define MX8MM_IOMUXC_ECSPI2_SS0_UART4_DTE_CTS_B 0x210 0x478 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x210 0x478 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI2_SS0_TPSMP_HDATA15 0x210 0x478 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x214 0x47C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C1_SCL_ENET1_MDC 0x214 0x47C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C1_SCL_GPIO5_IO14 0x214 0x47C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C1_SCL_TPSMP_HDATA16 0x214 0x47C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x218 0x480 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C1_SDA_ENET1_MDIO 0x218 0x480 0x4C0 0x1 0x2 +#define MX8MM_IOMUXC_I2C1_SDA_GPIO5_IO15 0x218 0x480 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C1_SDA_TPSMP_HDATA17 0x218 0x480 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x21C 0x484 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C2_SCL_ENET1_1588_EVENT1_IN 0x21C 0x484 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C2_SCL_GPIO5_IO16 0x21C 0x484 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C2_SCL_TPSMP_HDATA18 0x21C 0x484 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x220 0x488 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C2_SDA_ENET1_1588_EVENT1_OUT 0x220 0x488 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C2_SDA_GPIO5_IO17 0x220 0x488 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C2_SDA_TPSMP_HDATA19 0x220 0x488 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x224 0x48C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_PWM4_OUT 0x224 0x48C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_GPT2_CLK 0x224 0x48C 0x000 0x2 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_GPIO5_IO18 0x224 0x48C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_TPSMP_HDATA20 0x224 0x48C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x228 0x490 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_PWM3_OUT 0x228 0x490 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_GPT3_CLK 0x228 0x490 0x000 0x2 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_GPIO5_IO19 0x228 0x490 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_TPSMP_HDATA21 0x228 0x490 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x22C 0x494 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_PWM2_OUT 0x22C 0x494 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_PCIE1_CLKREQ_B 0x22C 0x494 0x524 0x12 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_GPIO5_IO20 0x22C 0x494 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_TPSMP_HDATA22 0x22C 0x494 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x230 0x498 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_PWM1_OUT 0x230 0x498 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_PCIE2_CLKREQ_B 0x230 0x498 0x528 0x2 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_GPIO5_IO21 0x230 0x498 0x000 
0x5 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_TPSMP_HDATA23 0x230 0x498 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x234 0x49C 0x4F4 0x0 0x0 +#define MX8MM_IOMUXC_UART1_RXD_UART1_DTE_TX 0x234 0x49C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART1_RXD_ECSPI3_SCLK 0x234 0x49C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0 +#define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x23C 0x4A4 0x4FC 0x0 0x0 +#define MX8MM_IOMUXC_UART2_RXD_UART2_DTE_TX 0x23C 0x4A4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART2_RXD_ECSPI3_MISO 0x23C 0x4A4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART2_RXD_GPIO5_IO24 0x23C 0x4A4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART2_RXD_TPSMP_HDATA26 0x23C 0x4A4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x240 0x4A8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART2_TXD_UART2_DTE_RX 0x240 0x4A8 0x4FC 0x0 0x1 +#define MX8MM_IOMUXC_UART2_TXD_ECSPI3_SS0 0x240 0x4A8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART2_TXD_GPIO5_IO25 0x240 0x4A8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART2_TXD_TPSMP_HDATA27 0x240 0x4A8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x244 0x4AC 0x504 0x0 0x2 +#define MX8MM_IOMUXC_UART3_RXD_UART3_DTE_TX 0x244 0x4AC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x244 0x4AC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART3_RXD_UART1_DTE_RTS_B 0x244 0x4AC 0x4F0 0x1 0x0 +#define MX8MM_IOMUXC_UART3_RXD_GPIO5_IO26 0x244 0x4AC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART3_RXD_TPSMP_HDATA28 0x244 0x4AC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x248 0x4B0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART3_TXD_UART3_DTE_RX 0x248 0x4B0 0x504 0x0 0x3 +#define MX8MM_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x248 0x4B0 0x4F0 0x1 0x1 +#define MX8MM_IOMUXC_UART3_TXD_UART1_DTE_CTS_B 0x248 0x4B0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART3_TXD_GPIO5_IO27 0x248 0x4B0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART3_TXD_TPSMP_HDATA29 0x248 0x4B0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x24C 0x4B4 0x50C 0x0 0x2 +#define MX8MM_IOMUXC_UART4_RXD_UART4_DTE_TX 0x24C 0x4B4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART4_RXD_UART2_DCE_CTS_B 0x24C 0x4B4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART4_RXD_UART2_DTE_RTS_B 0x24C 0x4B4 0x4F8 0x1 0x0 +#define MX8MM_IOMUXC_UART4_RXD_PCIE1_CLKREQ_B 0x24C 0x4B4 0x524 0x2 0x1 +#define MX8MM_IOMUXC_UART4_RXD_GPIO5_IO28 0x24C 0x4B4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART4_RXD_TPSMP_HDATA30 0x24C 0x4B4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x250 0x4B8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART4_TXD_UART4_DTE_RX 0x250 0x4B8 0x50C 0x0 0x3 +#define MX8MM_IOMUXC_UART4_TXD_UART2_DCE_RTS_B 0x250 0x4B8 0x4F8 0x1 0x1 +#define MX8MM_IOMUXC_UART4_TXD_UART2_DTE_CTS_B 0x250 0x4B8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART4_TXD_PCIE2_CLKREQ_B 0x250 0x4B8 0x528 0x2 0x1 +#define MX8MM_IOMUXC_UART4_TXD_GPIO5_IO29 0x250 0x4B8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART4_TXD_TPSMP_HDATA31 0x250 0x4B8 0x000 0x7 0x0 + +#endif /* __DTS_IMX8MM_PINFUNC_H */ -- cgit v1.2.1 From e2a2e56e40822ab78e304198387f61314af7d7ce Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 22 Jan 2019 15:41:11 +0100 
Subject: arm64: dump: no need to check return value of debugfs_create functions When calling debugfs functions, there is no need to ever check the return value. The function can work or not, but the code logic should never do something different based on this. Cc: Will Deacon Cc: Marc Zyngier Cc: Peng Donglin Signed-off-by: Greg Kroah-Hartman Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/ptdump.h | 9 +++------ arch/arm64/mm/dump.c | 4 ++-- arch/arm64/mm/ptdump_debugfs.c | 7 ++----- 3 files changed, 7 insertions(+), 13 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h index 6afd8476c60c..9e948a93d26c 100644 --- a/arch/arm64/include/asm/ptdump.h +++ b/arch/arm64/include/asm/ptdump.h @@ -34,13 +34,10 @@ struct ptdump_info { void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info); #ifdef CONFIG_ARM64_PTDUMP_DEBUGFS -int ptdump_debugfs_register(struct ptdump_info *info, const char *name); +void ptdump_debugfs_register(struct ptdump_info *info, const char *name); #else -static inline int ptdump_debugfs_register(struct ptdump_info *info, - const char *name) -{ - return 0; -} +static inline void ptdump_debugfs_register(struct ptdump_info *info, + const char *name) { } #endif void ptdump_check_wx(void); #endif /* CONFIG_ARM64_PTDUMP_CORE */ diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index fcb1f2a6d7c6..08c250350b8a 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -407,7 +407,7 @@ void ptdump_check_wx(void) static int ptdump_init(void) { ptdump_initialize(); - return ptdump_debugfs_register(&kernel_ptdump_info, - "kernel_page_tables"); + ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); + return 0; } device_initcall(ptdump_init); diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 24d786fc3a4c..064163f25592 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -12,10 +12,7 @@ static int ptdump_show(struct seq_file *m, void *v) } DEFINE_SHOW_ATTRIBUTE(ptdump); -int ptdump_debugfs_register(struct ptdump_info *info, const char *name) +void ptdump_debugfs_register(struct ptdump_info *info, const char *name) { - struct dentry *pe; - pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); - return pe ? 0 : -ENOMEM; - + debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); } -- cgit v1.2.1 From d0a060be573bfbf8753a15dca35497db5e968bb0 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Wed, 30 Jan 2019 12:02:44 +0000 Subject: arm64: add ptrace regsets for ptrauth key management Add two new ptrace regsets, which can be used to request and change the pointer authentication keys of a thread. NT_ARM_PACA_KEYS gives access to the instruction/data address keys, and NT_ARM_PACG_KEYS to the generic authentication key. The keys are also part of the core dump file of the process. The regsets are only exposed if the kernel is compiled with CONFIG_CHECKPOINT_RESTORE=y, as the only intended use case is checkpointing and restoring processes that are using pointer authentication. (This can be changed later if there are other use cases.)
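For illustration, a hypothetical tracer-side sketch of how these regsets could be consumed (not part of the patch; it assumes a tracee already stopped under ptrace and a <linux/elf.h> that provides NT_ARM_PACA_KEYS):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>	/* NT_ARM_PACA_KEYS, assumed available */

/* Mirrors struct user_pac_address_keys from the uapi diff below. */
struct user_pac_address_keys {
	__uint128_t apiakey;
	__uint128_t apibkey;
	__uint128_t apdakey;
	__uint128_t apdbkey;
};

static int dump_paca_keys(pid_t tracee)
{
	struct user_pac_address_keys keys;
	struct iovec iov = { .iov_base = &keys, .iov_len = sizeof(keys) };

	/* The regset rejects the request if address auth is unsupported. */
	if (ptrace(PTRACE_GETREGSET, tracee, NT_ARM_PACA_KEYS, &iov) != 0)
		return -1;

	/* Cast truncates to the low 64 bits of the 128-bit key. */
	printf("APIAKey (low 64 bits): %016llx\n",
	       (unsigned long long)keys.apiakey);
	return 0;
}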
Reviewed-by: Dave Martin Signed-off-by: Kristina Martsenko Signed-off-by: Catalin Marinas --- arch/arm64/include/uapi/asm/ptrace.h | 13 ++++ arch/arm64/kernel/ptrace.c | 147 +++++++++++++++++++++++++++++++++++ 2 files changed, 160 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 28d77c9ed531..d78623acb649 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -233,6 +233,19 @@ struct user_pac_mask { __u64 insn_mask; }; +/* pointer authentication keys (NT_ARM_PACA_KEYS, NT_ARM_PACG_KEYS) */ + +struct user_pac_address_keys { + __uint128_t apiakey; + __uint128_t apibkey; + __uint128_t apdakey; + __uint128_t apdbkey; +}; + +struct user_pac_generic_keys { + __uint128_t apgakey; +}; + #endif /* __ASSEMBLY__ */ #endif /* _UAPI__ASM_PTRACE_H */ diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9dce33b0e260..a86413be5a2d 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -979,6 +979,131 @@ static int pac_mask_get(struct task_struct *target, return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1); } + +#ifdef CONFIG_CHECKPOINT_RESTORE +static __uint128_t pac_key_to_user(const struct ptrauth_key *key) +{ + return (__uint128_t)key->hi << 64 | key->lo; +} + +static struct ptrauth_key pac_key_from_user(__uint128_t ukey) +{ + struct ptrauth_key key = { + .lo = (unsigned long)ukey, + .hi = (unsigned long)(ukey >> 64), + }; + + return key; +} + +static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, + const struct ptrauth_keys *keys) +{ + ukeys->apiakey = pac_key_to_user(&keys->apia); + ukeys->apibkey = pac_key_to_user(&keys->apib); + ukeys->apdakey = pac_key_to_user(&keys->apda); + ukeys->apdbkey = pac_key_to_user(&keys->apdb); +} + +static void pac_address_keys_from_user(struct ptrauth_keys *keys, + const struct user_pac_address_keys *ukeys) +{ + keys->apia = pac_key_from_user(ukeys->apiakey); + keys->apib = pac_key_from_user(ukeys->apibkey); + keys->apda = pac_key_from_user(ukeys->apdakey); + keys->apdb = pac_key_from_user(ukeys->apdbkey); +} + +static int pac_address_keys_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_address_keys user_keys; + + if (!system_supports_address_auth()) + return -EINVAL; + + pac_address_keys_to_user(&user_keys, keys); + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); +} + +static int pac_address_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_address_keys user_keys; + int ret; + + if (!system_supports_address_auth()) + return -EINVAL; + + pac_address_keys_to_user(&user_keys, keys); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); + if (ret) + return ret; + pac_address_keys_from_user(keys, &user_keys); + + return 0; +} + +static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, + const struct ptrauth_keys *keys) +{ + ukeys->apgakey = pac_key_to_user(&keys->apga); +} + +static void pac_generic_keys_from_user(struct ptrauth_keys *keys, + const struct user_pac_generic_keys *ukeys) +{ + keys->apga = pac_key_from_user(ukeys->apgakey); +} + 
+static int pac_generic_keys_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_generic_keys user_keys; + + if (!system_supports_generic_auth()) + return -EINVAL; + + pac_generic_keys_to_user(&user_keys, keys); + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); +} + +static int pac_generic_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_generic_keys user_keys; + int ret; + + if (!system_supports_generic_auth()) + return -EINVAL; + + pac_generic_keys_to_user(&user_keys, keys); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); + if (ret) + return ret; + pac_generic_keys_from_user(keys, &user_keys); + + return 0; +} +#endif /* CONFIG_CHECKPOINT_RESTORE */ #endif /* CONFIG_ARM64_PTR_AUTH */ enum aarch64_regset { @@ -995,6 +1120,10 @@ enum aarch64_regset { #endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, +#ifdef CONFIG_CHECKPOINT_RESTORE + REGSET_PACA_KEYS, + REGSET_PACG_KEYS, +#endif #endif }; @@ -1074,6 +1203,24 @@ static const struct user_regset aarch64_regsets[] = { .get = pac_mask_get, /* this cannot be set dynamically */ }, +#ifdef CONFIG_CHECKPOINT_RESTORE + [REGSET_PACA_KEYS] = { + .core_note_type = NT_ARM_PACA_KEYS, + .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), + .size = sizeof(__uint128_t), + .align = sizeof(__uint128_t), + .get = pac_address_keys_get, + .set = pac_address_keys_set, + }, + [REGSET_PACG_KEYS] = { + .core_note_type = NT_ARM_PACG_KEYS, + .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), + .size = sizeof(__uint128_t), + .align = sizeof(__uint128_t), + .get = pac_generic_keys_get, + .set = pac_generic_keys_set, + }, +#endif #endif }; -- cgit v1.2.1 From 8aa67d18a466f338f79bac99f44e7891f4c71bbb Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Thu, 31 Jan 2019 18:23:37 +0000 Subject: arm64: entry: Remove unneeded need_resched() loop Since the enabling and disabling of IRQs within preempt_schedule_irq() is contained in a need_resched() loop, we don't need the outer arch code loop. Reported-by: Julien Thierry Reported-by: Will Deacon Reviewed-by: Julien Thierry Acked-by: Will Deacon Signed-off-by: Valentin Schneider Cc: Mark Rutland Cc: Marc Zyngier Cc: Julien Grall Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 0ec0c46b2c0c..4d0c81f29a60 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -611,7 +611,7 @@ el1_irq: #ifdef CONFIG_PREEMPT ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count cbnz x24, 1f // preempt count != 0 - bl el1_preempt + bl preempt_schedule_irq // irq en/disable is done inside 1: #endif #ifdef CONFIG_TRACE_IRQFLAGS @@ -620,15 +620,6 @@ el1_irq: kernel_exit 1 ENDPROC(el1_irq) -#ifdef CONFIG_PREEMPT -el1_preempt: - mov x24, lr -1: bl preempt_schedule_irq // irq en/disable is done inside - ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS - tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? - ret x24 -#endif - /* * EL0 mode handlers. 
*/ -- cgit v1.2.1 From 5870970b9a828d8693aa6d15742573289d7dbcd0 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:39 +0000 Subject: arm64: Fix HCR.TGE status for NMI contexts When using VHE, the host needs to clear the HCR_EL2.TGE bit in order to interact with guest TLBs, switching from the EL2&0 translation regime to EL1&0. However, a non-maskable asynchronous event, such as SDEI, could happen while TGE is cleared. Because of this, address translation operations relying on the EL2&0 translation regime could fail (TLB invalidation, userspace access, ...). Fix this by properly setting HCR_EL2.TGE when entering NMI context and clearing it if necessary when returning to the interrupted context. Signed-off-by: Julien Thierry Suggested-by: Marc Zyngier Reviewed-by: Marc Zyngier Reviewed-by: James Morse Cc: Arnd Bergmann Cc: Will Deacon Cc: Marc Zyngier Cc: James Morse Cc: linux-arch@vger.kernel.org Cc: stable@vger.kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hardirq.h | 31 +++++++++++++++++++++++++++++++ arch/arm64/kernel/irq.c | 3 +++ 2 files changed, 34 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 1473fc2f7ab7..89691c86640a 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -17,8 +17,12 @@ #define __ASM_HARDIRQ_H #include +#include #include +#include #include +#include +#include #define NR_IPI 7 @@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu); #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 +struct nmi_ctx { + u64 hcr; +}; + +DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts); + +#define arch_nmi_enter() \ + do { \ + if (is_kernel_in_hyp_mode()) { \ + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \ + nmi_ctx->hcr = read_sysreg(hcr_el2); \ + if (!(nmi_ctx->hcr & HCR_TGE)) { \ + write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \ + isb(); \ + } \ + } \ + } while (0) + +#define arch_nmi_exit() \ + do { \ + if (is_kernel_in_hyp_mode()) { \ + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \ + if (!(nmi_ctx->hcr & HCR_TGE)) \ + write_sysreg(nmi_ctx->hcr, hcr_el2); \ + } \ + } while (0) + static inline void ack_bad_irq(unsigned int irq) { extern unsigned long irq_err_count; diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 780a12f59a8f..92fa81798fb9 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -33,6 +33,9 @@ unsigned long irq_err_count; +/* Only access this in an NMI enter/exit */ +DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts); + DEFINE_PER_CPU(unsigned long *, irq_stack_ptr); int arch_show_interrupts(struct seq_file *p, int prec) -- cgit v1.2.1 From a82785a953e03444fe38616aed4d27b01da79a97 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:40 +0000 Subject: arm64: Remove unused daif related functions/macros There are some helpers to modify PSR.[DAIF] bits that are not referenced anywhere. The less these bits are available outside of the local_irq_* functions, the better. Get rid of those unused helpers.
Signed-off-by: Julien Thierry Reviewed-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Will Deacon Cc: James Morse Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/assembler.h | 10 +--------- arch/arm64/include/asm/daifflags.h | 10 ---------- 2 files changed, 1 insertion(+), 19 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 4feb6119c3c9..7acf2436b578 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -62,16 +62,8 @@ .endm /* - * Enable and disable interrupts. + * Save/restore interrupts. */ - .macro disable_irq - msr daifset, #2 - .endm - - .macro enable_irq - msr daifclr, #2 - .endm - .macro save_and_disable_irq, flags mrs \flags, daif msr daifset, #2 diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 8d91f2233135..546bc398553e 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -43,16 +43,6 @@ static inline unsigned long local_daif_save(void) return flags; } -static inline void local_daif_unmask(void) -{ - trace_hardirqs_on(); - asm volatile( - "msr daifclr, #0xf // local_daif_unmask" - : - : - : "memory"); -} - static inline void local_daif_restore(unsigned long flags) { if (!arch_irqs_disabled_flags(flags)) -- cgit v1.2.1 From c9bfdf734d4c0e7dc25f39dd636ba1952994865a Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:41 +0000 Subject: arm64: cpufeature: Set SYSREG_GIC_CPUIF as a boot system feature It is not supported to have some CPUs using the GICv3 sysreg CPU interface while others do not. Once ICC_SRE_EL1.SRE is set on a CPU, the bit cannot be cleared. Since matching this feature requires setting ICC_SRE_EL1.SRE, it cannot be turned off if found on a CPU. Set the feature as STRICT_BOOT: if the boot CPU has it, all other CPUs are required to have it. Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Reviewed-by: Suzuki K Poulose Reviewed-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Will Deacon Cc: Suzuki K Poulose Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f6d84e2c92fe..b9c0adf71a54 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1207,7 +1207,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, .field_pos = ID_AA64PFR0_GIC_SHIFT, -- cgit v1.2.1 From b90d2b22afdc7ce150a9ee7a8d82378bcfc395a5 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:42 +0000 Subject: arm64: cpufeature: Add cpufeature for IRQ priority masking Add a cpufeature indicating whether a CPU supports masking interrupts by priority. The feature will be properly enabled in a later patch.
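For illustration only, later patches in this series are expected to key off the new capability roughly as follows. system_uses_irq_prio_masking() is added in the diff below; gic_write_pmr() and GIC_PRIO_IRQON only arrive in subsequent patches of the series:

static void example_unmask_by_priority(void)
{
	/* Fall back to PSTATE.I-based masking on CPUs without the feature */
	if (!system_uses_irq_prio_masking())
		return;

	/* Allow all IRQ priorities through the GIC CPU interface */
	gic_write_pmr(GIC_PRIO_IRQON);
}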
Signed-off-by: Julien Thierry Reviewed-by: Suzuki K Poulose Reviewed-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Will Deacon Cc: Marc Zyngier Cc: Suzuki K Poulose Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/include/asm/cpufeature.h | 6 ++++++ arch/arm64/kernel/cpufeature.c | 23 +++++++++++++++++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 82e9099834ae..f6a76e43f39e 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -60,7 +60,8 @@ #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39 #define ARM64_HAS_GENERIC_AUTH_ARCH 40 #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41 +#define ARM64_HAS_IRQ_PRIO_MASKING 42 -#define ARM64_NCAPS 42 +#define ARM64_NCAPS 43 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index dfcfba725d72..89c3f318f6be 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -612,6 +612,12 @@ static inline bool system_supports_generic_auth(void) cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF)); } +static inline bool system_uses_irq_prio_masking(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); +} + #define ARM64_SSBD_UNKNOWN -1 #define ARM64_SSBD_FORCE_DISABLE 0 #define ARM64_SSBD_KERNEL 1 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index b9c0adf71a54..6f56e0ab63a1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1203,6 +1203,14 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) } #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_PSEUDO_NMI +static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, + int scope) +{ + return false; +} +#endif + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -1480,6 +1488,21 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, }, #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_PSEUDO_NMI + { + /* + * Depends on having GICv3 + */ + .desc = "IRQ priority masking", + .capability = ARM64_HAS_IRQ_PRIO_MASKING, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = can_use_gic_priorities, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .field_pos = ID_AA64PFR0_GIC_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = 1, + }, +#endif {}, }; -- cgit v1.2.1 From e99da7c6f51b487280406d8dc31cc7532cfb2017 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:43 +0000 Subject: arm/arm64: gic-v3: Add PMR and RPR accessors Add helper functions to access system registers related to interrupt priorities: PMR and RPR. 
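A minimal usage sketch of the save/modify/restore pattern these accessors enable (illustrative only, not from the patch; new_mask is a placeholder priority value):

static void run_with_priority_mask(u32 new_mask, void (*fn)(void))
{
	u32 old = gic_read_pmr();

	/* IRQs with a priority value at or above new_mask stay masked */
	gic_write_pmr(new_mask);
	fn();
	gic_write_pmr(old);	/* restore the previous mask */
}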
Signed-off-by: Julien Thierry Reviewed-by: Mark Rutland Acked-by: Catalin Marinas Reviewed-by: Marc Zyngier Cc: Russell King Cc: Will Deacon Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/arch_gicv3.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index e278f94df0c9..37193e224a50 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -114,6 +114,21 @@ static inline void gic_write_bpr1(u32 val) write_sysreg_s(val, SYS_ICC_BPR1_EL1); } +static inline u32 gic_read_pmr(void) +{ + return read_sysreg_s(SYS_ICC_PMR_EL1); +} + +static inline void gic_write_pmr(u32 val) +{ + write_sysreg_s(val, SYS_ICC_PMR_EL1); +} + +static inline u32 gic_read_rpr(void) +{ + return read_sysreg_s(SYS_ICC_RPR_EL1); +} + #define gic_read_typer(c) readq_relaxed(c) #define gic_write_irouter(v, c) writeq_relaxed(v, c) #define gic_read_lpir(c) readq_relaxed(c) -- cgit v1.2.1 From 3f1f3234bc2db1c16b9818b9a15a5d58ad45251c Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:44 +0000 Subject: irqchip/gic-v3: Switch to PMR masking before calling IRQ handler Mask the IRQ priority through PMR and re-enable IRQs at CPU level, allowing only higher priority interrupts to be received during interrupt handling. Signed-off-by: Julien Thierry Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Will Deacon Cc: Thomas Gleixner Cc: Jason Cooper Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/arch_gicv3.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 37193e224a50..b5f8142bf802 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -155,5 +155,22 @@ static inline u32 gic_read_rpr(void) #define gits_write_vpendbaser(v, c) writeq_relaxed(v, c) #define gits_read_vpendbaser(c) readq_relaxed(c) +static inline bool gic_prio_masking_enabled(void) +{ + return system_uses_irq_prio_masking(); +} + +static inline void gic_pmr_mask_irqs(void) +{ + /* Should not get called yet. */ + WARN_ON_ONCE(true); +} + +static inline void gic_arch_enable_irqs(void) +{ + /* Should not get called yet. */ + WARN_ON_ONCE(true); +} + #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARCH_GICV3_H */ -- cgit v1.2.1 From cdbc81ddef43c8fdcbd3a26e1a7530c70b629cfc Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:45 +0000 Subject: arm64: ptrace: Provide definitions for PMR values Introduce fixed values for PMR that are going to be used to mask and unmask interrupts by priority. The current priority given to GIC interrupts is 0xa0, so clearing PMR's most significant bit is enough to mask interrupts. Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: Oleg Nesterov Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/ptrace.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index fce22c4b2f73..8b131bc8984d 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -25,6 +25,18 @@ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) +/* + * PMR values used to mask/unmask interrupts. 
+ * + * GIC priority masking works as follows: if an IRQ's priority is a higher value + * than the value held in PMR, that IRQ is masked. Lowering the value of PMR + * means masking more IRQs (or at least that the same IRQs remain masked). + * + * To mask interrupts, we clear the most significant bit of PMR. + */ +#define GIC_PRIO_IRQON 0xf0 +#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) + /* Additional SPSR bits not exposed in the UABI */ #define PSR_IL_BIT (1 << 20) -- cgit v1.2.1 From 133d05186325ce04494ea6488a6b86e50a446c12 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:46 +0000 Subject: arm64: Make PMR part of task context In order to replace PSR.I interrupt disabling/enabling with ICC_PMR_EL1 interrupt masking, ICC_PMR_EL1 needs to be saved/restored when taking/returning from an exception. This mimics the way hardware saves and restores PSR.I bit in spsr_el1 for exceptions and ERET. Add PMR to the registers to save in the pt_regs struct upon kernel entry, and restore it before ERET. Also, initialize it to a sane value when creating new tasks. Signed-off-by: Julien Thierry Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Cc: Will Deacon Cc: Oleg Nesterov Cc: Dave Martin Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/processor.h | 3 +++ arch/arm64/include/asm/ptrace.h | 14 +++++++++++--- arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kernel/entry.S | 14 ++++++++++++++ arch/arm64/kernel/process.c | 6 ++++++ 5 files changed, 35 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index f1a7ab18faf3..5d9ce62bdebd 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -191,6 +191,9 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) memset(regs, 0, sizeof(*regs)); forget_syscall(regs); regs->pc = pc; + + if (system_uses_irq_prio_masking()) + regs->pmr_save = GIC_PRIO_IRQON; } static inline void start_thread(struct pt_regs *regs, unsigned long pc, diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 8b131bc8984d..ec60174c8c18 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -19,6 +19,8 @@ #ifndef __ASM_PTRACE_H #define __ASM_PTRACE_H +#include + #include /* Current Exception Level values, as contained in CurrentEL */ @@ -179,7 +181,8 @@ struct pt_regs { #endif u64 orig_addr_limit; - u64 unused; // maintain 16 byte alignment + /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */ + u64 pmr_save; u64 stackframe[2]; }; @@ -214,8 +217,13 @@ static inline void forget_syscall(struct pt_regs *regs) #define processor_mode(regs) \ ((regs)->pstate & PSR_MODE_MASK) -#define interrupts_enabled(regs) \ - (!((regs)->pstate & PSR_I_BIT)) +#define irqs_priority_unmasked(regs) \ + (system_uses_irq_prio_masking() ? 
\ + (regs)->pmr_save == GIC_PRIO_IRQON : \ true) + +#define interrupts_enabled(regs) \ (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs)) #define fast_interrupts_enabled(regs) \ (!((regs)->pstate & PSR_F_BIT)) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 0552b91d7666..7f40dcbdd51d 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -73,6 +73,7 @@ int main(void) DEFINE(S_PC, offsetof(struct pt_regs, pc)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); + DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save)); DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 4d0c81f29a60..02f809a5c823 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -249,6 +249,12 @@ alternative_else_nop_endif msr sp_el0, tsk .endif + /* Save pmr */ +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + mrs_s x20, SYS_ICC_PMR_EL1 + str x20, [sp, #S_PMR_SAVE] +alternative_else_nop_endif + /* * Registers that may be useful after this macro is invoked: * @@ -269,6 +275,14 @@ alternative_else_nop_endif /* No need to restore UAO, it will be restored from SPSR_EL1 */ .endif + /* Restore pmr */ +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + ldr x20, [sp, #S_PMR_SAVE] + msr_s SYS_ICC_PMR_EL1, x20 + /* Ensure priority change is seen by redistributor */ + dsb sy +alternative_else_nop_endif + ldp x21, x22, [sp, #S_PC] // load ELR, SPSR .if \el == 0 ct_user_enter diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index a0f985a6ac50..6d410fc2849b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -232,6 +232,9 @@ void __show_regs(struct pt_regs *regs) printk("sp : %016llx\n", sp); + if (system_uses_irq_prio_masking()) + printk("pmr_save: %08llx\n", regs->pmr_save); + i = top_reg; while (i >= 0) { @@ -363,6 +366,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) childregs->pstate |= PSR_SSBS_BIT; + if (system_uses_irq_prio_masking()) + childregs->pmr_save = GIC_PRIO_IRQON; + p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; } -- cgit v1.2.1 From a9806aa259feb2f6fd582b6342c835a3482fccc6 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:47 +0000 Subject: arm64: Unmask PMR before going idle The CPU does not receive signals for interrupts with a priority masked by ICC_PMR_EL1. This means the CPU might not come back from a WFI instruction. Make sure ICC_PMR_EL1 does not mask interrupts when doing a WFI. Since the logic of cpu_do_idle is becoming a bit more complex than just two instructions, let's turn it from ASM to C.
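Schematically, the hazard being fixed looks like this (illustrative pseudo-C, not the patch; the actual fix is in the diff below):

static void broken_idle_sketch(void)
{
	/* IRQs masked only at the GIC: the wake-up event is filtered out */
	gic_write_pmr(GIC_PRIO_IRQOFF);
	dsb(sy);
	wfi();		/* may never return: nothing can wake the core */
}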
Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/process.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ arch/arm64/mm/proc.S | 11 ----------- 2 files changed, 45 insertions(+), 11 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6d410fc2849b..3767fb21a5b8 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -51,6 +51,7 @@ #include #include +#include #include #include #include @@ -74,6 +75,50 @@ EXPORT_SYMBOL_GPL(pm_power_off); void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); +static void __cpu_do_idle(void) +{ + dsb(sy); + wfi(); +} + +static void __cpu_do_idle_irqprio(void) +{ + unsigned long pmr; + unsigned long daif_bits; + + daif_bits = read_sysreg(daif); + write_sysreg(daif_bits | PSR_I_BIT, daif); + + /* + * Unmask PMR before going idle to make sure interrupts can + * be raised. + */ + pmr = gic_read_pmr(); + gic_write_pmr(GIC_PRIO_IRQON); + + __cpu_do_idle(); + + gic_write_pmr(pmr); + write_sysreg(daif_bits, daif); +} + +/* + * cpu_do_idle() + * + * Idle the processor (wait for interrupt). + * + * If the CPU supports priority masking we must do additional work to + * ensure that interrupts are not masked at the PMR (because the core will + * not wake up if we block the wake up signal in the interrupt controller). + */ +void cpu_do_idle(void) +{ + if (system_uses_irq_prio_masking()) + __cpu_do_idle_irqprio(); + else + __cpu_do_idle(); +} + /* * This is our default idle handler. */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 73886a5f1f30..3ea4f3b84a8b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -55,17 +55,6 @@ #define MAIR(attr, mt) ((attr) << ((mt) * 8)) -/* - * cpu_do_idle() - * - * Idle the processor (wait for interrupt). - */ -ENTRY(cpu_do_idle) - dsb sy // WFI may enter a low-power mode - wfi - ret -ENDPROC(cpu_do_idle) - #ifdef CONFIG_CPU_PM /** * cpu_do_suspend - save CPU registers context -- cgit v1.2.1 From 85738e05dc38a80921e1e1944e5b835f6668fc30 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:48 +0000 Subject: arm64: kvm: Unmask PMR before entering guest Interrupts masked by ICC_PMR_EL1 will not be signaled to the CPU. This means that the hypervisor will not receive masked interrupts while running a guest. We need to make sure that all maskable interrupts are masked from the time we call local_irq_disable() in the main run loop, and remain so until we call local_irq_enable() after returning from the guest, and we need to ensure that we see no interrupts at all (including pseudo-NMIs) in the middle of the VM world-switch, while at the same time we need to ensure we exit the guest when there are interrupts for the host. We can accomplish this with pseudo-NMIs enabled by: (1) local_irq_disable: set the priority mask (2) enter guest: set PSTATE.I (3) clear the priority mask (4) eret to guest (5) exit guest: set the priority mask, clear PSTATE.I (and restore other host PSTATE bits) (6) local_irq_enable: clear the priority mask.
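Steps (1)-(6) restated as simplified pseudo-C (helper names follow the diffs below; run_guest() is a placeholder and the VHE/non-VHE split is ignored):

static void world_switch_sketch(struct kvm_vcpu *vcpu)
{
	local_irq_disable();		/* (1) PMR <- GIC_PRIO_IRQOFF */
	local_daif_mask();		/* (2) PSTATE.I masks the core */
	gic_write_pmr(GIC_PRIO_IRQON);	/* (3) reopen PMR so guest-time
					 *     IRQs can force an exit */
	dsb(sy);
	run_guest(vcpu);		/* (4) ERET into the guest */
	gic_write_pmr(GIC_PRIO_IRQOFF);	/* (5) remask PMR, then clear
					 *     PSTATE.I */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
	local_irq_enable();		/* (6) PMR <- GIC_PRIO_IRQON */
}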
Signed-off-by: Julien Thierry Acked-by: Catalin Marinas Reviewed-by: Marc Zyngier Reviewed-by: Christoffer Dall Cc: Christoffer Dall Cc: Marc Zyngier Cc: Will Deacon Cc: kvmarm@lists.cs.columbia.edu Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_host.h | 16 ++++++++++++++++ arch/arm64/kvm/hyp/switch.c | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..292c88263fd1 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -474,10 +475,25 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) static inline void kvm_arm_vhe_guest_enter(void) { local_daif_mask(); + + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + */ + if (system_uses_irq_prio_masking()) { + gic_write_pmr(GIC_PRIO_IRQON); + dsb(sy); + } } static inline void kvm_arm_vhe_guest_exit(void) { + /* + * local_daif_restore() takes care to properly restore PSTATE.DAIF + * and the GIC PMR if the host is using IRQ priorities. + */ local_daif_restore(DAIF_PROCCTX_NOIRQ); /* diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b0b1478094b4..6a4c2d6c3287 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -22,6 +22,7 @@ #include +#include #include #include #include @@ -521,6 +522,17 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) struct kvm_cpu_context *guest_ctxt; u64 exit_code; + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + */ + if (system_uses_irq_prio_masking()) { + gic_write_pmr(GIC_PRIO_IRQON); + dsb(sy); + } + vcpu = kern_hyp_va(vcpu); host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); @@ -573,6 +585,10 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) */ __debug_switch_to_host(vcpu); + /* Returning to host will clear PSR.I, remask PMR if needed */ + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQOFF); + return exit_code; } -- cgit v1.2.1 From 4a503217ce37e1f4f3d9b681bbcbbac103776bf1 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:50 +0000 Subject: arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking Instead of disabling interrupts by setting the PSR.I bit, use a priority higher than the one used for interrupts to mask them via PMR. When using PMR to disable interrupts, the value of PMR will be used instead of PSR.[DAIF] for the irqflags.
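One subtlety worth spelling out: once the saved flags may hold a PMR value, "interrupts disabled" becomes a comparison rather than a bit test. A plain-C restatement of the PMR branch of arch_irqs_disabled_flags() from the diff below, for readability only:

static inline int pmr_irqs_disabled_flags(unsigned long flags)
{
	/* Matches the cmp/cset-ls pair in the inline asm: IRQs are
	 * disabled when the saved PMR value is at or below IRQOFF. */
	return flags <= GIC_PRIO_IRQOFF;
}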
Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Acked-by: Ard Biesheuvel Reviewed-by: Catalin Marinas Cc: Will Deacon Cc: Ard Biesheuvel Cc: Oleg Nesterov Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/efi.h | 11 +++++ arch/arm64/include/asm/irqflags.h | 100 +++++++++++++++++++++++++++----------- 2 files changed, 83 insertions(+), 28 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 7ed320895d1f..c9e9a6978e73 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -44,6 +44,17 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) +/* + * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not. + * And EFI shouldn't really play around with priority masking as it is not aware + * which priorities the OS has assigned to its interrupts. + */ +#define arch_efi_save_flags(state_flags) \ + ((void)((state_flags) = read_sysreg(daif))) + +#define arch_efi_restore_flags(state_flags) write_sysreg(state_flags, daif) + + /* arch specific definitions used by the stub code */ /* diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 24692edf1a69..d4597b2c5729 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -18,7 +18,9 @@ #ifdef __KERNEL__ +#include #include +#include /* * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and @@ -36,33 +38,27 @@ /* * CPU interrupt mask handling. */ -static inline unsigned long arch_local_irq_save(void) -{ - unsigned long flags; - asm volatile( - "mrs %0, daif // arch_local_irq_save\n" - "msr daifset, #2" - : "=r" (flags) - : - : "memory"); - return flags; -} - static inline void arch_local_irq_enable(void) { - asm volatile( - "msr daifclr, #2 // arch_local_irq_enable" - : + asm volatile(ALTERNATIVE( + "msr daifclr, #2 // arch_local_irq_enable\n" + "nop", + "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%0\n" + "dsb sy", + ARM64_HAS_IRQ_PRIO_MASKING) : + : "r" (GIC_PRIO_IRQON) : "memory"); } static inline void arch_local_irq_disable(void) { - asm volatile( - "msr daifset, #2 // arch_local_irq_disable" - : + asm volatile(ALTERNATIVE( + "msr daifset, #2 // arch_local_irq_disable", + "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0", + ARM64_HAS_IRQ_PRIO_MASKING) : + : "r" (GIC_PRIO_IRQOFF) : "memory"); } @@ -71,12 +67,44 @@ static inline void arch_local_irq_disable(void) */ static inline unsigned long arch_local_save_flags(void) { + unsigned long daif_bits; unsigned long flags; - asm volatile( - "mrs %0, daif // arch_local_save_flags" - : "=r" (flags) - : + + daif_bits = read_sysreg(daif); + + /* + * The asm is logically equivalent to: + * + * if (system_uses_irq_prio_masking()) + * flags = (daif_bits & PSR_I_BIT) ? 
+ * GIC_PRIO_IRQOFF : + * read_sysreg_s(SYS_ICC_PMR_EL1); + * else + * flags = daif_bits; + */ + asm volatile(ALTERNATIVE( + "mov %0, %1\n" + "nop\n" + "nop", + "mrs_s %0, " __stringify(SYS_ICC_PMR_EL1) "\n" + "ands %1, %1, " __stringify(PSR_I_BIT) "\n" + "csel %0, %0, %2, eq", + ARM64_HAS_IRQ_PRIO_MASKING) + : "=&r" (flags), "+r" (daif_bits) + : "r" (GIC_PRIO_IRQOFF) : "memory"); + + return flags; +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + flags = arch_local_save_flags(); + + arch_local_irq_disable(); + return flags; } /* @@ -85,16 +113,32 @@ static inline unsigned long arch_local_save_flags(void) */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile( - "msr daif, %0 // arch_local_irq_restore" - : - : "r" (flags) - : "memory"); + asm volatile(ALTERNATIVE( + "msr daif, %0\n" + "nop", + "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0\n" + "dsb sy", + ARM64_HAS_IRQ_PRIO_MASKING) + : "+r" (flags) + : + : "memory"); } static inline int arch_irqs_disabled_flags(unsigned long flags) { - return flags & PSR_I_BIT; + int res; + + asm volatile(ALTERNATIVE( + "and %w0, %w1, #" __stringify(PSR_I_BIT) "\n" + "nop", + "cmp %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n" + "cset %w0, ls", + ARM64_HAS_IRQ_PRIO_MASKING) + : "=&r" (res) + : "r" ((int) flags) + : "memory"); + + return res; } #endif #endif -- cgit v1.2.1 From 8cb7eff32cc00697d4a37b1ed569c72ee2039ca4 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:51 +0000 Subject: arm64: daifflags: Include PMR in daifflags restore operations The addition of PMR should not bypass the semantics of daifflags. When DA_F are set, the I bit is also set, as no interrupts (even of higher priority) are allowed. When DA_F are cleared, the I bit is cleared and interrupt enabling/disabling goes through ICC_PMR_EL1. Signed-off-by: Julien Thierry Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Cc: Will Deacon Cc: James Morse Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/daifflags.h | 50 +++++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 546bc398553e..1dd3d7a38d34 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -18,6 +18,8 @@ #include +#include + #define DAIF_PROCCTX 0 #define DAIF_PROCCTX_NOIRQ PSR_I_BIT @@ -36,7 +38,13 @@ static inline unsigned long local_daif_save(void) { unsigned long flags; - flags = arch_local_save_flags(); + flags = read_sysreg(daif); + + if (system_uses_irq_prio_masking()) { + /* If IRQs are masked with PMR, reflect it in the flags */ + if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF) + flags |= PSR_I_BIT; + } local_daif_mask(); @@ -45,12 +53,46 @@ static inline unsigned long local_daif_save(void) static inline void local_daif_restore(unsigned long flags) { - if (!arch_irqs_disabled_flags(flags)) + bool irq_disabled = flags & PSR_I_BIT; + + if (!irq_disabled) { trace_hardirqs_on(); - arch_local_irq_restore(flags); + if (system_uses_irq_prio_masking()) + arch_local_irq_enable(); + } else if (!(flags & PSR_A_BIT)) { + /* + * If interrupts are disabled but we can take + * asynchronous errors, we can take NMIs + */ + if (system_uses_irq_prio_masking()) { + flags &= ~PSR_I_BIT; + /* + * There has been concern that the write to daif + * might be reordered before this write to PMR.
+ * From the ARM ARM DDI 0487D.a, section D1.7.1 + * "Accessing PSTATE fields": + * Writes to the PSTATE fields have side-effects on + * various aspects of the PE operation. All of these + * side-effects are guaranteed: + * - Not to be visible to earlier instructions in + * the execution stream. + * - To be visible to later instructions in the + * execution stream + * + * Also, writes to PMR are self-synchronizing, so no + * interrupts with a lower priority than PMR is signaled + * to the PE after the write. + * + * So we don't need additional synchronization here. + */ + arch_local_irq_disable(); + } + } + + write_sysreg(flags, daif); - if (arch_irqs_disabled_flags(flags)) + if (irq_disabled) trace_hardirqs_off(); } -- cgit v1.2.1 From e9ab7a2e333615497b3fc426c379c330230c2b50 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:52 +0000 Subject: arm64: alternative: Allow alternative status checking per cpufeature In preparation for the application of alternatives at different points during the boot process, provide the possibility to check whether alternatives for a feature of interest were already applied instead of having a global boolean for all alternatives. Make the VHE enablement code check for the VHE feature instead of considering all alternatives. Signed-off-by: Julien Thierry Acked-by: Marc Zyngier Cc: Will Deacon Cc: Suzuki K Poulose Cc: Marc Zyngier Cc: Christoffer Dall Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative.h | 3 +-- arch/arm64/kernel/alternative.c | 21 +++++++++++++++++---- arch/arm64/kernel/cpufeature.c | 2 +- 3 files changed, 19 insertions(+), 7 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 4b650ec1d7dd..9806a2357e70 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -14,8 +14,6 @@ #include #include -extern int alternatives_applied; - struct alt_instr { s32 orig_offset; /* offset to original instruction */ s32 alt_offset; /* offset to replacement instruction */ @@ -28,6 +26,7 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); void __init apply_alternatives_all(void); +bool alternative_is_applied(u16 cpufeature); #ifdef CONFIG_MODULES void apply_alternatives_module(void *start, size_t length); diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index b5d603992d40..c947d2246017 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -32,13 +32,23 @@ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) -int alternatives_applied; +static int all_alternatives_applied; + +static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS); struct alt_region { struct alt_instr *begin; struct alt_instr *end; }; +bool alternative_is_applied(u16 cpufeature) +{ + if (WARN_ON(cpufeature >= ARM64_NCAPS)) + return false; + + return test_bit(cpufeature, applied_alternatives); +} + /* * Check if the target PC is within an alternative block.
*/ @@ -192,6 +202,9 @@ static void __apply_alternatives(void *alt_region, bool is_module) dsb(ish); __flush_icache_all(); isb(); + + /* We applied all that was available */ + bitmap_copy(applied_alternatives, cpu_hwcaps, ARM64_NCAPS); } } @@ -208,14 +221,14 @@ static int __apply_alternatives_multi_stop(void *unused) /* We always have a CPU 0 at this point (__init) */ if (smp_processor_id()) { - while (!READ_ONCE(alternatives_applied)) + while (!READ_ONCE(all_alternatives_applied)) cpu_relax(); isb(); } else { - BUG_ON(alternatives_applied); + BUG_ON(all_alternatives_applied); __apply_alternatives(®ion, false); /* Barriers provided by the cache flushing */ - WRITE_ONCE(alternatives_applied, 1); + WRITE_ONCE(all_alternatives_applied, 1); } return 0; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 6f56e0ab63a1..d607ea33228c 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1118,7 +1118,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to * do anything here. */ - if (!alternatives_applied) + if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN)) write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); } #endif -- cgit v1.2.1 From 0ceb0d56905e3d141fae77e5936d00eee9233473 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Thu, 31 Jan 2019 14:58:53 +0000 Subject: arm64: alternative: Apply alternatives early in boot process Currently alternatives are applied very late in the boot process (and a long time after we enable scheduling). Some alternative sequences, such as those that alter the way CPU context is stored, must be applied much earlier in the boot sequence. Introduce apply_boot_alternatives() to allow some alternatives to be applied immediately after we detect the CPU features of the boot CPU. 
Signed-off-by: Daniel Thompson [julien.thierry@arm.com: rename to fit new cpufeature framework better, apply BOOT_SCOPE feature early in boot] Signed-off-by: Julien Thierry Reviewed-by: Suzuki K Poulose Reviewed-by: Marc Zyngier Cc: Will Deacon Cc: Christoffer Dall Cc: Suzuki K Poulose Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative.h | 1 + arch/arm64/include/asm/cpufeature.h | 4 ++++ arch/arm64/kernel/alternative.c | 43 +++++++++++++++++++++++++++++++----- arch/arm64/kernel/cpufeature.c | 6 +++++ arch/arm64/kernel/smp.c | 7 ++++++ 5 files changed, 56 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 9806a2357e70..b9f8d787eea9 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -25,6 +25,7 @@ struct alt_instr { typedef void (*alternative_cb_t)(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); +void __init apply_boot_alternatives(void); void __init apply_alternatives_all(void); bool alternative_is_applied(u16 cpufeature); diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 89c3f318f6be..e505e1fbd2b9 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -391,6 +391,10 @@ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; +/* ARM64 CAPS + alternative_cb */ +#define ARM64_NPATCHABLE (ARM64_NCAPS + 1) +extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); + #define for_each_available_cap(cap) \ for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS) diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index c947d2246017..a9b467763153 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -155,7 +155,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end) } while (cur += d_size, cur < end); } -static void __apply_alternatives(void *alt_region, bool is_module) +static void __apply_alternatives(void *alt_region, bool is_module, + unsigned long *feature_mask) { struct alt_instr *alt; struct alt_region *region = alt_region; @@ -165,6 +166,9 @@ static void __apply_alternatives(void *alt_region, bool is_module) for (alt = region->begin; alt < region->end; alt++) { int nr_inst; + if (!test_bit(alt->cpufeature, feature_mask)) + continue; + /* Use ARM64_CB_PATCH as an unconditional patch */ if (alt->cpufeature < ARM64_CB_PATCH && !cpus_have_cap(alt->cpufeature)) @@ -203,8 +207,11 @@ static void __apply_alternatives(void *alt_region, bool is_module) __flush_icache_all(); isb(); - /* We applied all that was available */ - bitmap_copy(applied_alternatives, cpu_hwcaps, ARM64_NCAPS); + /* Ignore ARM64_CB bit from feature mask */ + bitmap_or(applied_alternatives, applied_alternatives, + feature_mask, ARM64_NCAPS); + bitmap_and(applied_alternatives, applied_alternatives, + cpu_hwcaps, ARM64_NCAPS); } } @@ -225,8 +232,13 @@ static int __apply_alternatives_multi_stop(void *unused) cpu_relax(); isb(); } else { + DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE); + + bitmap_complement(remaining_capabilities, boot_capabilities, + ARM64_NPATCHABLE); + BUG_ON(all_alternatives_applied); - __apply_alternatives(®ion, false); + __apply_alternatives(®ion, false, remaining_capabilities); /* Barriers provided by the cache flushing */ WRITE_ONCE(all_alternatives_applied, 1); } @@ 
-240,6 +252,24 @@ void __init apply_alternatives_all(void) stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); } +/* + * This is called very early in the boot process (directly after we run + * a feature detect on the boot CPU). No need to worry about other CPUs + * here. + */ +void __init apply_boot_alternatives(void) +{ + struct alt_region region = { + .begin = (struct alt_instr *)__alt_instructions, + .end = (struct alt_instr *)__alt_instructions_end, + }; + + /* If called on non-boot cpu things could go wrong */ + WARN_ON(smp_processor_id() != 0); + + __apply_alternatives(®ion, false, &boot_capabilities[0]); +} + #ifdef CONFIG_MODULES void apply_alternatives_module(void *start, size_t length) { @@ -247,7 +277,10 @@ void apply_alternatives_module(void *start, size_t length) .begin = start, .end = start + length, }; + DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE); + + bitmap_fill(all_capabilities, ARM64_NPATCHABLE); - __apply_alternatives(®ion, true); + __apply_alternatives(®ion, true, &all_capabilities[0]); } #endif diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d607ea33228c..b530fb24e6c6 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -54,6 +54,9 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); EXPORT_SYMBOL(cpu_hwcaps); static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS]; +/* Need also bit for ARM64_CB_PATCH */ +DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); + /* * Flag to indicate if we have computed the system wide * capabilities based on the boot time active CPUs. This @@ -1677,6 +1680,9 @@ static void update_cpu_capabilities(u16 scope_mask) if (caps->desc) pr_info("detected: %s\n", caps->desc); cpus_set_cap(caps->capability); + + if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) + set_bit(caps->capability, boot_capabilities); } } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 1598d6f7200a..a944edd39d2d 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -419,6 +419,13 @@ void __init smp_prepare_boot_cpu(void) */ jump_label_init(); cpuinfo_store_boot_cpu(); + + /* + * We now know enough about the boot CPU to apply the + * alternatives that cannot wait until interrupt handling + * and/or scheduling is enabled. + */ + apply_boot_alternatives(); } static u64 __init of_get_cpu_mpidr(struct device_node *dn) -- cgit v1.2.1 From e79321883842ca7b77d8a58fe8303e8da35c085e Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:55 +0000 Subject: arm64: Switch to PMR masking when starting CPUs Once the boot CPU has been prepared or a new secondary CPU has been brought up, use ICC_PMR_EL1 to mask interrupts on that CPU and clear PSR.I bit. Since ICC_PMR_EL1 is initialized at CPU bringup, avoid overwriting it in the GICv3 driver. 
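One subtlety worth spelling out: the ordering in init_gic_priority_masking() (see the diff below) is load-bearing. Conceptually (an illustration, not code from the patch):

	/*
	 * Hand-over from PSR.I masking to PMR masking:
	 *
	 *   state 0: PSR.I = 1, PMR = don't care      -> IRQs masked by PSR.I
	 *   state 1: PSR.I = 1, PMR = GIC_PRIO_IRQOFF -> masked by both
	 *   state 2: PSR.I = 0, PMR = GIC_PRIO_IRQOFF -> masked by PMR alone
	 *
	 * Going through state 1 means there is never a window in which
	 * neither mechanism masks IRQs.
	 */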
Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Acked-by: Marc Zyngier Cc: Will Deacon Cc: James Morse Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kernel/smp.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index a944edd39d2d..824de7038967 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -180,6 +181,24 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) return ret; } +static void init_gic_priority_masking(void) +{ + u32 cpuflags; + + if (WARN_ON(!gic_enable_sre())) + return; + + cpuflags = read_sysreg(daif); + + WARN_ON(!(cpuflags & PSR_I_BIT)); + + gic_write_pmr(GIC_PRIO_IRQOFF); + + /* We can only unmask PSR.I if we can take aborts */ + if (!(cpuflags & PSR_A_BIT)) + write_sysreg(cpuflags & ~PSR_I_BIT, daif); +} + /* * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. */ @@ -206,6 +225,9 @@ asmlinkage notrace void secondary_start_kernel(void) */ cpu_uninstall_idmap(); + if (system_uses_irq_prio_masking()) + init_gic_priority_masking(); + preempt_disable(); trace_hardirqs_off(); @@ -426,6 +448,10 @@ void __init smp_prepare_boot_cpu(void) * and/or scheduling is enabled. */ apply_boot_alternatives(); + + /* Conditionally switch to GIC PMR for interrupt masking */ + if (system_uses_irq_prio_masking()) + init_gic_priority_masking(); } static u64 __init of_get_cpu_mpidr(struct device_node *dn) -- cgit v1.2.1 From b334481ab76b2a9031aef5393b07de6d21a08244 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:58:56 +0000 Subject: arm64: gic-v3: Implement arch support for priority masking Implement the architecture-specific primitives allowing the GICv3 driver to use priorities to mask interrupts. Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Acked-by: Marc Zyngier Cc: Marc Zyngier Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/arch_gicv3.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index b5f8142bf802..14b41ddc68ba 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -22,6 +22,7 @@ #ifndef __ASSEMBLY__ +#include #include #include #include @@ -162,14 +163,13 @@ static inline bool gic_prio_masking_enabled(void) static inline void gic_pmr_mask_irqs(void) { - /* Should not get called yet. */ - WARN_ON_ONCE(true); + BUILD_BUG_ON(GICD_INT_DEF_PRI <= GIC_PRIO_IRQOFF); + gic_write_pmr(GIC_PRIO_IRQOFF); } static inline void gic_arch_enable_irqs(void) { - /* Should not get called yet. */ - WARN_ON_ONCE(true); + asm volatile ("msr daifclr, #2" : : : "memory"); } #endif /* __ASSEMBLY__ */ -- cgit v1.2.1 From 7d31464adf20fb8c075a3a3dfe2002a195566510 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:59:00 +0000 Subject: arm64: Handle serror in NMI context Per the definition of the daif flags, SErrors can occur in any interrupt context, including NMI contexts. Trying to nmi_enter() in an NMI context will crash. Skip nmi_enter()/nmi_exit() when the SError occurred during an NMI.
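For context, the generic nmi_enter() helper of this era forbids nesting outright; paraphrased (and trimmed) from include/linux/hardirq.h, not quoted exactly:

	#define nmi_enter()						\
		do {							\
			lockdep_off();					\
			BUG_ON(in_nmi());	/* nesting forbidden */	\
			preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
			/* ... rcu/ftrace/trace hooks ... */		\
		} while (0)

Hence the in_nmi() check below: an SError taken while already in NMI context must not call nmi_enter() a second time.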
Suggested-by: James Morse Signed-off-by: Julien Thierry Acked-by: Marc Zyngier Cc: Will Deacon Cc: Mark Rutland Cc: Dave Martin Cc: James Morse Signed-off-by: Catalin Marinas --- arch/arm64/kernel/traps.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 4e2fb877f8d5..8ad119c3f665 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -898,13 +898,17 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr) { - nmi_enter(); + const bool was_in_nmi = in_nmi(); + + if (!was_in_nmi) + nmi_enter(); /* non-RAS errors are not containable */ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) arm64_serror_panic(regs, esr); - nmi_exit(); + if (!was_in_nmi) + nmi_exit(); } void __pte_error(const char *file, int line, unsigned long val) -- cgit v1.2.1 From 1234ad686fb1bde5a9c2447fc4c9df8430358763 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:59:01 +0000 Subject: arm64: Skip preemption when exiting an NMI Handling of an NMI should not set any TIF flags. For NMIs received from EL0 the current exit path is safe to use. However, an NMI received at EL1 could have interrupted some task context that has set the TIF_NEED_RESCHED flag. Preempting a task should not happen as a result of an NMI. Skip preemption after handling an NMI from EL1. Signed-off-by: Julien Thierry Acked-by: Marc Zyngier Cc: Will Deacon Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 02f809a5c823..35ed48469506 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -624,7 +624,15 @@ el1_irq: #ifdef CONFIG_PREEMPT ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count - cbnz x24, 1f // preempt count != 0 +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + /* + * DA_F were cleared at start of handling. If anything is set in DAIF, + * we come back from an NMI, so skip preemption + */ + mrs x0, daif + orr x24, x24, x0 +alternative_else_nop_endif + cbnz x24, 1f // preempt count != 0 || NMI return path bl preempt_schedule_irq // irq en/disable is done inside 1: #endif -- cgit v1.2.1 From c25349fd3c8024cfebcc9b01ee6cfb093fab9be0 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:59:02 +0000 Subject: arm64: Skip irqflags tracing for NMI in IRQs disabled context When an NMI is raised while interrupts were disabled, the IRQ tracing is already in the correct state (i.e. hardirqs_off) and should be left as such when returning to the interrupted context. Check whether PMR was masking interrupts when the NMI was raised and skip IRQ tracing if necessary.
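In C terms, the check added to the entry code amounts to the following (pt_regs field and priority constants as introduced earlier in this series; the helper itself is illustrative):

	/*
	 * The PMR value saved at exception entry tells us whether IRQs
	 * were priority-masked, i.e. whether this interrupt is an NMI.
	 */
	static bool taken_with_irqs_masked(const struct pt_regs *regs)
	{
		return regs->pmr_save <= GIC_PRIO_IRQOFF;
	}

which is what the cmp/b.ls (unsigned lower-or-same) pairs in the assembly below implement.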
Signed-off-by: Julien Thierry Acked-by: Marc Zyngier Cc: Will Deacon Cc: Marc Zyngier Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 35ed48469506..6bf7e12f9a2b 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -617,7 +617,18 @@ el1_irq: kernel_entry 1 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_ARM64_PSEUDO_NMI +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + ldr x20, [sp, #S_PMR_SAVE] +alternative_else + mov x20, #GIC_PRIO_IRQON +alternative_endif + cmp x20, #GIC_PRIO_IRQOFF + /* Irqs were disabled, don't trace */ + b.ls 1f +#endif bl trace_hardirqs_off +1: #endif irq_handler @@ -637,8 +648,18 @@ alternative_else_nop_endif 1: #endif #ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_ARM64_PSEUDO_NMI + /* + * if IRQs were disabled when we received the interrupt, we have an NMI + * and we are not re-enabling interrupt upon eret. Skip tracing. + */ + cmp x20, #GIC_PRIO_IRQOFF + b.ls 1f +#endif bl trace_hardirqs_on +1: #endif + kernel_exit 1 ENDPROC(el1_irq) -- cgit v1.2.1 From bc3c03ccb4641fb940b27a0d369431876923a8fe Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 31 Jan 2019 14:59:03 +0000 Subject: arm64: Enable the support of pseudo-NMIs Add a build option and a command line parameter to build and enable the support of pseudo-NMIs. Signed-off-by: Julien Thierry Suggested-by: Daniel Thompson Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 14 ++++++++++++++ arch/arm64/kernel/cpufeature.c | 10 +++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4cad67b9ec0a..c7a44bcfc385 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1327,6 +1327,20 @@ config ARM64_MODULE_PLTS bool select HAVE_MOD_ARCH_SPECIFIC +config ARM64_PSEUDO_NMI + bool "Support for NMI-like interrupts" + select CONFIG_ARM_GIC_V3 + help + Adds support for mimicking Non-Maskable Interrupts through the use of + GIC interrupt priority. This support requires version 3 or later of + Arm GIC. + + This high priority configuration for interrupts needs to be + explicitly enabled by setting the kernel parameter + "irqchip.gicv3_pseudo_nmi" to 1. 
+ + If unsure, say N + config RELOCATABLE bool help diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index b530fb24e6c6..e24e94d28767 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1207,10 +1207,18 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) #endif /* CONFIG_ARM64_PTR_AUTH */ #ifdef CONFIG_ARM64_PSEUDO_NMI +static bool enable_pseudo_nmi; + +static int __init early_enable_pseudo_nmi(char *p) +{ + return strtobool(p, &enable_pseudo_nmi); +} +early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); + static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, int scope) { - return false; + return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope); } #endif -- cgit v1.2.1 From a80554fc36ba41d96af8e72fb54cd5d490e06c54 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Fri, 8 Feb 2019 09:36:48 +0000 Subject: arm64: irqflags: Fix clang build warnings Clang complains when passing asm operands that are smaller than the registers they are mapped to: arch/arm64/include/asm/irqflags.h:50:10: warning: value size does not match register size specified by the constraint and modifier [-Wasm-operand-widths] : "r" (GIC_PRIO_IRQON) Fix it by casting the affected input operands to a type of the correct size. Reported-by: Nathan Chancellor Tested-by: Nathan Chancellor Signed-off-by: Julien Thierry Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/irqflags.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index d4597b2c5729..43d8366c1e87 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -47,7 +47,7 @@ static inline void arch_local_irq_enable(void) "dsb sy", ARM64_HAS_IRQ_PRIO_MASKING) : - : "r" (GIC_PRIO_IRQON) + : "r" ((unsigned long) GIC_PRIO_IRQON) : "memory"); } @@ -58,7 +58,7 @@ static inline void arch_local_irq_disable(void) "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0", ARM64_HAS_IRQ_PRIO_MASKING) : - : "r" (GIC_PRIO_IRQOFF) + : "r" ((unsigned long) GIC_PRIO_IRQOFF) : "memory"); } @@ -91,7 +91,7 @@ static inline unsigned long arch_local_save_flags(void) "csel %0, %0, %2, eq", ARM64_HAS_IRQ_PRIO_MASKING) : "=&r" (flags), "+r" (daif_bits) - : "r" (GIC_PRIO_IRQOFF) + : "r" ((unsigned long) GIC_PRIO_IRQOFF) : "memory"); return flags; -- cgit v1.2.1 From 347cb6af8710b72cf9685fdc09d07873cf42d51f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 7 Jan 2019 13:36:20 -0500 Subject: dma-mapping: add a kconfig symbol for arch_setup_dma_ops availability Signed-off-by: Christoph Hellwig Acked-by: Paul Burton # MIPS Acked-by: Catalin Marinas # arm64 --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/dma-mapping.h | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a4168d366127..63909f318d56 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -22,6 +22,7 @@ config ARM64 select ARCH_HAS_KCOV select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 95dbf3ef735a..de96507ee2c1 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -29,10 +29,6 @@ 
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return NULL; } -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu, bool coherent); -#define arch_setup_dma_ops arch_setup_dma_ops - #ifdef CONFIG_IOMMU_DMA void arch_teardown_dma_ops(struct device *dev); #define arch_teardown_dma_ops arch_teardown_dma_ops -- cgit v1.2.1 From dc2acded38957dfa6b7b7e0203b4b8cb8d818ce6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 21 Dec 2018 22:14:44 +0100 Subject: dma-mapping: add a kconfig symbol for arch_teardown_dma_ops availability Signed-off-by: Christoph Hellwig Acked-by: Catalin Marinas # arm64 --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/dma-mapping.h | 5 ----- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 63909f318d56..87ec7be25e97 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -29,6 +29,7 @@ config ARM64 select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYSCALL_WRAPPER + select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK if !PREEMPT diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index de96507ee2c1..de98191e4c7d 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -29,11 +29,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return NULL; } -#ifdef CONFIG_IOMMU_DMA -void arch_teardown_dma_ops(struct device *dev); -#define arch_teardown_dma_ops arch_teardown_dma_ops -#endif - /* * Do not use this function in a driver, it is only provided for * arch/arm/mm/xen.c, which is used by arm64 as well. -- cgit v1.2.1 From 34e04eedd1cf1be714abb0e5976338cc72ccc05f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 1 Feb 2019 10:10:52 +0100 Subject: of: select OF_RESERVED_MEM automatically The OF_RESERVED_MEM can be used if we have either CMA or the generic declare coherent code built and we support the early flattened DT. So don't bother making it a user visible options that is selected by most configs that fit the above category, but just select it when the requirements are met. Signed-off-by: Christoph Hellwig Reviewed-by: Rob Herring --- arch/arm64/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 87ec7be25e97..fbcf521e1c9f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -165,7 +165,6 @@ config ARM64 select NEED_SG_DMA_LENGTH select OF select OF_EARLY_FLATTREE - select OF_RESERVED_MEM select PCI_DOMAINS_GENERIC if PCI select PCI_ECAM if (ACPI && PCI) select PCI_SYSCALL if PCI -- cgit v1.2.1 From 7aa8d14641651a61a0b8892314a0bcfaceebe158 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 5 Jan 2019 15:49:50 +0000 Subject: arm/arm64: KVM: Introduce kvm_call_hyp_ret() Until now, we haven't differentiated between HYP calls that have a return value and those who don't. As we're about to change this, introduce kvm_call_hyp_ret(), and change all call sites that actually make use of a return value. 
Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall Signed-off-by: Christoffer Dall --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/debug.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..e54cb7c88a4e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -371,6 +371,7 @@ void kvm_arm_resume_guest(struct kvm *kvm); u64 __kvm_call_hyp(void *hypfn, ...); #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) +#define kvm_call_hyp_ret(f, ...) kvm_call_hyp(f, ##__VA_ARGS__) void force_vm_exit(const cpumask_t *mask); void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index f39801e4136c..fd917d6d12af 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -76,7 +76,7 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu) void kvm_arm_init_debug(void) { - __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2)); + __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2)); } /** -- cgit v1.2.1 From 18fc7bf8e041272f74a3237b99261d59c3ff0388 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 5 Jan 2019 15:57:56 +0000 Subject: arm64: KVM: Allow for direct call of HYP functions when using VHE When running VHE, there is no need to jump via some stub to perform a "HYP" function call, as there is a single address space. Let's thus change kvm_call_hyp() and co to perform a direct call in this case. Although this results in a bit of code expansion, it allows the compiler to check for type compatibility, something that we are missing so far. Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall Signed-off-by: Christoffer Dall --- arch/arm64/include/asm/kvm_host.h | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e54cb7c88a4e..8b7702bdb219 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -370,8 +370,36 @@ void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); u64 __kvm_call_hyp(void *hypfn, ...); -#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) -#define kvm_call_hyp_ret(f, ...) kvm_call_hyp(f, ##__VA_ARGS__) + +/* + * The couple of isb() below are there to guarantee the same behaviour + * on VHE as on !VHE, where the eret to EL1 acts as a context + * synchronization event. + */ +#define kvm_call_hyp(f, ...) \ + do { \ + if (has_vhe()) { \ + f(__VA_ARGS__); \ + isb(); \ + } else { \ + __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \ + } \ + } while(0) + +#define kvm_call_hyp_ret(f, ...) \ + ({ \ + typeof(f(__VA_ARGS__)) ret; \ + \ + if (has_vhe()) { \ + ret = f(__VA_ARGS__); \ + isb(); \ + } else { \ + ret = __kvm_call_hyp(kvm_ksym_ref(f), \ + ##__VA_ARGS__); \ + } \ + \ + ret; \ + }) void force_vm_exit(const cpumask_t *mask); void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); -- cgit v1.2.1 From 7cba8a8d0d39c8846d0095fc2d2225c7983f4009 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 5 Jan 2019 16:06:53 +0000 Subject: arm64: KVM: Drop VHE-specific HYP call stub We now call VHE code directly, without going through any central dispatching function. Let's drop that code. 
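As an aside, the kvm_call_hyp_ret() expansion above leans on GCC's statement-expression extension plus typeof() to stay type-generic. A minimal standalone illustration of the idiom (hypothetical macro, not the kernel one):

	#include <stdio.h>

	#define CALL_RET(f, ...)				\
		({						\
			typeof(f(__VA_ARGS__)) __ret;		\
			__ret = f(__VA_ARGS__);			\
			__ret;	/* value of the ({ }) block */	\
		})

	static long add(long a, long b) { return a + b; }

	int main(void)
	{
		printf("%ld\n", CALL_RET(add, 2, 3));	/* prints 5 */
		return 0;
	}

Because the expansion calls f() directly, the compiler sees the real prototype and can diagnose argument mismatches, which is exactly the type checking the variadic __kvm_call_hyp() trampoline could never provide.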
Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall Signed-off-by: Christoffer Dall --- arch/arm64/kvm/hyp.S | 3 --- arch/arm64/kvm/hyp/hyp-entry.S | 12 ------------ 2 files changed, 15 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 952f6cb9cf72..2845aa680841 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -40,9 +40,6 @@ * arch/arm64/kernel/hyp_stub.S. */ ENTRY(__kvm_call_hyp) -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN hvc #0 ret -alternative_else_nop_endif - b __vhe_hyp_call ENDPROC(__kvm_call_hyp) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 73c1b483ec39..2b1e686772bf 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -43,18 +43,6 @@ ldr lr, [sp], #16 .endm -ENTRY(__vhe_hyp_call) - do_el2_call - /* - * We used to rely on having an exception return to get - * an implicit isb. In the E2H case, we don't have it anymore. - * rather than changing all the leaf functions, just do it here - * before returning to the rest of the kernel. - */ - isb - ret -ENDPROC(__vhe_hyp_call) - el1_sync: // Guest trapped into EL2 mrs x0, esr_el2 -- cgit v1.2.1 From 32f139551954512bfdf9d558341af453bb8b12b4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 19 Jan 2019 15:29:54 +0000 Subject: arm/arm64: KVM: Statically configure the host's view of MPIDR We currently eagerly save/restore MPIDR. It turns out to be slightly pointless: - On the host, this value is known as soon as we're scheduled on a physical CPU - In the guest, this value cannot change, as it is set by KVM (and this is a read-only register) The result of the above is that we can perfectly avoid the eager saving of MPIDR_EL1, and only keep the restore. We just have to setup the host contexts appropriately at boot time. 
Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall Signed-off-by: Christoffer Dall --- arch/arm64/include/asm/kvm_host.h | 8 ++++++++ arch/arm64/kvm/hyp/sysreg-sr.c | 1 - 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8b7702bdb219..f497bb31031f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -418,6 +419,13 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); +static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt, + int cpu) +{ + /* The host's MPIDR is immutable, so let's set it up at boot time */ + cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu); +} + void __kvm_enable_ssbs(void); static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 68d6f7c3b237..2498f86defcb 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -52,7 +52,6 @@ static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt) static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) { - ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr); ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); -- cgit v1.2.1 From e329fb75d519e3dc3eb11b22d5bb846516be3521 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 11 Dec 2018 15:26:31 +0100 Subject: KVM: arm/arm64: Factor out VMID into struct kvm_vmid In preparation for nested virtualization where we are going to have more than a single VMID per VM, let's factor out the VMID data into a separate VMID data structure and change the VMID allocator to operate on this new structure instead of using a struct kvm. This also means that update_vttbr now becomes update_vmid, and that the vttbr itself is generated on the fly based on the stage 2 page table base address and the vmid. We cache the physical address of the pgd when allocating the pgd to avoid doing the calculation on every entry to the guest and to avoid calling into potentially non-hyp-mapped code from hyp/EL2. If we wanted to merge the VMID allocator with the arm64 ASID allocator at some point in the future, it should actually become easier to do that after this patch. Note that to avoid mapping the kvm_vmid_bits variable into hyp, we simply forego the masking of the vmid value in kvm_get_vttbr and rely on update_vmid to always assign a valid vmid value (within the supported range).
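A worked example of the value kvm_get_vttbr() assembles (illustrative numbers; on arm64, VTTBR_VMID_SHIFT is 48 and the CnP bit is bit 0):

	u64 baddr = 0x40001000;			/* kvm->arch.pgd_phys */
	u64 vmid_field = (u64)5 << 48;		/* VMID 5 */
	u64 vttbr = baddr | vmid_field;		/* 0x0005000040001000 */

with the CnP bit OR-ed in when the system supports common-not-private translations, exactly as the new helper in the diff below does.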
Reviewed-by: Marc Zyngier [maz: minor cleanups] Reviewed-by: Julien Thierry Signed-off-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 9 ++++++--- arch/arm64/include/asm/kvm_hyp.h | 3 ++- arch/arm64/include/asm/kvm_mmu.h | 10 ++++++++-- 3 files changed, 16 insertions(+), 6 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f497bb31031f..444dd1cb1958 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -57,16 +57,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu); int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); -struct kvm_arch { +struct kvm_vmid { /* The VMID generation used for the virt. memory system */ u64 vmid_gen; u32 vmid; +}; + +struct kvm_arch { + struct kvm_vmid vmid; /* stage2 entry level table */ pgd_t *pgd; + phys_addr_t pgd_phys; - /* VTTBR value associated with above pgd and vmid */ - u64 vttbr; /* VTCR_EL2 value for this VM */ u64 vtcr; diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index a80a7ef57325..4da765f2cca5 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #define __hyp_text __section(.hyp.text) notrace @@ -163,7 +164,7 @@ void __noreturn __hyp_do_panic(unsigned long, ...); static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) { write_sysreg(kvm->arch.vtcr, vtcr_el2); - write_sysreg(kvm->arch.vttbr, vttbr_el2); + write_sysreg(kvm_get_vttbr(kvm), vttbr_el2); /* * ARM erratum 1165522 requires the actual execution of the above diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 8af4b1befa42..c423c8c4fc39 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -591,9 +591,15 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm) return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)); } -static inline bool kvm_cpu_has_cnp(void) +static __always_inline u64 kvm_get_vttbr(struct kvm *kvm) { - return system_supports_cnp(); + struct kvm_vmid *vmid = &kvm->arch.vmid; + u64 vmid_field, baddr; + u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0; + + baddr = kvm->arch.pgd_phys; + vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT; + return kvm_phys_to_vttbr(baddr) | vmid_field | cnp; } #endif /* __ASSEMBLY__ */ -- cgit v1.2.1 From b98c079ba480c606b13f6abf844187af09baeaab Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 4 Jan 2019 11:33:42 +0100 Subject: KVM: arm64: Fix ICH_ELRSR_EL2 sysreg naming We previously incorrectly named the define for this system register. 
Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm64/include/asm/sysreg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 72dc4c011014..3e5650903d6d 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -426,7 +426,7 @@ #define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) #define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) #define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) -#define SYS_ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) +#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5) #define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) #define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x) -- cgit v1.2.1 From 09838de943d4c0ee75a99cd7665940705ab8dcea Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 9 Jan 2019 19:18:40 +0000 Subject: KVM: arm64: Reuse sys_reg() macro when searching the trap table Instead of having an open-coded macro, reuse the sys_reg() macro that does the exact same thing (the encoding is slightly different, but the ordering property is the same). Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall Signed-off-by: Christoffer Dall --- arch/arm64/kvm/sys_regs.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e3e37228ae4e..1a5bea4285e4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -965,6 +965,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +#define reg_to_encoding(x) \ + sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \ + (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2); + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ @@ -1820,30 +1824,19 @@ static const struct sys_reg_desc *get_target_table(unsigned target, } } -#define reg_to_match_value(x) \ - ({ \ - unsigned long val; \ - val = (x)->Op0 << 14; \ - val |= (x)->Op1 << 11; \ - val |= (x)->CRn << 7; \ - val |= (x)->CRm << 3; \ - val |= (x)->Op2; \ - val; \ - }) - static int match_sys_reg(const void *key, const void *elt) { const unsigned long pval = (unsigned long)key; const struct sys_reg_desc *r = elt; - return pval - reg_to_match_value(r); + return pval - reg_to_encoding(r); } static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[], unsigned int num) { - unsigned long pval = reg_to_match_value(params); + unsigned long pval = reg_to_encoding(params); return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg); } -- cgit v1.2.1 From 84135d3d18da2ff17d3ad1a609b2818cc3049552 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Thu, 5 Jul 2018 16:48:23 +0100 Subject: KVM: arm/arm64: consolidate arch timer trap handlers At the moment we have separate system register emulation handlers for each timer register. Actually they are quite similar, and we rely on kvm_arm_timer_[gs]et_reg() for the actual emulation anyways, so let's just merge all of those handlers into one function, which just marshalls the arguments and then hands off to a set of common accessors. This makes extending the emulation to include EL2 timers much easier. 
Signed-off-by: Andre Przywara [Fixed 32-bit VM breakage and reduced to reworking existing code] Signed-off-by: Christoffer Dall [Fixed 32bit host, general cleanup] Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/sysreg.h | 4 +++ arch/arm64/kvm/sys_regs.c | 73 +++++++++++++++++++---------------------- 2 files changed, 37 insertions(+), 40 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 3e5650903d6d..6482e8bcf1b8 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -392,6 +392,10 @@ #define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1) #define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2) +#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0) +#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) +#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) + #define __PMEV_op2(n) ((n) & 0x7) #define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) #define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n)) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 1a5bea4285e4..0d348500b24b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -990,44 +990,38 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } -static bool access_cntp_tval(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) +static bool access_arch_timer(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) { - u64 now = kvm_phys_timer_read(); - u64 cval; + enum kvm_arch_timers tmr; + enum kvm_arch_timer_regs treg; + u64 reg = reg_to_encoding(r); - if (p->is_write) { - kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, - p->regval + now); - } else { - cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); - p->regval = cval - now; + switch (reg) { + case SYS_CNTP_TVAL_EL0: + case SYS_AARCH32_CNTP_TVAL: + tmr = TIMER_PTIMER; + treg = TIMER_REG_TVAL; + break; + case SYS_CNTP_CTL_EL0: + case SYS_AARCH32_CNTP_CTL: + tmr = TIMER_PTIMER; + treg = TIMER_REG_CTL; + break; + case SYS_CNTP_CVAL_EL0: + case SYS_AARCH32_CNTP_CVAL: + tmr = TIMER_PTIMER; + treg = TIMER_REG_CVAL; + break; + default: + BUG(); } - return true; -} - -static bool access_cntp_ctl(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) -{ - if (p->is_write) - kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval); - else - p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL); - - return true; -} - -static bool access_cntp_cval(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) -{ if (p->is_write) - kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval); + kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval); else - p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); + p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg); return true; } @@ -1392,9 +1386,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, - { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval }, - { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl }, - { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval }, + { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTP_CVAL_EL0), 
access_arch_timer }, /* PMEVCNTRn_EL0 */ PMU_PMEVCNTR_EL0(0), @@ -1715,10 +1709,9 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, - /* CNTP_TVAL */ - { Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval }, - /* CNTP_CTL */ - { Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl }, + /* Arch Tmers */ + { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer }, + { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer }, /* PMEVCNTRn */ PMU_PMEVCNTR(0), @@ -1795,7 +1788,7 @@ static const struct sys_reg_desc cp15_64_regs[] = { { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ - { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval }, + { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer }, }; /* Target specific emulation tables */ -- cgit v1.2.1 From 64cf98fa5544aee6c547786ee32f92b796b30635 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 1 May 2016 22:29:58 +0200 Subject: KVM: arm/arm64: Move kvm_is_write_fault to header file Move this little function to the header files for arm/arm64 so other code can make use of it directly. Signed-off-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_emulate.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 506386a3edde..a0d1ce9ae12b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -331,6 +331,14 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) return ESR_ELx_SYS64_ISS_RT(esr); } +static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_trap_is_iabt(vcpu)) + return false; + + return kvm_vcpu_dabt_iswrite(vcpu); +} + static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) { return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK; -- cgit v1.2.1 From f7f2b15c3d42fa5754131b34a0f7ad5a5c3f777f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 31 Jan 2019 14:17:17 +0100 Subject: arm64: KVM: Expose sanitised cache type register to guest We currently permit CPUs in the same system to deviate in the exact topology of the caches, and we subsequently hide this fact from user space by exposing a sanitised value of the cache type register CTR_EL0. However, guests running under KVM see the bare value of CTR_EL0, which could potentially result in issues with, e.g., JITs or other pieces of code that are sensitive to misreported cache line sizes. So let's start trapping cache ID instructions if there is a mismatch, and expose the sanitised version of CTR_EL0 to guests. Note that CTR_EL0 is treated as an invariant to KVM user space, so update that part as well. 
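The reason stale CTR_EL0 values bite is that cache maintenance strides are derived from it. For example (field position per the architecture; the helper itself is illustrative):

	/* DminLine, bits [19:16]: log2 of the smallest D-cache line, in 4-byte words */
	static inline unsigned int dcache_line_size_bytes(unsigned long ctr)
	{
		return 4U << ((ctr >> 16) & 0xf);
	}

A guest that cached a larger line size on one CPU and then ran maintenance loops on a CPU with smaller lines would skip lines, hence trapping the reads and exposing the sanitised (safe minimum) value instead.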
Acked-by: Christoffer Dall Signed-off-by: Ard Biesheuvel Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_emulate.h | 3 ++ arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kvm/sys_regs.c | 59 ++++++++++++++++++++++++++++++++++-- 3 files changed, 61 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index a0d1ce9ae12b..9acccb1e3746 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -77,6 +77,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) */ if (!vcpu_el1_is_32bit(vcpu)) vcpu->arch.hcr_el2 |= HCR_TID3; + + if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE)) + vcpu->arch.hcr_el2 |= HCR_TID2; } static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6482e8bcf1b8..5b267dec6194 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -361,6 +361,7 @@ #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) +#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) #define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 0d348500b24b..45a07cfc57ed 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1146,6 +1146,49 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return __set_id_reg(rd, uaddr, true); } +static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) + return write_to_read_only(vcpu, p, r); + + p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0); + return true; +} + +static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) + return write_to_read_only(vcpu, p, r); + + p->regval = read_sysreg(clidr_el1); + return true; +} + +static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) + vcpu_write_sys_reg(vcpu, p->regval, r->reg); + else + p->regval = vcpu_read_sys_reg(vcpu, r->reg); + return true; +} + +static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u32 csselr; + + if (p->is_write) + return write_to_read_only(vcpu, p, r); + + csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1); + p->regval = get_ccsidr(csselr); + return true; +} + /* sys_reg_desc initialiser for known cpufeature ID registers */ #define ID_SANITISED(name) { \ SYS_DESC(SYS_##name), \ @@ -1363,7 +1406,10 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0}, - { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 }, + { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr }, + { SYS_DESC(SYS_CLIDR_EL1), access_clidr }, + { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, + { SYS_DESC(SYS_CTR_EL0), access_ctr }, { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, }, { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, @@ -1663,6 +1709,7 @@ static const struct sys_reg_desc cp14_64_regs[] = { * register). 
*/ static const struct sys_reg_desc cp15_regs[] = { + { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr }, { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, @@ -1779,6 +1826,10 @@ static const struct sys_reg_desc cp15_regs[] = { PMU_PMEVTYPER(30), /* PMCCFILTR */ { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper }, + + { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr }, + { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr }, + { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR }, }; static const struct sys_reg_desc cp15_64_regs[] = { @@ -2192,11 +2243,15 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, } FUNCTION_INVARIANT(midr_el1) -FUNCTION_INVARIANT(ctr_el0) FUNCTION_INVARIANT(revidr_el1) FUNCTION_INVARIANT(clidr_el1) FUNCTION_INVARIANT(aidr_el1) +static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r) +{ + ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0); +} + /* ->val is filled in by kvm_sys_reg_table_init() */ static struct sys_reg_desc invariant_sys_regs[] = { { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 }, -- cgit v1.2.1 From 793acf870ea3e492c6bb3edb5f8657d9c4f4903f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 31 Jan 2019 14:17:18 +0100 Subject: arm64: KVM: Describe data or unified caches as having 1 set and 1 way On SMP ARM systems, cache maintenance by set/way should only ever be done in the context of onlining or offlining CPUs, which is typically done by bare metal firmware and never in a virtual machine. For this reason, we trap set/way cache maintenance operations and replace them with conditional flushing of the entire guest address space. Due to this trapping, the set/way arguments passed into the set/way ops are completely ignored, and thus irrelevant. This also means that the set/way geometry is equally irrelevant, and we can simply report it as 1 set and 1 way, so that legacy 32-bit ARM system software (i.e., the kind that only receives odd fixes) doesn't take a performance hit due to the trapping when iterating over the cachelines. 
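For reference, in the 32-bit CCSIDR_EL1 format the geometry fields sit at bits [27:13] (NumSets minus 1) and [12:3] (Associativity minus 1), with log2(line size in words) minus 2 in bits [2:0]. The trimming below is therefore just the following (standalone sketch of the same mask; the kernel uses GENMASK(27, 3)):

	/* Report 1 set and 1 way while preserving the line size field. */
	static unsigned int squash_ccsidr_geometry(unsigned int ccsidr)
	{
		return ccsidr & ~(((1u << 25) - 1) << 3);	/* clear bits [27:3] */
	}

so a legacy set/way loop collapses to a single iteration per cache level.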
Acked-by: Christoffer Dall Signed-off-by: Ard Biesheuvel Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_emulate.h | 3 ++- arch/arm64/kvm/sys_regs.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 9acccb1e3746..d3842791e1c4 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -78,7 +78,8 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) if (!vcpu_el1_is_32bit(vcpu)) vcpu->arch.hcr_el2 |= HCR_TID3; - if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE)) + if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) || + vcpu_el1_is_32bit(vcpu)) vcpu->arch.hcr_el2 |= HCR_TID2; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 45a07cfc57ed..81a342679e60 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1186,6 +1186,21 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1); p->regval = get_ccsidr(csselr); + + /* + * Guests should not be doing cache operations by set/way at all, and + * for this reason, we trap them and attempt to infer the intent, so + * that we can flush the entire guest's address space at the appropriate + * time. + * To prevent this trapping from causing performance problems, let's + * expose the geometry of all data and unified caches (which are + * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way. + * [If guests should attempt to infer aliasing properties from the + * geometry (which is not permitted by the architecture), they would + * only do so for virtually indexed caches.] + */ + if (!(csselr & 1)) // data or unified cache + p->regval &= ~GENMASK(27, 3); return true; } -- cgit v1.2.1 From 3644a35b0244ddaeafa170a371144c4f12682c98 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 25 Jan 2019 16:57:29 +0900 Subject: KVM: arm/arm64: Remove -I. header search paths The header search path -I. in kernel Makefiles is very suspicious; it allows the compiler to search for headers in the top of $(srctree), where obviously no header file exists. I was able to build without these extra header search paths. Acked-by: Christoffer Dall Signed-off-by: Masahiro Yamada Signed-off-by: Marc Zyngier --- arch/arm64/kvm/Makefile | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 0f2a135ba15b..3089b3159135 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -4,8 +4,6 @@ # ccflags-y += -Iarch/arm64/kvm -Ivirt/kvm/arm/vgic -CFLAGS_arm.o := -I. -CFLAGS_mmu.o := -I. KVM=../../../virt/kvm -- cgit v1.2.1 From 05277f368c33fdc12d91464f25bb5656b808ec8a Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 25 Jan 2019 16:57:30 +0900 Subject: KVM: arm/arm64: Prefix header search paths with $(srctree)/ Currently, the Kbuild core manipulates header search paths in a crazy way [1]. To fix this mess, I want all Makefiles to add explicit $(srctree)/ to the search paths in the srctree. Some Makefiles are already written in that way, but not all. The goal of this work is to make the notation consistent, and finally get rid of the gross hacks. Having whitespaces after -I does not matter since commit 48f6e3cf5bc6 ("kbuild: do not drop -I without parameter"). 
[1]: https://patchwork.kernel.org/patch/9632347/ Acked-by: Christoffer Dall Signed-off-by: Masahiro Yamada Signed-off-by: Marc Zyngier --- arch/arm64/kvm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 3089b3159135..690e033a91c0 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -3,7 +3,7 @@ # Makefile for Kernel-based Virtual Machine module # -ccflags-y += -Iarch/arm64/kvm -Ivirt/kvm/arm/vgic +ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic KVM=../../../virt/kvm -- cgit v1.2.1 From 1b44471b555930cd8d03ae9be86bd2c32fbf0a92 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Thu, 14 Feb 2019 01:45:46 +0000 Subject: KVM: arm64: Fix comment for KVM_PHYS_SHIFT Since Suzuki K Poulose's work on Dynamic IPA support, KVM_PHYS_SHIFT will be used only when machine_type's bits[7:0] equal to 0 (by default). Thus the outdated comment should be fixed. Reviewed-by: Suzuki K Poulose Signed-off-by: Zenghui Yu Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_mmu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index c423c8c4fc39..b0742a16c6c9 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -138,7 +138,8 @@ static inline unsigned long __kern_hyp_va(unsigned long v) }) /* - * We currently only support a 40bit IPA. + * We currently support using a VM-specified IPA size. For backward + * compatibility, the default IPA size is fixed to 40bits. */ #define KVM_PHYS_SHIFT (40) -- cgit v1.2.1 From ff4c25f26a71b79c70ea03b3935a1297439a8a85 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 3 Feb 2019 20:12:02 +0100 Subject: dma-mapping: improve selection of dma_declare_coherent availability This API is primarily used through DT entries, but two architectures and two drivers call it directly. So instead of selecting the config symbol for random architectures pull it in implicitly for the actual users. Also rename the Kconfig option to describe the feature better. Signed-off-by: Christoph Hellwig Acked-by: Paul Burton # MIPS Acked-by: Lee Jones Reviewed-by: Greg Kroah-Hartman --- arch/arm64/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fbcf521e1c9f..e86fac1e6b03 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -139,7 +139,6 @@ config ARM64 select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_GCC_PLUGINS - select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING select HAVE_MEMBLOCK_NODE_MAP if NUMA -- cgit v1.2.1 From 7b9d3d11c058bfa8e0fc51cbf0888a46f89b35aa Mon Sep 17 00:00:00 2001 From: Xiaowei Bao Date: Thu, 21 Feb 2019 11:16:18 +0800 Subject: arm64: dts: Add the PCIE EP node in dts Add the PCIE EP node in dts for ls1046a. 
Signed-off-by: Xiaowei Bao Signed-off-by: Lorenzo Pieralisi Reviewed-by: Minghuan Lian Reviewed-by: Zhiqiang Hou Reviewed-by: Rob Herring --- arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 34 +++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi index 9a2106e60e19..576262e434a5 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi @@ -657,6 +657,17 @@ status = "disabled"; }; + pcie_ep@3400000 { + compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep"; + reg = <0x00 0x03400000 0x0 0x00100000 + 0x40 0x00000000 0x8 0x00000000>; + reg-names = "regs", "addr_space"; + num-ib-windows = <6>; + num-ob-windows = <8>; + num-lanes = <2>; + status = "disabled"; + }; + pcie@3500000 { compatible = "fsl,ls1046a-pcie"; reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ @@ -683,6 +694,17 @@ status = "disabled"; }; + pcie_ep@3500000 { + compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep"; + reg = <0x00 0x03500000 0x0 0x00100000 + 0x48 0x00000000 0x8 0x00000000>; + reg-names = "regs", "addr_space"; + num-ib-windows = <6>; + num-ob-windows = <8>; + num-lanes = <2>; + status = "disabled"; + }; + pcie@3600000 { compatible = "fsl,ls1046a-pcie"; reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ @@ -709,6 +731,17 @@ status = "disabled"; }; + pcie_ep@3600000 { + compatible = "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"; + reg = <0x00 0x03600000 0x0 0x00100000 + 0x50 0x00000000 0x8 0x00000000>; + reg-names = "regs", "addr_space"; + num-ib-windows = <6>; + num-ob-windows = <8>; + num-lanes = <2>; + status = "disabled"; + }; + qdma: dma-controller@8380000 { compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma"; reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */ @@ -729,7 +762,6 @@ queue-sizes = <64 64>; big-endian; }; - }; reserved-memory { -- cgit v1.2.1 From c88b093693ccbe41991ef2e9b1d251945e6e54ed Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 21 Feb 2019 11:42:32 +0000 Subject: arm64: KVM: Fix architecturally invalid reset value for FPEXC32_EL2 Due to what looks like a typo dating back to the original addition of FPEXC32_EL2 handling, KVM currently initialises this register to an architecturally invalid value. As a result, the VECITR field (RES1) in bits [10:8] is initialised with 0, and the two reserved (RES0) bits [6:5] are initialised with 1. (In the Common VFP Subarchitecture as specified by ARMv7-A, these two bits were IMP DEF. ARMv8-A removes them.) This patch changes the reset value from 0x70 to 0x700, which reflects the architectural constraints and is presumably what was originally intended. 
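Spelled out in bits (field positions as quoted in the message above):

	0x070 = 0b000_0111_0000	/* sets bits [6:4], including the RES0 bits [6:5]; VECITR [10:8] left 0 */
	0x700 = 0b111_0000_0000	/* sets exactly VECITR, bits [10:8], which are RES1 */

which is why 0x700 is the architecturally valid reset value.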
Cc: # 4.12.x- Cc: Christoffer Dall Fixes: 62a89c44954f ("arm64: KVM: 32bit handling of coprocessor traps") Signed-off-by: Dave Martin Signed-off-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 81a342679e60..a398d04274b7 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1523,7 +1523,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 }, { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 }, - { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 }, + { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 }, }; static bool trap_dbgidr(struct kvm_vcpu *vcpu, -- cgit v1.2.1 From e20119f7eaaaf6aad5b44f35155ce500429e17f6 Mon Sep 17 00:00:00 2001 From: Takeshi Kihara Date: Thu, 21 Feb 2019 13:59:38 +0100 Subject: arm64: dts: renesas: r8a77990: Fix SCIF5 DMA channels According to the R-Car Gen3 Hardware Manual Errata for Rev 1.50 of Feb 12, 2019, the DMA channels for SCIF5 are corrected from 16..47 to 0..15 on R-Car E3. Signed-off-by: Takeshi Kihara Fixes: a5ebe5e49a862e21 ("arm64: dts: renesas: r8a77990: Add SCIF-{0,1,3,4,5} device nodes") Signed-off-by: Geert Uytterhoeven Reviewed-by: Fabrizio Castro Signed-off-by: Simon Horman --- arch/arm64/boot/dts/renesas/r8a77990.dtsi | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi index 786178cf1ffd..36f409091cfc 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for the R-Car E3 (R8A77990) SoC * - * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018-2019 Renesas Electronics Corp. */ #include @@ -1042,9 +1042,8 @@ <&cpg CPG_CORE R8A77990_CLK_S3D1C>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; - dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, - <&dmac2 0x5b>, <&dmac2 0x5a>; - dma-names = "tx", "rx", "tx", "rx"; + dmas = <&dmac0 0x5b>, <&dmac0 0x5a>; + dma-names = "tx", "rx"; power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; resets = <&cpg 202>; status = "disabled"; -- cgit v1.2.1 From c21cd4ae82e169b394139243684aaacf31bfcdf8 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 21 Feb 2019 15:04:28 +0100 Subject: arm64: dts: renesas: r8a774c0: Fix SCIF5 DMA channels Correct the DMA channels for SCIF5 from 16..47 to 0..15, as was done for R-Car E3. Signed-off-by: Takeshi Kihara Fixes: 2660a6af690ebbb4 ("arm64: dts: renesas: r8a774c0: Add SCIF and HSCIF nodes") Signed-off-by: Geert Uytterhoeven Reviewed-by: Fabrizio Castro Signed-off-by: Simon Horman --- arch/arm64/boot/dts/renesas/r8a774c0.dtsi | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi index f2e390f7f1d5..6590b63268b0 100644 --- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for the RZ/G2E (R8A774C0) SoC * - * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018-2019 Renesas Electronics Corp. 
*/ #include @@ -990,9 +990,8 @@ <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; - dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, - <&dmac2 0x5b>, <&dmac2 0x5a>; - dma-names = "tx", "rx", "tx", "rx"; + dmas = <&dmac0 0x5b>, <&dmac0 0x5a>; + dma-names = "tx", "rx"; power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; resets = <&cpg 202>; status = "disabled"; -- cgit v1.2.1 From 47224e51ab778e918257b96c1a1b3735e4b8c15d Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Fri, 8 Feb 2019 17:04:25 +0000 Subject: arm64: Remove documentation about TIF_USEDFPU TIF_USEDFPU is not defined as thread flags for Arm64. So drop it from the documentation. Acked-by: Will Deacon Signed-off-by: Julien Grall Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/thread_info.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index bbca68b54732..eb3ef73e07cf 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -79,7 +79,6 @@ void arch_release_task_struct(struct task_struct *tsk); * TIF_SIGPENDING - signal pending * TIF_NEED_RESCHED - rescheduling necessary * TIF_NOTIFY_RESUME - callback before returning to user - * TIF_USEDFPU - FPU was used by this task this quantum (SMP) */ #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 1 -- cgit v1.2.1 From 4caf8758b60b6f7f9773fd1d265cb5a7cf935c97 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Fri, 22 Feb 2019 09:32:50 +0000 Subject: arm64: Rename get_thread_info() The assembly macro get_thread_info() actually returns a task_struct and is analogous to the current/get_current macro/function. While it could be argued that thread_info sits at the start of task_struct and the intention could have been to return a thread_info, instances of loads from/stores to the address obtained from get_thread_info() use offsets that are generated with offsetof(struct task_struct, [...]). Rename get_thread_info() to state it returns a task_struct. Acked-by: Mark Rutland Signed-off-by: Julien Thierry Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm-uaccess.h | 2 +- arch/arm64/include/asm/assembler.h | 6 +++--- arch/arm64/kernel/entry.S | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 4128bec033f6..f74909ba29bd 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -24,7 +24,7 @@ .endm .macro __uaccess_ttbr0_enable, tmp1, tmp2 - get_thread_info \tmp1 + get_current_task \tmp1 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 mrs \tmp2, ttbr1_el1 extr \tmp2, \tmp2, \tmp1, #48 diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 7acf2436b578..9c5c876a9ff2 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -528,9 +528,9 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm /* - * Return the current thread_info. + * Return the current task_struct. 
*/ - .macro get_thread_info, rd + .macro get_current_task, rd mrs \rd, sp_el0 .endm @@ -713,7 +713,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .macro if_will_cond_yield_neon #ifdef CONFIG_PREEMPT - get_thread_info x0 + get_current_task x0 ldr x0, [x0, #TSK_TI_PREEMPT] sub x0, x0, #PREEMPT_DISABLE_OFFSET cbz x0, .Lyield_\@ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 6bf7e12f9a2b..c50a7a75f2e0 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -185,7 +185,7 @@ alternative_cb_end .else add x21, sp, #S_FRAME_SIZE - get_thread_info tsk + get_current_task tsk /* Save the task's original addr_limit and set USER_DS */ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] str x20, [sp, #S_ORIG_ADDR_LIMIT] @@ -1104,7 +1104,7 @@ ENTRY(ret_from_fork) cbz x19, 1f // not a kernel thread mov x0, x20 blr x19 -1: get_thread_info tsk +1: get_current_task tsk b ret_to_user ENDPROC(ret_from_fork) NOKPROBE(ret_from_fork) -- cgit v1.2.1 From 3e32131abc311a5cb9fddecc72cbd0b95ffcc10d Mon Sep 17 00:00:00 2001 From: Zhang Lei Date: Tue, 26 Feb 2019 18:43:41 +0000 Subject: arm64: Add workaround for Fujitsu A64FX erratum 010001 On the Fujitsu-A64FX cores ver(1.0, 1.1), memory access may cause an undefined fault (Data abort, DFSC=0b111111). This fault occurs under a specific hardware condition when a load/store instruction performs an address translation. Any load/store instruction, except non-fault access including Armv8 and SVE might cause this undefined fault. The TCR_ELx.NFD1 bit is used by the kernel when CONFIG_RANDOMIZE_BASE is enabled to mitigate timing attacks against KASLR where the kernel address space could be probed using the FFR and suppressed fault on SVE loads. Since this erratum causes spurious exceptions, which may corrupt the exception registers, we clear the TCR_ELx.NFDx=1 bits when booting on an affected CPU. Signed-off-by: Zhang Lei [Generated MIDR value/mask for __cpu_setup(), removed spurious-fault handler and always disabled the NFDx bits on affected CPUs] Signed-off-by: James Morse Tested-by: zhang.lei Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 19 +++++++++++++++++++ arch/arm64/include/asm/assembler.h | 20 ++++++++++++++++++++ arch/arm64/include/asm/cputype.h | 9 +++++++++ arch/arm64/include/asm/pgtable-hwdef.h | 1 + arch/arm64/mm/proc.S | 1 + 5 files changed, 50 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c7a44bcfc385..3fd266a177b5 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -643,6 +643,25 @@ config QCOM_FALKOR_ERRATUM_E1041 If unsure, say Y. +config FUJITSU_ERRATUM_010001 + bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly" + default y + help + This option adds workaround for Fujitsu-A64FX erratum E#010001. + On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory + accesses may cause undefined fault (Data abort, DFSC=0b111111). + This fault occurs under a specific hardware condition when a + load/store instruction performs an address translation using: + case-1 TTBR0_EL1 with TCR_EL1.NFD0 == 1. + case-2 TTBR0_EL2 with TCR_EL2.NFD0 == 1. + case-3 TTBR1_EL1 with TCR_EL1.NFD1 == 1. + case-4 TTBR1_EL2 with TCR_EL2.NFD1 == 1. + + The workaround is to ensure these bits are clear in TCR_ELx. + The workaround only affect the Fujitsu-A64FX. + + If unsure, say Y. 
+ endmenu diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 9c5c876a9ff2..c5308d01e228 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -596,6 +597,25 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU #endif .endm +/* + * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU. + */ + .macro tcr_clear_errata_bits, tcr, tmp1, tmp2 +#ifdef CONFIG_FUJITSU_ERRATUM_010001 + mrs \tmp1, midr_el1 + + mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK + and \tmp1, \tmp1, \tmp2 + mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001 + cmp \tmp1, \tmp2 + b.ne 10f + + mov_q \tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001 + bic \tcr, \tcr, \tmp2 +10: +#endif /* CONFIG_FUJITSU_ERRATUM_010001 */ + .endm + /** * Errata workaround prior to disable MMU. Insert an ISB immediately prior * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 951ed1a4e5c9..2afb1338b48a 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -76,6 +76,7 @@ #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E +#define ARM_CPU_IMP_FUJITSU 0x46 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -104,6 +105,8 @@ #define NVIDIA_CPU_PART_DENVER 0x003 #define NVIDIA_CPU_PART_CARMEL 0x004 +#define FUJITSU_CPU_PART_A64FX 0x001 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -122,6 +125,12 @@ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) +#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) + +/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ +#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX +#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_VARIANT(1)) +#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index e9b0a7d75184..a69259cc1f16 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -302,6 +302,7 @@ #define TCR_TBI1 (UL(1) << 38) #define TCR_HA (UL(1) << 39) #define TCR_HD (UL(1) << 40) +#define TCR_NFD0 (UL(1) << 53) #define TCR_NFD1 (UL(1) << 54) /* diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 3ea4f3b84a8b..aa0817c9c4c3 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -445,6 +445,7 @@ ENTRY(__cpu_setup) ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS + tcr_clear_errata_bits x10, x9, x5 #ifdef CONFIG_ARM64_USER_VA_BITS_52 ldr_l x9, vabits_user -- cgit v1.2.1 From 2c97a9cc35a7a73a7580a8e2632419ff3c0b0fe5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 22 Feb 2019 18:04:54 +0000 Subject: arm64: io: Hook up __io_par() for inX() ordering Ensure that inX() provides the same ordering guarantees as readX() by hooking up 
__io_par() so that it maps directly to __iormb(). Reported-by: Andrew Murray Reviewed-by: Palmer Dabbelt Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/io.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index ee723835c1f4..8bb7210ac286 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -121,6 +121,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) : "memory"); \ }) +#define __io_par(v) __iormb(v) #define __iowmb() wmb() #define mmiowb() do { } while (0) -- cgit v1.2.1 From a29c78234942fcfba2c5c305adc85b64332f9a95 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Tue, 15 Jan 2019 20:18:39 +0100 Subject: arm64: Kconfig.platforms: fix warning unmet direct dependencies When ARCH_MXC get enabled, ARM64_ERRATUM_845719 will be selected and this warning will happen when COMPAT isn't set. WARNING: unmet direct dependencies detected for ARM64_ERRATUM_845719 Depends on [n]: COMPAT [=n] Selected by [y]: - ARCH_MXC [=y] Rework to add 'if COMPAT' before ARM64_ERRATUM_845719 gets selected, since ARM64_ERRATUM_845719 depends on COMPAT. Acked-by: Will Deacon Signed-off-by: Anders Roxell Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig.platforms | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 251ecf34cb02..d4faca775d9c 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -145,7 +145,7 @@ config ARCH_MVEBU config ARCH_MXC bool "ARMv8 based NXP i.MX SoC family" select ARM64_ERRATUM_843419 - select ARM64_ERRATUM_845719 + select ARM64_ERRATUM_845719 if COMPAT help This enables support for the ARMv8 based SoCs in the NXP i.MX family. -- cgit v1.2.1 From 366e37e4da23f9df498cc9577cadcb354f7bd431 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 22 Feb 2019 15:42:23 +0100 Subject: arm64: avoid clang warning about self-assignment Building a preprocessed source file for arm64 now always produces a warning with clang because of the page_to_virt() macro assigning a variable to itself. Adding a new temporary variable avoids this issue. Fixes: 2813b9c02962 ("kasan, mm, arm64: tag non slab memory allocated via pagealloc") Reviewed-by: Andrey Konovalov Signed-off-by: Arnd Bergmann Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/memory.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e1ec947e7c0c..6340aa8350d9 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -316,8 +316,9 @@ static inline void *phys_to_virt(phys_addr_t x) #define page_to_virt(page) ({ \ unsigned long __addr = \ ((__page_to_voff(page)) | PAGE_OFFSET); \ - __addr = __tag_set(__addr, page_kasan_tag(page)); \ - ((void *)__addr); \ + unsigned long __addr_tag = \ + __tag_set(__addr, page_kasan_tag(page)); \ + ((void *)__addr_tag); \ }) #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) -- cgit v1.2.1 From 3cd0ddb3deec43bd1cfcdd39d3dde37a0135d0c6 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 1 Mar 2019 14:19:06 +0000 Subject: Revert "arm64: uaccess: Implement unsafe accessors" This reverts commit 0bd3ef34d2a8dd4056560567073d8bfc5da92e39. There is ongoing work on objtool to identify incorrect uses of user_access_{begin,end}. 
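(For context, the usage pattern objtool has to police, sketched from the generic API:

/*	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, efault);	// branches to efault on fault
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();			// easy to forget on error paths
 *	return -EFAULT;
 */

Every exit path must pair the begin with an end, which is what the tooling needs to verify.)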
Until this is sorted, do not enable the functionality on arm64. Also, on ARMv8.2 CPUs with hardware PAN and UAO support, there is no obvious performance benefit to the unsafe user accessors. Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 79 ++++++++++------------------------------ 1 file changed, 20 insertions(+), 59 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 6a70c75ed9f4..8e408084b8c3 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -270,26 +270,31 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) #define __raw_get_user(x, ptr, err) \ do { \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("ldrb", "ldtrb", "%w", (x), (ptr), \ + __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __get_user_asm("ldrh", "ldtrh", "%w", (x), (ptr), \ + __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __get_user_asm("ldr", "ldtr", "%w", (x), (ptr), \ + __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __get_user_asm("ldr", "ldtr", "%x", (x), (ptr), \ + __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ + uaccess_disable_not_uao(); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) #define __get_user_error(x, ptr, err) \ @@ -297,13 +302,8 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ - unsigned long __gu_val; \ - __chk_user_ptr(__p); \ __p = uaccess_mask_ptr(__p); \ - uaccess_enable_not_uao(); \ - __raw_get_user(__gu_val, __p, (err)); \ - uaccess_disable_not_uao(); \ - (x) = (__force __typeof__(*__p)) __gu_val; \ + __raw_get_user((x), __p, (err)); \ } else { \ (x) = 0; (err) = -EFAULT; \ } \ @@ -334,26 +334,30 @@ do { \ #define __raw_put_user(x, ptr, err) \ do { \ + __typeof__(*(ptr)) __pu_val = (x); \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __put_user_asm("strb", "sttrb", "%w", (x), (ptr), \ + __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __put_user_asm("strh", "sttrh", "%w", (x), (ptr), \ + __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __put_user_asm("str", "sttr", "%w", (x), (ptr), \ + __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __put_user_asm("str", "sttr", "%x", (x), (ptr), \ + __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ + uaccess_disable_not_uao(); \ } while (0) #define __put_user_error(x, ptr, err) \ @@ -361,13 +365,9 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ - __typeof__(*(ptr)) __pu_val = (x); \ - __chk_user_ptr(__p); \ __p = uaccess_mask_ptr(__p); \ - uaccess_enable_not_uao(); \ - __raw_put_user(__pu_val, __p, (err)); \ - uaccess_disable_not_uao(); \ - } else { \ + __raw_put_user((x), __p, (err)); \ + } else { \ (err) = -EFAULT; \ } \ } while (0) @@ -381,45 +381,6 @@ do { \ #define put_user __put_user -static __must_check inline bool user_access_begin(const void __user *ptr, - size_t len) -{ - if 
(unlikely(!access_ok(ptr, len))) - return false; - - uaccess_enable_not_uao(); - return true; -} -#define user_access_begin(ptr, len) user_access_begin(ptr, len) -#define user_access_end() uaccess_disable_not_uao() - -#define unsafe_get_user(x, ptr, err) \ -do { \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - unsigned long __gu_val; \ - int __gu_err = 0; \ - might_fault(); \ - __chk_user_ptr(__p); \ - __p = uaccess_mask_ptr(__p); \ - __raw_get_user(__gu_val, __p, __gu_err); \ - (x) = (__force __typeof__(*__p)) __gu_val; \ - if (__gu_err != 0) \ - goto err; \ -} while (0) - -#define unsafe_put_user(x, ptr, err) \ -do { \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - __typeof__(*(ptr)) __pu_val = (x); \ - int __pu_err = 0; \ - might_fault(); \ - __chk_user_ptr(__p); \ - __p = uaccess_mask_ptr(__p); \ - __raw_put_user(__pu_val, __p, __pu_err); \ - if (__pu_err != 0) \ - goto err; \ -} while (0) - extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); #define raw_copy_from_user(to, from, n) \ ({ \ -- cgit v1.2.1 From b9a4b9d084d978f80eb9210727c81804588b42ff Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 1 Mar 2019 13:28:00 +0000 Subject: arm64: debug: Don't propagate UNKNOWN FAR into si_code for debug signals FAR_EL1 is UNKNOWN for all debug exceptions other than those caused by taking a hardware watchpoint. Unfortunately, if a debug handler returns a non-zero value, then we will propagate the UNKNOWN FAR value to userspace via the si_addr field of the SIGTRAP siginfo_t. Instead, let's set si_addr to take on the PC of the faulting instruction, which we have available in the current pt_regs. Cc: Reviewed-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/mm/fault.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index efb7b2cbead5..ef46925096f0 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -824,11 +824,12 @@ void __init hook_debug_fault_code(int nr, debug_fault_info[nr].name = name; } -asmlinkage int __exception do_debug_exception(unsigned long addr, +asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_debug_fault_info(esr); + unsigned long pc = instruction_pointer(regs); int rv; /* @@ -838,14 +839,14 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (interrupts_enabled(regs)) trace_hardirqs_off(); - if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs))) + if (user_mode(regs) && !is_ttbr0_addr(pc)) arm64_apply_bp_hardening(); - if (!inf->fn(addr, esr, regs)) { + if (!inf->fn(addr_if_watchpoint, esr, regs)) { rv = 1; } else { arm64_notify_die(inf->name, regs, - inf->sig, inf->code, (void __user *)addr, esr); + inf->sig, inf->code, (void __user *)pc, esr); rv = 0; } -- cgit v1.2.1 From 6bd288569b50bc89fa5513031086746968f585cb Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 1 Mar 2019 13:28:01 +0000 Subject: arm64: debug: Ensure debug handlers check triggering exception level Debug exception handlers may be called for exceptions generated both by user and kernel code. In many cases, this is checked explicitly, but in other cases things either happen to work by happy accident or they go slightly wrong. 
For example, executing 'brk #4' from userspace will enter the kprobes code and be ignored, but the instruction will be retried forever in userspace instead of delivering a SIGTRAP. Fix this issue in the most stable-friendly fashion by simply adding explicit checks of the triggering exception level to all of our debug exception handlers. Cc: Reviewed-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/kgdb.c | 14 ++++++++++---- arch/arm64/kernel/probes/kprobes.c | 6 ++++++ 2 files changed, 16 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index ce46c4cdf368..691854b77c7f 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_brk_fn) static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) { - if (!kgdb_single_step) + if (user_mode(regs) || !kgdb_single_step) return DBG_HOOK_ERROR; kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_step_brk_fn); diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 2a5b338b2542..ea32379de331 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); int retval; + if (user_mode(regs)) + return DBG_HOOK_ERROR; + /* return error if this is not our step */ retval = kprobe_ss_hit(kcb, instruction_pointer(regs)); @@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) int __kprobes kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + kprobe_handler(regs); return DBG_HOOK_HANDLED; } -- cgit v1.2.1 From b855b58ac1b7891b219e1d9ef60c45c774cadefe Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Wed, 13 Feb 2019 12:10:09 +0000 Subject: arm64: mmu: drop paging_init comments The comments could not reflect the code, and it is easy to get what this function does from a straight-line reading of the code. So let's drop the comments Signed-off-by: Peng Fan Acked-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index d6b6f1b169bb..402b6495ff58 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -654,10 +654,6 @@ static void __init map_kernel(pgd_t *pgdp) kasan_copy_shadow(pgdp); } -/* - * paging_init() sets up the page tables, initialises the zone memory - * maps and sets up the zero page. 
- */ void __init paging_init(void) { pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir)); -- cgit v1.2.1 From 337555744e6e39dd1d87698c6084dd88a606d60a Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 11 Mar 2019 23:29:21 -0700 Subject: memblock: memblock_phys_alloc_try_nid(): don't panic The memblock_phys_alloc_try_nid() function tries to allocate memory from the requested node and then falls back to allocation from any node in the system. The memblock_alloc_base() fallback used by this function panics if the allocation fails. Replace the memblock_alloc_base() fallback with the direct call to memblock_alloc_range_nid() and update the memblock_phys_alloc_try_nid() callers to check the returned value and panic in case of error. Link: http://lkml.kernel.org/r/1548057848-15136-7-git-send-email-rppt@linux.ibm.com Signed-off-by: Mike Rapoport Acked-by: Michael Ellerman [powerpc] Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: "David S. Miller" Cc: Dennis Zhou Cc: Geert Uytterhoeven Cc: Greentime Hu Cc: Greg Kroah-Hartman Cc: Guan Xuetao Cc: Guo Ren Cc: Guo Ren [c-sky] Cc: Heiko Carstens Cc: Juergen Gross [Xen] Cc: Mark Salter Cc: Matt Turner Cc: Max Filippov Cc: Michal Simek Cc: Paul Burton Cc: Petr Mladek Cc: Richard Weinberger Cc: Rich Felker Cc: Rob Herring Cc: Rob Herring Cc: Russell King Cc: Stafford Horne Cc: Tony Luck Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/numa.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 7a0a555b366a..06a6f264f2dd 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -237,6 +237,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) pr_info("Initmem setup node %d []\n", nid); nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + if (!nd_pa) + panic("Cannot allocate %zu bytes for node %d data\n", + nd_size, nid); + nd = __va(nd_pa); /* report and initialize */ -- cgit v1.2.1 From ecc3e771f4ca98c52a072e41804434b4979bdf84 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 11 Mar 2019 23:29:26 -0700 Subject: memblock: memblock_phys_alloc(): don't panic Make the memblock_phys_alloc() function an inline wrapper for memblock_phys_alloc_range() and update the memblock_phys_alloc() callers to check the returned value and panic in case of error. Link: http://lkml.kernel.org/r/1548057848-15136-8-git-send-email-rppt@linux.ibm.com Signed-off-by: Mike Rapoport Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: "David S. 
Miller" Cc: Dennis Zhou Cc: Geert Uytterhoeven Cc: Greentime Hu Cc: Greg Kroah-Hartman Cc: Guan Xuetao Cc: Guo Ren Cc: Guo Ren [c-sky] Cc: Heiko Carstens Cc: Juergen Gross [Xen] Cc: Mark Salter Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Paul Burton Cc: Petr Mladek Cc: Richard Weinberger Cc: Rich Felker Cc: Rob Herring Cc: Rob Herring Cc: Russell King Cc: Stafford Horne Cc: Tony Luck Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/mmu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 402b6495ff58..e97f018ff740 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -103,6 +103,8 @@ static phys_addr_t __init early_pgtable_alloc(void) void *ptr; phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + if (!phys) + panic("Failed to allocate page table page\n"); /* * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE -- cgit v1.2.1 From 8a7f97b902f4fb0d94b355b6b3f1fbd7154cafb9 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 11 Mar 2019 23:30:31 -0700 Subject: treewide: add checks for the return value of memblock_alloc*() Add check for the return value of memblock_alloc*() functions and call panic() in case of error. The panic message repeats the one used by panicing memblock allocators with adjustment of parameters to include only relevant ones. The replacement was mostly automated with semantic patches like the one below with manual massaging of format strings. @@ expression ptr, size, align; @@ ptr = memblock_alloc(size, align); + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, size, align); [anders.roxell@linaro.org: use '%pa' with 'phys_addr_t' type] Link: http://lkml.kernel.org/r/20190131161046.21886-1-anders.roxell@linaro.org [rppt@linux.ibm.com: fix format strings for panics after memblock_alloc] Link: http://lkml.kernel.org/r/1548950940-15145-1-git-send-email-rppt@linux.ibm.com [rppt@linux.ibm.com: don't panic if the allocation in sparse_buffer_init fails] Link: http://lkml.kernel.org/r/20190131074018.GD28876@rapoport-lnx [akpm@linux-foundation.org: fix xtensa printk warning] Link: http://lkml.kernel.org/r/1548057848-15136-20-git-send-email-rppt@linux.ibm.com Signed-off-by: Mike Rapoport Signed-off-by: Anders Roxell Reviewed-by: Guo Ren [c-sky] Acked-by: Paul Burton [MIPS] Acked-by: Heiko Carstens [s390] Reviewed-by: Juergen Gross [Xen] Reviewed-by: Geert Uytterhoeven [m68k] Acked-by: Max Filippov [xtensa] Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: "David S. 
Miller" Cc: Dennis Zhou Cc: Greentime Hu Cc: Greg Kroah-Hartman Cc: Guan Xuetao Cc: Guo Ren Cc: Mark Salter Cc: Matt Turner Cc: Michael Ellerman Cc: Michal Simek Cc: Petr Mladek Cc: Richard Weinberger Cc: Rich Felker Cc: Rob Herring Cc: Rob Herring Cc: Russell King Cc: Stafford Horne Cc: Tony Luck Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/kernel/setup.c | 8 +++++--- arch/arm64/mm/kasan_init.c | 10 ++++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 834b321a88f8..f8482fe5a190 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -208,6 +208,7 @@ static void __init request_standard_resources(void) struct memblock_region *region; struct resource *res; unsigned long i = 0; + size_t res_size; kernel_code.start = __pa_symbol(_text); kernel_code.end = __pa_symbol(__init_begin - 1); @@ -215,9 +216,10 @@ static void __init request_standard_resources(void) kernel_data.end = __pa_symbol(_end - 1); num_standard_resources = memblock.memory.cnt; - standard_resources = memblock_alloc_low(num_standard_resources * - sizeof(*standard_resources), - SMP_CACHE_BYTES); + res_size = num_standard_resources * sizeof(*standard_resources); + standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES); + if (!standard_resources) + panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); for_each_memblock(memory, region) { res = &standard_resources[i++]; diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index f37a86d2a69d..296de39ddee5 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -40,6 +40,11 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_KASAN, node); + if (!p) + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", + __func__, PAGE_SIZE, PAGE_SIZE, node, + __pa(MAX_DMA_ADDRESS)); + return __pa(p); } @@ -48,6 +53,11 @@ static phys_addr_t __init kasan_alloc_raw_page(int node) void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_KASAN, node); + if (!p) + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", + __func__, PAGE_SIZE, PAGE_SIZE, node, + __pa(MAX_DMA_ADDRESS)); + return __pa(p); } -- cgit v1.2.1 From 7a9b6be9fe58194d9a349159176e8cc0d8f10ef8 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 8 Mar 2019 13:02:16 -0800 Subject: arm64: bcm2835: Add missing dependency on MFD_CORE. When adding the MFD dependency for power domains and WDT in bcm2835, I added it only on the arm32 side and missed it for arm64. 
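The dependency, roughly (a reconstruction of the chain, mirroring the earlier arm32 fix, not text from the patch):

/* bcm2835-pm probe
 *   -> devm_mfd_add_devices()        // provided only with MFD_CORE=y
 *        -> "bcm2835-power" cell     // power domains
 *        -> "bcm2835-wdt" cell       // watchdog
 * Without the select, the PM/watchdog children are never created.
 */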
Fixes: 5e6acc3e678e ("bcm2835-pm: Move bcm2835-watchdog's DT probe to an MFD.") Signed-off-by: Eric Anholt Reported-by: Stefan Wahren Acked-by: Stefan Wahren --- arch/arm64/Kconfig.platforms | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 251ecf34cb02..50f9fb562059 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -27,6 +27,7 @@ config ARCH_BCM2835 bool "Broadcom BCM2835 family" select TIMER_OF select GPIOLIB + select MFD_CORE select PINCTRL select PINCTRL_BCM2835 select ARM_AMBA -- cgit v1.2.1 From 037fc3368be46dc1a2a90f6e50c8cbce49d75fd6 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 17 Mar 2019 11:01:09 +0900 Subject: kbuild: force all architectures except um to include mandatory-y Currently, every arch/*/include/uapi/asm/Kbuild explicitly includes the common Kbuild.asm file. Factor out the duplicated include directives to scripts/Makefile.asm-generic so that no architecture would opt out of the mandatory-y mechanism. um is not forced to include mandatory-y since it is a very exceptional case which does not support UAPI. Signed-off-by: Masahiro Yamada --- arch/arm64/include/uapi/asm/Kbuild | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index 87eea29b24ab..602d137932dc 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -- cgit v1.2.1 From a872fc8bf0304fd56347e94468f07d7e82c679ea Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 13 Feb 2019 00:43:22 +0900 Subject: arm64: kprobes: Move extable address check into arch_prepare_kprobe() Move extable address check into arch_prepare_kprobe() from arch_within_kprobe_blacklist(). The blacklist is exposed via debugfs as a list of symbols. The extable entries are smaller, so must be filtered out by arch_prepare_kprobe(). Acked-by: Will Deacon Reviewed-by: James Morse Signed-off-by: Masami Hiramatsu Signed-off-by: Catalin Marinas --- arch/arm64/kernel/probes/kprobes.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 7fb6f3aa5ceb..16bc3b2e9351 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -102,6 +102,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) if (in_exception_text(probe_addr)) return -EINVAL; + + if (search_exception_tables(probe_addr)) + return -EINVAL; + if (probe_addr >= (unsigned long) __start_rodata && probe_addr <= (unsigned long) __end_rodata) return -EINVAL; @@ -485,8 +489,7 @@ bool arch_within_kprobe_blacklist(unsigned long addr) (addr >= (unsigned long)__idmap_text_start && addr < (unsigned long)__idmap_text_end) || (addr >= (unsigned long)__hyp_text_start && - addr < (unsigned long)__hyp_text_end) || - !!search_exception_tables(addr)) + addr < (unsigned long)__hyp_text_end)) return true; if (!is_kernel_in_hyp_mode()) { -- cgit v1.2.1 From b5586163de1ce90317cd4037f69b14105be9f656 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 13 Feb 2019 00:43:51 +0900 Subject: arm64: kprobes: Remove unneeded RODATA check Remove unneeded RODATA check from arch_prepare_kprobe(). 
Since check_kprobe_address_safe() already ensured that the probe address is in kernel text, we don't need to check whether the address in RODATA or not. That must be always false. Acked-by: Will Deacon Signed-off-by: Masami Hiramatsu Signed-off-by: Catalin Marinas --- arch/arm64/kernel/probes/kprobes.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 16bc3b2e9351..b168873147f5 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -91,8 +91,6 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long probe_addr = (unsigned long)p->addr; - extern char __start_rodata[]; - extern char __end_rodata[]; if (probe_addr & 0x3) return -EINVAL; @@ -106,10 +104,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) if (search_exception_tables(probe_addr)) return -EINVAL; - if (probe_addr >= (unsigned long) __start_rodata && - probe_addr <= (unsigned long) __end_rodata) - return -EINVAL; - /* decode instruction */ switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) { case INSN_REJECTED: /* insn not supported */ -- cgit v1.2.1 From 6e08af0f10dcde01f0bdcc64cf91fea9d25e77cc Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 13 Feb 2019 00:44:19 +0900 Subject: arm64: kprobes: Move exception_text check in blacklist Move exception/irqentry text address check in blacklist, since those are symbol based rejection. If we prohibit probing on the symbols in exception_text, those should be blacklisted. Acked-by: Will Deacon Signed-off-by: Masami Hiramatsu Signed-off-by: Catalin Marinas --- arch/arm64/kernel/probes/kprobes.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index b168873147f5..d6697930d075 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -98,9 +98,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) /* copy instruction */ p->opcode = le32_to_cpu(*p->addr); - if (in_exception_text(probe_addr)) - return -EINVAL; - if (search_exception_tables(probe_addr)) return -EINVAL; @@ -483,7 +480,8 @@ bool arch_within_kprobe_blacklist(unsigned long addr) (addr >= (unsigned long)__idmap_text_start && addr < (unsigned long)__idmap_text_end) || (addr >= (unsigned long)__hyp_text_start && - addr < (unsigned long)__hyp_text_end)) + addr < (unsigned long)__hyp_text_end) || + in_exception_text(addr)) return true; if (!is_kernel_in_hyp_mode()) { -- cgit v1.2.1 From 6a019a92aa580cd5abdaae578a2a297c9af80174 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 13 Feb 2019 00:44:48 +0900 Subject: arm64: kprobes: Use arch_populate_kprobe_blacklist() Use arch_populate_kprobe_blacklist() instead of arch_within_kprobe_blacklist() so that we can see the full blacklisted symbols under the debugfs. 
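Concretely, entries such as these become listable (output shape of kprobes' generic debugfs file; the addresses are invented for illustration):

/* # cat /sys/kernel/debug/kprobes/blacklist
 * 0xffff000010082000-0xffff000010085000	__entry_text_start
 * 0xffff000010a00000-0xffff000010a08000	__hyp_text_start
 */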
Acked-by: Will Deacon Signed-off-by: Masami Hiramatsu [catalin.marinas@arm.com: Add arch_populate_kprobe_blacklist() comment] Signed-off-by: Catalin Marinas --- arch/arm64/kernel/probes/kprobes.c | 49 +++++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 19 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index d6697930d075..7a679caf4585 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -471,26 +471,37 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) return DBG_HOOK_HANDLED; } -bool arch_within_kprobe_blacklist(unsigned long addr) +/* + * Provide a blacklist of symbols identifying ranges which cannot be kprobed. + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). + */ +int __init arch_populate_kprobe_blacklist(void) { - if ((addr >= (unsigned long)__kprobes_text_start && - addr < (unsigned long)__kprobes_text_end) || - (addr >= (unsigned long)__entry_text_start && - addr < (unsigned long)__entry_text_end) || - (addr >= (unsigned long)__idmap_text_start && - addr < (unsigned long)__idmap_text_end) || - (addr >= (unsigned long)__hyp_text_start && - addr < (unsigned long)__hyp_text_end) || - in_exception_text(addr)) - return true; - - if (!is_kernel_in_hyp_mode()) { - if ((addr >= (unsigned long)__hyp_idmap_text_start && - addr < (unsigned long)__hyp_idmap_text_end)) - return true; - } - - return false; + int ret; + + ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start, + (unsigned long)__entry_text_end); + if (ret) + return ret; + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); + if (ret) + return ret; + ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start, + (unsigned long)__exception_text_end); + if (ret) + return ret; + ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start, + (unsigned long)__idmap_text_end); + if (ret) + return ret; + ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start, + (unsigned long)__hyp_text_end); + if (ret || is_kernel_in_hyp_mode()) + return ret; + ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start, + (unsigned long)__hyp_idmap_text_end); + return ret; } void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) -- cgit v1.2.1 From 3dbcea54b3ff706c58f8e8d4470f5e5d3d24a6a0 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 15 Mar 2019 12:22:36 +0000 Subject: arm64: apply workaround on A64FX v1r0 Fujitsu erratum 010001 applies to A64FX v0r0 and v1r0, and we try to handle either by masking MIDR with MIDR_FUJITSU_ERRATUM_010001_MASK before comparing it to MIDR_FUJITSU_ERRATUM_010001. Unfortunately, MIDR_FUJITSU_ERRATUM_010001 is constructed incorrectly using MIDR_VARIANT(), which is intended to extract the variant field from MIDR_EL1, rather than generate the field in-place. This results in MIDR_FUJITSU_ERRATUM_010001 being all-ones, and we only match A64FX v0r0. This patch uses MIDR_CPU_VAR_REV() to generate an in-place mask for the variant field, ensuring the we match both v0r0 and v1r0. 
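The arithmetic, spelled out with the cputype.h helpers (a sketch, not code from the patch):

/* MIDR_VARIANT() *extracts* the variant field, bits [23:20], from a
 * MIDR value, so:
 *   MIDR_VARIANT(1)         == 0
 *   ~MIDR_VARIANT(1)        == ~0          -> every bit compared, v0r0 only
 * MIDR_CPU_VAR_REV() *builds* the fields in place, so:
 *   MIDR_CPU_VAR_REV(1, 0)  == 1UL << 20
 *   ~MIDR_CPU_VAR_REV(1, 0) clears that variant bit before the compare,
 *                           letting both v0r0 and v1r0 match.
 */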
Fixes: 3e32131abc311a5c ("arm64: Add workaround for Fujitsu A64FX erratum 010001") Reported-by: "Okamoto, Takayuki" Signed-off-by: Mark Rutland [catalin.marinas@arm.com: fixed the patch author] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cputype.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 2afb1338b48a..f3b659587a36 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -129,7 +129,7 @@ /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX -#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_VARIANT(1)) +#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0)) #define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0) #ifndef __ASSEMBLY__ -- cgit v1.2.1 From c82fd1e6bd55ecc001e610e5484e292a7d8a39fc Mon Sep 17 00:00:00 2001 From: William Cohen Date: Fri, 1 Mar 2019 15:00:41 -0500 Subject: arm64/stacktrace: Export save_stack_trace_regs() The ARM64 implements the save_stack_trace_regs function, but it is unusable for any diagnostic tooling compiled as a kernel module due the missing EXPORT_SYMBOL_GPL for the function. Export save_stack_trace_regs() to align with other architectures such as s390, openrisc, and powerpc. This is similar to the ARM64 export of save_stack_trace_tsk() added in git commit e27c7fa015d6. Signed-off-by: William Cohen Signed-off-by: Catalin Marinas --- arch/arm64/kernel/stacktrace.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 1a29f2695ff2..d908b5e9e949 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -143,6 +143,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = ULONG_MAX; } +EXPORT_SYMBOL_GPL(save_stack_trace_regs); static noinline void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace, unsigned int nosched) -- cgit v1.2.1 From efd00c722ca855745fcc35a7e6675b5a782a3fc8 Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Tue, 5 Mar 2019 21:40:57 +0800 Subject: arm64: Add MIDR encoding for HiSilicon Taishan CPUs Adding the MIDR encodings for HiSilicon Taishan v110 CPUs, which is used in Kunpeng ARM64 server SoCs. TSV110 is the abbreviation of Taishan v110. 
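For reference, how the new model value decomposes (standard MIDR_EL1 layout; annotation, not part of the patch):

/* MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
 *   = (0x48  << 24)    // implementer: ASCII 'H', HiSilicon
 *   | (0xf   << 16)    // architecture: "defined by CPUID scheme"
 *   | (0xd01 <<  4)    // part number: TSV110
 */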
Signed-off-by: Hanjun Guo Reviewed-by: John Garry Reviewed-by: Zhangshaokun Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cputype.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index f3b659587a36..5f1437099b99 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -77,6 +77,7 @@ #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E #define ARM_CPU_IMP_FUJITSU 0x46 +#define ARM_CPU_IMP_HISI 0x48 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -107,6 +108,8 @@ #define FUJITSU_CPU_PART_A64FX 0x001 +#define HISI_CPU_PART_TSV110 0xD01 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -126,6 +129,7 @@ #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) +#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX -- cgit v1.2.1 From 0ecc471a2cb7d4d386089445a727f47b59dc9b6e Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Tue, 5 Mar 2019 21:40:58 +0800 Subject: arm64: kpti: Whitelist HiSilicon Taishan v110 CPUs HiSilicon Taishan v110 CPUs didn't implement CSV3 field of the ID_AA64PFR0_EL1 and are not susceptible to Meltdown, so whitelist the MIDR in kpti_safe_list[] table. Signed-off-by: Hanjun Guo Reviewed-by: John Garry Reviewed-by: Zhangshaokun Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index e24e94d28767..4061de10cea6 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -963,6 +963,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), { /* sentinel */ } }; char const *str = "command line option"; -- cgit v1.2.1 From ebff0b0e3d3c862c16c487959db5e0d879632559 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 4 Mar 2019 17:37:44 +0000 Subject: KVM: arm64: Reset the PMU in preemptible context We've become very cautious to now always reset the vcpu when nothing is loaded on the physical CPU. To do so, we now disable preemption and do a kvm_arch_vcpu_put() to make sure we have all the state in memory (and that it won't be loaded behind out back). This now causes issues with resetting the PMU, which calls into perf. Perf itself uses mutexes, which clashes with the lack of preemption. It is worth realizing that the PMU is fully emulated, and that no PMU state is ever loaded on the physical CPU. This means we can perfectly reset the PMU outside of the non-preemptible section. 
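The clash, schematically (not literal kernel code):

/* preempt_disable();
 * kvm_pmu_vcpu_reset(vcpu);   // walks into perf, which takes mutexes
 *                             // -> sleeping call in a non-preemptible
 *                             //    section
 * preempt_enable();
 *
 * Since no PMU state is ever loaded on the physical CPU, the reset can
 * simply be hoisted above preempt_disable(), as this patch does.
 */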
Fixes: e761a927bc9a ("KVM: arm/arm64: Reset the VCPU without preemption and vcpu state loaded") Reported-by: Julien Grall Tested-by: Julien Grall Signed-off-by: Marc Zyngier --- arch/arm64/kvm/reset.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f16a5f8ff2b4..e2a0500cd7a2 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) int ret = -EINVAL; bool loaded; + /* Reset PMU outside of the non-preemptible section */ + kvm_pmu_vcpu_reset(vcpu); + preempt_disable(); loaded = (vcpu->cpu != -1); if (loaded) @@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.reset_state.reset = false; } - /* Reset PMU */ - kvm_pmu_vcpu_reset(vcpu); - /* Default workaround setup is enabled (if supported) */ if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; -- cgit v1.2.1 From a6ecfb11bf37743c1ac49b266595582b107b61d4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 19 Mar 2019 12:47:11 +0000 Subject: KVM: arm/arm64: vgic-its: Take the srcu lock when writing to guest memory When halting a guest, QEMU flushes the virtual ITS caches, which amounts to writing to the various tables that the guest has allocated. When doing this, we fail to take the srcu lock, and the kernel shouts loudly if running a lockdep kernel: [ 69.680416] ============================= [ 69.680819] WARNING: suspicious RCU usage [ 69.681526] 5.1.0-rc1-00008-g600025238f51-dirty #18 Not tainted [ 69.682096] ----------------------------- [ 69.682501] ./include/linux/kvm_host.h:605 suspicious rcu_dereference_check() usage! [ 69.683225] [ 69.683225] other info that might help us debug this: [ 69.683225] [ 69.683975] [ 69.683975] rcu_scheduler_active = 2, debug_locks = 1 [ 69.684598] 6 locks held by qemu-system-aar/4097: [ 69.685059] #0: 0000000034196013 (&kvm->lock){+.+.}, at: vgic_its_set_attr+0x244/0x3a0 [ 69.686087] #1: 00000000f2ed935e (&its->its_lock){+.+.}, at: vgic_its_set_attr+0x250/0x3a0 [ 69.686919] #2: 000000005e71ea54 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xd0 [ 69.687698] #3: 00000000c17e548d (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xd0 [ 69.688475] #4: 00000000ba386017 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xd0 [ 69.689978] #5: 00000000c2c3c335 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xd0 [ 69.690729] [ 69.690729] stack backtrace: [ 69.691151] CPU: 2 PID: 4097 Comm: qemu-system-aar Not tainted 5.1.0-rc1-00008-g600025238f51-dirty #18 [ 69.691984] Hardware name: rockchip evb_rk3399/evb_rk3399, BIOS 2019.04-rc3-00124-g2feec69fb1 03/15/2019 [ 69.692831] Call trace: [ 69.694072] lockdep_rcu_suspicious+0xcc/0x110 [ 69.694490] gfn_to_memslot+0x174/0x190 [ 69.694853] kvm_write_guest+0x50/0xb0 [ 69.695209] vgic_its_save_tables_v0+0x248/0x330 [ 69.695639] vgic_its_set_attr+0x298/0x3a0 [ 69.696024] kvm_device_ioctl_attr+0x9c/0xd8 [ 69.696424] kvm_device_ioctl+0x8c/0xf8 [ 69.696788] do_vfs_ioctl+0xc8/0x960 [ 69.697128] ksys_ioctl+0x8c/0xa0 [ 69.697445] __arm64_sys_ioctl+0x28/0x38 [ 69.697817] el0_svc_common+0xd8/0x138 [ 69.698173] el0_svc_handler+0x38/0x78 [ 69.698528] el0_svc+0x8/0xc The fix is to obviously take the srcu lock, just like we do on the read side of things since bf308242ab98. One wonders why this wasn't fixed at the same time, but hey... 
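On the call side the fix is a one-word substitution (sketch; the vgic-its writers live outside arch/arm64, so only the wrapper shows in the diff below):

/* before:  ret = kvm_write_guest(kvm, gpa, entry, esz);
 * after:   ret = kvm_write_guest_lock(kvm, gpa, entry, esz);
 */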
Fixes: bf308242ab98 ("KVM: arm/arm64: VGIC/ITS: protect kvm_read_guest() calls with SRCU lock") Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_mmu.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index b0742a16c6c9..ebeefcf835e8 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, return ret; } +static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, + const void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_write_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + #ifdef CONFIG_KVM_INDIRECT_VECTORS /* * EL2 vectors can be mapped and rerouted in a number of ways, -- cgit v1.2.1 From e5a5af7718610c819c4d368bb62655ee43a38011 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Wed, 20 Mar 2019 10:20:56 -0700 Subject: arm64: remove obsolete selection of MULTI_IRQ_HANDLER The arm64 config selects MULTI_IRQ_HANDLER, which was renamed to GENERIC_IRQ_MULTI_HANDLER by commit 4c301f9b6a94 ("ARM: Convert to GENERIC_IRQ_MULTI_HANDLER"). The 'new' option is already selected, so just remove the obsolete entry. Signed-off-by: Matthias Kaehlcke Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 117b2541ef3d..7e34b9eba5de 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -159,7 +159,6 @@ config ARM64 select IRQ_DOMAIN select IRQ_FORCED_THREADING select MODULES_USE_ELF_RELA - select MULTI_IRQ_HANDLER select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH select OF -- cgit v1.2.1 From 93958742192e7956d05989836ada9071f9ffe42e Mon Sep 17 00:00:00 2001 From: Jonathan Hunter Date: Mon, 25 Mar 2019 12:28:07 +0100 Subject: arm64: tegra: Disable CQE Support for SDMMC4 on Tegra186 Enabling CQE support on Tegra186 Jetson TX2 has introduced a regression that is causing accesses to the file-system on the eMMC to fail. Errors such as the following have been observed ... mmc2: running CQE recovery mmc2: mmc_select_hs400 failed, error -110 print_req_error: I/O error, dev mmcblk2, sector 8 flags 80700 mmc2: cqhci: CQE failed to exit halt state For now disable CQE support for Tegra186 until this issue is resolved. Fixes: dfd3cb6feb73 arm64: tegra: Add CQE Support for SDMMC4 Signed-off-by: Jonathan Hunter Signed-off-by: Thierry Reding Signed-off-by: Arnd Bergmann --- arch/arm64/boot/dts/nvidia/tegra186.dtsi | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index bb2045be8814..97aeb946ed5e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -321,7 +321,6 @@ nvidia,default-trim = <0x9>; nvidia,dqs-trim = <63>; mmc-hs400-1_8v; - supports-cqe; status = "disabled"; }; -- cgit v1.2.1 From 9e0a17db517d83d568bb7fa983b54759d4e34f1f Mon Sep 17 00:00:00 2001 From: Chen Zhou Date: Wed, 27 Mar 2019 21:51:16 +0800 Subject: arm64: replace memblock_alloc_low with memblock_alloc If we use "crashkernel=Y[@X]" and the start address is above 4G, the arm64 kdump capture kernel may call memblock_alloc_low() failure in request_standard_resources(). Replacing memblock_alloc_low() with memblock_alloc(). 
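The distinction, in brief (a gloss, not text from the patch):

/* memblock_alloc_low(size, align) -- allocate below ARCH_LOW_ADDRESS_LIMIT,
 *                                    i.e. within the first 4 GiB by default;
 * memblock_alloc(size, align)     -- allocate from any memblock memory.
 *
 * In the capture-kernel log below, the only sizeable usable region
 * (memory[0x5]) starts at 0x2040000000, well above 4 GiB, so the
 * low-memory request has nothing to satisfy it.
 */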
[ 0.000000] MEMBLOCK configuration: [ 0.000000] memory size = 0x0000000040650000 reserved size = 0x0000000004db7f39 [ 0.000000] memory.cnt = 0x6 [ 0.000000] memory[0x0] [0x00000000395f0000-0x000000003968ffff], 0x00000000000a0000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x1] [0x0000000039730000-0x000000003973ffff], 0x0000000000010000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x2] [0x0000000039780000-0x000000003986ffff], 0x00000000000f0000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x3] [0x0000000039890000-0x0000000039d0ffff], 0x0000000000480000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x4] [0x000000003ed00000-0x000000003ed2ffff], 0x0000000000030000 bytes on node 0 flags: 0x4 [ 0.000000] memory[0x5] [0x0000002040000000-0x000000207fffffff], 0x0000000040000000 bytes on node 0 flags: 0x0 [ 0.000000] reserved.cnt = 0x7 [ 0.000000] reserved[0x0] [0x0000002040080000-0x0000002041c4dfff], 0x0000000001bce000 bytes flags: 0x0 [ 0.000000] reserved[0x1] [0x0000002041c53000-0x0000002042c203f8], 0x0000000000fcd3f9 bytes flags: 0x0 [ 0.000000] reserved[0x2] [0x000000207da00000-0x000000207dbfffff], 0x0000000000200000 bytes flags: 0x0 [ 0.000000] reserved[0x3] [0x000000207ddef000-0x000000207fbfffff], 0x0000000001e11000 bytes flags: 0x0 [ 0.000000] reserved[0x4] [0x000000207fdf2b00-0x000000207fdfc03f], 0x0000000000009540 bytes flags: 0x0 [ 0.000000] reserved[0x5] [0x000000207fdfd000-0x000000207ffff3ff], 0x0000000000202400 bytes flags: 0x0 [ 0.000000] reserved[0x6] [0x000000207ffffe00-0x000000207fffffff], 0x0000000000000200 bytes flags: 0x0 [ 0.000000] Kernel panic - not syncing: request_standard_resources: Failed to allocate 384 bytes [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 5.1.0-next-20190321+ #4 [ 0.000000] Call trace: [ 0.000000] dump_backtrace+0x0/0x188 [ 0.000000] show_stack+0x24/0x30 [ 0.000000] dump_stack+0xa8/0xcc [ 0.000000] panic+0x14c/0x31c [ 0.000000] setup_arch+0x2b0/0x5e0 [ 0.000000] start_kernel+0x90/0x52c [ 0.000000] ---[ end Kernel panic - not syncing: request_standard_resources: Failed to allocate 384 bytes ]--- Link: https://www.spinics.net/lists/arm-kernel/msg715293.html Signed-off-by: Chen Zhou Signed-off-by: Catalin Marinas --- arch/arm64/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f8482fe5a190..413d566405d1 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -217,7 +217,7 @@ static void __init request_standard_resources(void) num_standard_resources = memblock.memory.cnt; res_size = num_standard_resources * sizeof(*standard_resources); - standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES); + standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES); if (!standard_resources) panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); -- cgit v1.2.1