Diffstat (limited to 'arch/arm64')
30 files changed, 398 insertions, 275 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9ac16a482ff1..871f21783866 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -49,7 +49,7 @@ config ARM64 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_BITREVERSE select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP + select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48) select HAVE_ARCH_KGDB select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK @@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075 If unsure, say Y. +config ARM64_ERRATUM_834220 + bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault" + depends on KVM + default y + help + This option adds an alternative code sequence to work around ARM + erratum 834220 on Cortex-A57 parts up to r1p2. + + Affected Cortex-A57 parts might report a Stage 2 translation + fault as the result of a Stage 1 fault for load crossing a + page boundary when there is a permission or device memory + alignment fault at Stage 1 and a translation fault at Stage 2. + + The workaround is to verify that the Stage 1 translation + doesn't generate a fault before handling the Stage 2 fault. + Please note that this does not necessarily enable the workaround, + as it depends on the alternative framework, which will only patch + the kernel if an affected CPU is detected. + + If unsure, say Y. + config ARM64_ERRATUM_845719 bool "Cortex-A53: 845719: a load might read incorrect data" depends on COMPAT diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi index e81cd48d6245..925552e7b4f3 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi @@ -269,6 +269,7 @@ clock-frequency = <0>; /* Updated by bootloader */ voltage-ranges = <1800 1800 3300 3300>; sdhci,auto-cmd12; + little-endian; bus-width = <4>; }; @@ -277,6 +278,7 @@ reg = <0x0 0x2300000 0x0 0x10000>; interrupts = <0 36 0x4>; /* Level high type */ gpio-controller; + little-endian; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; @@ -287,6 +289,7 @@ reg = <0x0 0x2310000 0x0 0x10000>; interrupts = <0 36 0x4>; /* Level high type */ gpio-controller; + little-endian; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; @@ -297,6 +300,7 @@ reg = <0x0 0x2320000 0x0 0x10000>; interrupts = <0 37 0x4>; /* Level high type */ gpio-controller; + little-endian; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; @@ -307,6 +311,7 @@ reg = <0x0 0x2330000 0x0 0x10000>; interrupts = <0 37 0x4>; /* Level high type */ gpio-controller; + little-endian; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c index ce47792a983d..f7bd9bf0bbb3 100644 --- a/arch/arm64/crypto/aes-ce-cipher.c +++ b/arch/arm64/crypto/aes-ce-cipher.c @@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey); static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-ce", - .cra_priority = 300, + .cra_priority = 250, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx), diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 030cdcb46c6b..2731d3b25ed2 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -77,6 +77,7 @@ #ifndef __ASSEMBLY__ #include <linux/stringify.h> +#include <asm/barrier.h> 
/* * Low-level accessors diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 624f9679f4b0..9622eb48f894 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -64,27 +64,31 @@ do { \ #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1; \ + union { typeof(*p) __val; char __c[1]; } __u; \ compiletime_assert_atomic_type(*p); \ switch (sizeof(*p)) { \ case 1: \ asm volatile ("ldarb %w0, %1" \ - : "=r" (___p1) : "Q" (*p) : "memory"); \ + : "=r" (*(__u8 *)__u.__c) \ + : "Q" (*p) : "memory"); \ break; \ case 2: \ asm volatile ("ldarh %w0, %1" \ - : "=r" (___p1) : "Q" (*p) : "memory"); \ + : "=r" (*(__u16 *)__u.__c) \ + : "Q" (*p) : "memory"); \ break; \ case 4: \ asm volatile ("ldar %w0, %1" \ - : "=r" (___p1) : "Q" (*p) : "memory"); \ + : "=r" (*(__u32 *)__u.__c) \ + : "Q" (*p) : "memory"); \ break; \ case 8: \ asm volatile ("ldar %0, %1" \ - : "=r" (___p1) : "Q" (*p) : "memory"); \ + : "=r" (*(__u64 *)__u.__c) \ + : "Q" (*p) : "memory"); \ break; \ } \ - ___p1; \ + __u.__val; \ }) #define read_barrier_depends() do { } while(0) diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 7fbed6919b54..eb8432bb82b8 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -23,7 +23,6 @@ */ #include <linux/types.h> #include <linux/sched.h> -#include <linux/ptrace.h> #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ @@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs())) +#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current))) static inline void __user *arch_compat_alloc_user_space(long len) { diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 11d5bb0fdd54..8f271b83f910 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -29,8 +29,9 @@ #define ARM64_HAS_PAN 4 #define ARM64_HAS_LSE_ATOMICS 5 #define ARM64_WORKAROUND_CAVIUM_23154 6 +#define ARM64_WORKAROUND_834220 7 -#define ARM64_NCAPS 7 +#define ARM64_NCAPS 8 #ifndef __ASSEMBLY__ @@ -46,8 +47,12 @@ enum ftr_type { #define FTR_STRICT true /* SANITY check strict matching required */ #define FTR_NONSTRICT false /* SANITY check ignored */ +#define FTR_SIGNED true /* Value should be treated as signed */ +#define FTR_UNSIGNED false /* Value should be treated as unsigned */ + struct arm64_ftr_bits { - bool strict; /* CPU Sanity check: strict matching required ? */ + bool sign; /* Value is signed ? */ + bool strict; /* CPU Sanity check: strict matching required ? 
*/ enum ftr_type type; u8 shift; u8 width; @@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field) return cpuid_feature_extract_field_width(features, field, 4); } +static inline unsigned int __attribute_const__ +cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width) +{ + return (u64)(features << (64 - width - field)) >> (64 - width); +} + +static inline unsigned int __attribute_const__ +cpuid_feature_extract_unsigned_field(u64 features, int field) +{ + return cpuid_feature_extract_unsigned_field_width(features, field, 4); +} + static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) { return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); @@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val) { - return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width); + return ftrp->sign ? + cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) : + cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width); } static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 54d0ead41afc..61e08f360e31 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -18,7 +18,6 @@ #ifdef __KERNEL__ -#include <linux/acpi.h> #include <linux/types.h> #include <linux/vmalloc.h> @@ -26,22 +25,16 @@ #include <asm/xen/hypervisor.h> #define DMA_ERROR_CODE (~(dma_addr_t)0) -extern struct dma_map_ops *dma_ops; extern struct dma_map_ops dummy_dma_ops; static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) { - if (unlikely(!dev)) - return dma_ops; - else if (dev->archdata.dma_ops) + if (dev && dev->archdata.dma_ops) return dev->archdata.dma_ops; - else if (acpi_disabled) - return dma_ops; /* - * When ACPI is enabled, if arch_set_dma_ops is not called, - * we will disable device DMA capability by setting it - * to dummy_dma_ops. + * We expect no ISA devices, and all other DMA masters are expected to + * have someone call arch_setup_dma_ops at device creation time. */ return &dummy_dma_ops; } diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index e54415ec6935..9732908bfc8a 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp; /* Determine number of BRP registers available. */ static inline int get_num_brps(void) { + u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1); return 1 + - cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), + cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_BRPS_SHIFT); } /* Determine number of WRP registers available. 
*/ static inline int get_num_wrps(void) { + u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1); return 1 + - cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), + cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_WRPS_SHIFT); } diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index 23eb450b820b..8e8d30684392 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -7,4 +7,9 @@ struct pt_regs; extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); +static inline int nr_legacy_irqs(void) +{ + return 0; +} + #endif diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 17e92f05b1fe..25a40213bd9b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -99,12 +99,22 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; } -static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) +/* + * vcpu_get_reg and vcpu_set_reg should always be passed a register number + * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on + * AArch32 with banked registers. + */ +static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, + u8 reg_num) { - if (vcpu_mode_is_32bit(vcpu)) - return vcpu_reg32(vcpu, reg_num); + return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num]; +} - return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, + unsigned long val) +{ + if (reg_num != 31) + vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val; } /* Get vcpu SPSR for current mode */ diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index c0e87898ba96..24165784b803 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void) #define destroy_context(mm) do { } while(0) void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); -#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) /* * This is called when "tsk" is about to enter lazy TLB mode. diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 9819a9426b69..63f52b55defe 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) +#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) @@ -275,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, * hardware updates of the pte (ptep_set_access_flags safely changes * valid ptes without going through an invalid entry). 
*/ - if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) && - pte_valid(*ptep)) { - BUG_ON(!pte_young(pte)); - BUG_ON(pte_write(*ptep) && !pte_dirty(pte)); + if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) && + pte_valid(*ptep) && pte_valid(pte)) { + VM_WARN_ONCE(!pte_young(pte), + "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", + __func__, pte_val(*ptep), pte_val(pte)); + VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte), + "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx", + __func__, pte_val(*ptep), pte_val(pte)); } set_pte(ptep, pte); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 24926f2504f7..feb6b4efa641 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { (1 << MIDR_VARIANT_SHIFT) | 2), }, #endif +#ifdef CONFIG_ARM64_ERRATUM_834220 + { + /* Cortex-A57 r0p0 - r1p2 */ + .desc = "ARM erratum 834220", + .capability = ARM64_WORKAROUND_834220, + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, + (1 << MIDR_VARIANT_SHIFT) | 2), + }, +#endif #ifdef CONFIG_ARM64_ERRATUM_845719 { /* Cortex-A53 r0p[01234] */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c8cf89223b5a..0669c63281ea 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); -#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ +#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ { \ + .sign = SIGNED, \ .strict = STRICT, \ .type = TYPE, \ .shift = SHIFT, \ @@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); .safe_val = SAFE_VAL, \ } +/* Define a feature with signed values */ +#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + +/* Define a feature with unsigned value */ +#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ + __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) + #define ARM64_FTR_END \ { \ .width = 0, \ @@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { * Differing PARange is fine as long as all peripherals and memory are mapped * within the minimum PARange of all CPUs */ - ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { }; static struct arm64_ftr_bits ftr_ctr[] = { - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ /* * Linux can handle differing I-cache policies. 
Userspace JITs will * make use of *minLine */ - ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ + U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */ - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ ARM64_FTR_END, }; @@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = { static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), + U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), ARM64_FTR_END, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 706679d0a0b4..212ae6361d8b 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -30,6 +30,7 @@ #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/smp.h> +#include <linux/delay.h> /* * In case the boot CPU is hotpluggable, we record its initial state and @@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v) */ seq_printf(m, "processor\t: %d\n", i); + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + loops_per_jiffy / (500000UL/HZ), + loops_per_jiffy / (5000UL/HZ) % 100); + /* * Dump out the common processor features in a single line. * Userspace should read the hwcaps with getauxval(AT_HWCAP) diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index de46b50f4cdf..4eeb17198cfa 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -127,7 +127,11 @@ static int __init uefi_init(void) table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; config_tables = early_memremap(efi_to_phys(efi.systab->tables), table_size); - + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, sizeof(efi_config_table_64_t), NULL); @@ -209,6 +213,14 @@ void __init efi_init(void) PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); memmap.phys_map = params.mmap; memmap.map = early_memremap(params.mmap, params.mmap_size); + if (memmap.map == NULL) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. 
+ */ + panic("Unable to map EFI memory map.\n"); + } memmap.map_end = memmap.map + params.mmap_size; memmap.desc_size = params.desc_size; memmap.desc_version = params.desc_ver; @@ -224,8 +236,9 @@ static bool __init efi_virtmap_init(void) { efi_memory_desc_t *md; + init_new_context(NULL, &efi_mm); + for_each_efi_memory_desc(&memmap, md) { - u64 paddr, npages, size; pgprot_t prot; if (!(md->attribute & EFI_MEMORY_RUNTIME)) @@ -233,11 +246,6 @@ static bool __init efi_virtmap_init(void) if (md->virt_addr == 0) return false; - paddr = md->phys_addr; - npages = md->num_pages; - memrange_efi_to_native(&paddr, &npages); - size = npages << PAGE_SHIFT; - pr_info(" EFI remap 0x%016llx => %p\n", md->phys_addr, (void *)md->virt_addr); @@ -254,7 +262,9 @@ static bool __init efi_virtmap_init(void) else prot = PAGE_KERNEL; - create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot); + create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr, + md->num_pages << EFI_PAGE_SHIFT, + __pgprot(pgprot_val(prot) | PTE_NG)); } return true; } @@ -270,12 +280,12 @@ static int __init arm64_enable_runtime_services(void) if (!efi_enabled(EFI_BOOT)) { pr_info("EFI services will not be available.\n"); - return -1; + return 0; } if (efi_runtime_disabled()) { pr_info("EFI runtime services will be disabled.\n"); - return -1; + return 0; } pr_info("Remapping and enabling EFI services.\n"); @@ -285,7 +295,7 @@ static int __init arm64_enable_runtime_services(void) mapsize); if (!memmap.map) { pr_err("Failed to remap EFI memory map\n"); - return -1; + return -ENOMEM; } memmap.map_end = memmap.map + mapsize; efi.memmap = &memmap; @@ -294,13 +304,13 @@ static int __init arm64_enable_runtime_services(void) sizeof(efi_system_table_t)); if (!efi.systab) { pr_err("Failed to remap EFI System Table\n"); - return -1; + return -ENOMEM; } set_bit(EFI_SYSTEM_TABLES, &efi.flags); if (!efi_virtmap_init()) { pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); - return -1; + return -ENOMEM; } /* Set up runtime services function pointers */ @@ -329,14 +339,7 @@ core_initcall(arm64_dmi_init); static void efi_set_pgd(struct mm_struct *mm) { - if (mm == &init_mm) - cpu_set_reserved_ttbr0(); - else - cpu_switch_mm(mm->pgd, mm); - - local_flush_tlb_all(); - if (icache_is_aivivt()) - __local_flush_icache_all(); + switch_mm(NULL, mm, NULL); } void efi_virtmap_load(void) diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index fce95e17cf7f..1095aa483a1c 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -1,3 +1,4 @@ +#include <linux/ftrace.h> #include <linux/percpu.h> #include <linux/slab.h> #include <asm/cacheflush.h> @@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) local_dbg_save(flags); /* + * Function graph tracer state gets incosistent when the kernel + * calls functions that never return (aka suspend finishers) hence + * disable graph tracing during their execution. + */ + pause_graph_tracing(); + + /* * mm context saved on the stack, it will be restored when * the cpu comes out of reset through the identity mapped * page tables, so that the thread address space is properly @@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) hw_breakpoint_restore(NULL); } + unpause_graph_tracing(); + /* * Restore pstate flags. 
OS lock and mdscr have been already * restored, so from this point onwards, debugging is fully diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 1ee2c3937d4e..71426a78db12 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -5,6 +5,7 @@ */ #include <asm-generic/vmlinux.lds.h> +#include <asm/cache.h> #include <asm/kernel-pgtable.h> #include <asm/thread_info.h> #include <asm/memory.h> @@ -140,7 +141,7 @@ SECTIONS ARM_EXIT_KEEP(EXIT_DATA) } - PERCPU_SECTION(64) + PERCPU_SECTION(L1_CACHE_BYTES) . = ALIGN(PAGE_SIZE); __init_end = .; @@ -158,7 +159,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); _data = .; _sdata = .; - RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) PECOFF_EDATA_PADDING _edata = .; diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 68a0759b1375..15f0477b0d2a 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) { int ret; - trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), + trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0), kvm_vcpu_hvc_get_imm(vcpu)); ret = kvm_psci_call(vcpu); diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 1599701ef044..86c289832272 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context) ENDPROC(__kvm_flush_vm_context) __kvm_hyp_panic: + // Stash PAR_EL1 before corrupting it in __restore_sysregs + mrs x0, par_el1 + push x0, xzr + // Guess the context by looking at VTTBR: // If zero, then we're already a host. // Otherwise restore a minimal host context before panicing. @@ -898,7 +902,7 @@ __kvm_hyp_panic: mrs x3, esr_el2 mrs x4, far_el2 mrs x5, hpfar_el2 - mrs x6, par_el1 + pop x6, xzr // active context PAR_EL1 mrs x7, tpidr_el2 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ @@ -914,7 +918,7 @@ __kvm_hyp_panic: ENDPROC(__kvm_hyp_panic) __hyp_panic_str: - .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" + .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0" .align 2 @@ -1015,9 +1019,15 @@ el1_trap: b.ne 1f // Not an abort we care about /* This is an abort. Check for permission fault */ +alternative_if_not ARM64_WORKAROUND_834220 and x2, x1, #ESR_ELx_FSC_TYPE cmp x2, #FSC_PERM b.ne 1f // Not a permission fault +alternative_else + nop // Use the permission fault path to + nop // check for a valid S1 translation, + nop // regardless of the ESR value. +alternative_endif /* * Check for Stage-1 page table walk, which is guaranteed diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 85c57158dcd9..648112e90ed5 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) /* Note: These now point to the banked copies */ *vcpu_spsr(vcpu) = new_spsr_value; - *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; + *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; /* Branch to exception vector */ if (sctlr & (1 << 13)) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 87a64e8db04c..d2650e84faf2 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr) * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 
*/ static bool access_dcsw(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (!p->is_write) @@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, * sys_regs and leave it in complete control of the caches. */ static bool access_vm_reg(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { - unsigned long val; bool was_enabled = vcpu_has_cache_enabled(vcpu); BUG_ON(!p->is_write); - val = *vcpu_reg(vcpu, p->Rt); if (!p->is_aarch32) { - vcpu_sys_reg(vcpu, r->reg) = val; + vcpu_sys_reg(vcpu, r->reg) = p->regval; } else { if (!p->is_32bit) - vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; - vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; + vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval); + vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval); } kvm_toggle_cache(vcpu, was_enabled); @@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu, * for both AArch64 and AArch32 accesses. */ static bool access_gic_sgi(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { - u64 val; - if (!p->is_write) return read_from_write_only(vcpu, p); - val = *vcpu_reg(vcpu, p->Rt); - vgic_v3_dispatch_sgi(vcpu, val); + vgic_v3_dispatch_sgi(vcpu, p->regval); return true; } static bool trap_raz_wi(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) @@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, } static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { return ignore_write(vcpu, p); } else { - *vcpu_reg(vcpu, p->Rt) = (1 << 3); + p->regval = (1 << 3); return true; } } static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { @@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, } else { u32 val; asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); - *vcpu_reg(vcpu, p->Rt) = val; + p->regval = val; return true; } } @@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, * now use the debug registers. */ static bool trap_debug_regs(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { - vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); + vcpu_sys_reg(vcpu, r->reg) = p->regval; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { - *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); + p->regval = vcpu_sys_reg(vcpu, r->reg); } - trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt)); + trace_trap_reg(__func__, r->reg, p->is_write, p->regval); return true; } @@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu, * hyp.S code switches between host and guest values in future. 
*/ static inline void reg_to_dbg(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, u64 *dbg_reg) { - u64 val = *vcpu_reg(vcpu, p->Rt); + u64 val = p->regval; if (p->is_32bit) { val &= 0xffffffffUL; @@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu, } static inline void dbg_to_reg(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, u64 *dbg_reg) { - u64 val = *dbg_reg; - + p->regval = *dbg_reg; if (p->is_32bit) - val &= 0xffffffffUL; - - *vcpu_reg(vcpu, p->Rt) = val; + p->regval &= 0xffffffffUL; } static inline bool trap_bvr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; @@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu, } static inline bool trap_bcr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; @@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu, } static inline bool trap_wvr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; @@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu, } static inline bool trap_wcr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; @@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { }; static bool trap_dbgidr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { @@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT); - *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | - (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | - (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | - (6 << 16) | (el3 << 14) | (el3 << 12)); + p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | + (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | + (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) + | (6 << 16) | (el3 << 14) | (el3 << 12)); return true; } } static bool trap_debug32(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { - vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); + vcpu_cp14(vcpu, r->reg) = p->regval; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { - *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); + p->regval = vcpu_cp14(vcpu, r->reg); } return true; @@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu, */ static inline bool trap_xvr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; @@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu, u64 val = *dbg_reg; val &= 0xffffffffUL; - val |= *vcpu_reg(vcpu, p->Rt) << 32; + val |= p->regval << 32; *dbg_reg = val; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { - *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32; + p->regval = *dbg_reg >> 32; } 
trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); @@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) * Return 0 if the access has been handled, and -1 if not. */ static int emulate_cp(struct kvm_vcpu *vcpu, - const struct sys_reg_params *params, + struct sys_reg_params *params, const struct sys_reg_desc *table, size_t num) { @@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, { struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); + int Rt = (hsr >> 5) & 0xf; int Rt2 = (hsr >> 10) & 0xf; params.is_aarch32 = true; params.is_32bit = false; params.CRm = (hsr >> 1) & 0xf; - params.Rt = (hsr >> 5) & 0xf; params.is_write = ((hsr & 1) == 0); params.Op0 = 0; @@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, params.CRn = 0; /* - * Massive hack here. Store Rt2 in the top 32bits so we only - * have one register to deal with. As we use the same trap + * Make a 64-bit value out of Rt and Rt2. As we use the same trap * backends between AArch32 and AArch64, we get away with it. */ if (params.is_write) { - u64 val = *vcpu_reg(vcpu, params.Rt); - val &= 0xffffffff; - val |= *vcpu_reg(vcpu, Rt2) << 32; - *vcpu_reg(vcpu, params.Rt) = val; + params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; + params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; } if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) @@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, unhandled_cp_access(vcpu, ¶ms); out: - /* Do the opposite hack for the read side */ + /* Split up the value between registers for the read side */ if (!params.is_write) { - u64 val = *vcpu_reg(vcpu, params.Rt); - val >>= 32; - *vcpu_reg(vcpu, Rt2) = val; + vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); + vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); } return 1; @@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, { struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); + int Rt = (hsr >> 5) & 0xf; params.is_aarch32 = true; params.is_32bit = true; params.CRm = (hsr >> 1) & 0xf; - params.Rt = (hsr >> 5) & 0xf; + params.regval = vcpu_get_reg(vcpu, Rt); params.is_write = ((hsr & 1) == 0); params.CRn = (hsr >> 10) & 0xf; params.Op0 = 0; params.Op1 = (hsr >> 14) & 0x7; params.Op2 = (hsr >> 17) & 0x7; - if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) - return 1; - if (!emulate_cp(vcpu, ¶ms, global, nr_global)) + if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) || + !emulate_cp(vcpu, ¶ms, global, nr_global)) { + if (!params.is_write) + vcpu_set_reg(vcpu, Rt, params.regval); return 1; + } unhandled_cp_access(vcpu, ¶ms); return 1; @@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) } static int emulate_sys_reg(struct kvm_vcpu *vcpu, - const struct sys_reg_params *params) + struct sys_reg_params *params) { size_t num; const struct sys_reg_desc *table, *r; @@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct sys_reg_params params; unsigned long esr = kvm_vcpu_get_hsr(vcpu); + int Rt = (esr >> 5) & 0x1f; + int ret; trace_kvm_handle_sys_reg(esr); @@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) params.CRn = (esr >> 10) & 0xf; params.CRm = (esr >> 1) & 0xf; params.Op2 = (esr >> 17) & 0x7; - params.Rt = (esr >> 5) & 0x1f; + params.regval = vcpu_get_reg(vcpu, Rt); params.is_write = !(esr & 1); - return emulate_sys_reg(vcpu, ¶ms); + ret = emulate_sys_reg(vcpu, 
¶ms); + + if (!params.is_write) + vcpu_set_reg(vcpu, Rt, params.regval); + return ret; } /****************************************************************************** diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index eaa324e4db4d..dbbb01cfbee9 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -28,7 +28,7 @@ struct sys_reg_params { u8 CRn; u8 CRm; u8 Op2; - u8 Rt; + u64 regval; bool is_write; bool is_aarch32; bool is_32bit; /* Only valid if is_aarch32 is true */ @@ -44,7 +44,7 @@ struct sys_reg_desc { /* Trapped access from guest, if non-NULL. */ bool (*access)(struct kvm_vcpu *, - const struct sys_reg_params *, + struct sys_reg_params *, const struct sys_reg_desc *); /* Initialization for vcpu. */ @@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu, } static inline bool read_zero(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p) + struct sys_reg_params *p) { - *vcpu_reg(vcpu, p->Rt) = 0; + p->regval = 0; return true; } diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c index 1e4576824165..ed90578fa120 100644 --- a/arch/arm64/kvm/sys_regs_generic_v8.c +++ b/arch/arm64/kvm/sys_regs_generic_v8.c @@ -31,13 +31,13 @@ #include "sys_regs.h" static bool access_actlr(struct kvm_vcpu *vcpu, - const struct sys_reg_params *p, + struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) return ignore_write(vcpu, p); - *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); + p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1); return true; } diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index f636a2639f03..e87f53ff5f58 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu) __flush_icache_all(); } -static int is_reserved_asid(u64 asid) +static bool check_update_reserved_asid(u64 asid, u64 newasid) { int cpu; - for_each_possible_cpu(cpu) - if (per_cpu(reserved_asids, cpu) == asid) - return 1; - return 0; + bool hit = false; + + /* + * Iterate over the set of reserved ASIDs looking for a match. + * If we find one, then we can update our mm to use newasid + * (i.e. the same ASID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old ASID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved ASID in a future + * generation. + */ + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_asids, cpu) == asid) { + hit = true; + per_cpu(reserved_asids, cpu) = newasid; + } + } + + return hit; } static u64 new_context(struct mm_struct *mm, unsigned int cpu) @@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) u64 generation = atomic64_read(&asid_generation); if (asid != 0) { + u64 newasid = generation | (asid & ~ASID_MASK); + /* * If our current ASID was active during a rollover, we * can continue to use it and this was just a false alarm. 
*/ - if (is_reserved_asid(asid)) - return generation | (asid & ~ASID_MASK); + if (check_update_reserved_asid(asid, newasid)) + return newasid; /* * We had a valid ASID in a previous life, so try to re-use @@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) */ asid &= ~ASID_MASK; if (!__test_and_set_bit(asid, asid_map)) - goto bump_gen; + return newasid; } /* @@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - -bump_gen: - asid |= generation; - return asid; + return asid | generation; } void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 131a199114b4..7963aa4b5d28 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -18,6 +18,7 @@ */ #include <linux/gfp.h> +#include <linux/acpi.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/genalloc.h> @@ -28,9 +29,6 @@ #include <asm/cacheflush.h> -struct dma_map_ops *dma_ops; -EXPORT_SYMBOL(dma_ops); - static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, bool coherent) { @@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops); static int __init arm64_dma_init(void) { - int ret; - - dma_ops = &swiotlb_dma_ops; - - ret = atomic_pool_init(); - - return ret; + return atomic_pool_init(); } arch_initcall(arm64_dma_init); @@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, { bool coherent = is_device_dma_coherent(dev); int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); + size_t iosize = size; void *addr; if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n")) return NULL; + + size = PAGE_ALIGN(size); + /* * Some drivers rely on this, and we probably don't want the * possibility of stale kernel data being read by devices anyway. @@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, struct page **pages; pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); - pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle, + pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle, flush_page); if (!pages) return NULL; @@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, __builtin_return_address(0)); if (!addr) - iommu_dma_free(dev, pages, size, handle); + iommu_dma_free(dev, pages, iosize, handle); } else { struct page *page; /* @@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, if (!addr) return NULL; - *handle = iommu_dma_map_page(dev, page, 0, size, ioprot); + *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); if (iommu_dma_mapping_error(dev, *handle)) { if (coherent) __free_pages(page, get_order(size)); @@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) { + size_t iosize = size; + + size = PAGE_ALIGN(size); /* * @cpu_addr will be one of 3 things depending on how it was allocated: * - A remapped array of pages from iommu_dma_alloc(), for all @@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, * Hence how dodgy the below logic looks... 
*/ if (__in_atomic_pool(cpu_addr, size)) { - iommu_dma_unmap_page(dev, handle, size, 0, NULL); + iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); __free_from_pool(cpu_addr, size); } else if (is_vmalloc_addr(cpu_addr)){ struct vm_struct *area = find_vm_area(cpu_addr); if (WARN_ON(!area || !area->pages)) return; - iommu_dma_free(dev, area->pages, size, &handle); + iommu_dma_free(dev, area->pages, iosize, &handle); dma_common_free_remap(cpu_addr, size, VM_USERMAP); } else { - iommu_dma_unmap_page(dev, handle, size, 0, NULL); + iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); __free_pages(virt_to_page(cpu_addr), get_order(size)); } } @@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, struct iommu_ops *iommu, bool coherent) { - if (!acpi_disabled && !dev->archdata.dma_ops) - dev->archdata.dma_ops = dma_ops; + if (!dev->archdata.dma_ops) + dev->archdata.dma_ops = &swiotlb_dma_ops; dev->archdata.dma_coherent = coherent; __iommu_setup_dma_ops(dev, dma_base, size, iommu); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 19211c4a8911..92ddac1e8ca2 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -393,16 +393,16 @@ static struct fault_info { { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, - { do_bad, SIGBUS, 0, "reserved access flag fault" }, + { do_bad, SIGBUS, 0, "unknown 8" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, - { do_bad, SIGBUS, 0, "reserved permission fault" }, + { do_bad, SIGBUS, 0, "unknown 12" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, { do_bad, SIGBUS, 0, "synchronous external abort" }, - { do_bad, SIGBUS, 0, "asynchronous external abort" }, + { do_bad, SIGBUS, 0, "unknown 17" }, { do_bad, SIGBUS, 0, "unknown 18" }, { do_bad, SIGBUS, 0, "unknown 19" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, @@ -410,16 +410,16 @@ static struct fault_info { { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous parity error" }, - { do_bad, SIGBUS, 0, "asynchronous parity error" }, + { do_bad, SIGBUS, 0, "unknown 25" }, { do_bad, SIGBUS, 0, "unknown 26" }, { do_bad, SIGBUS, 0, "unknown 27" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, - { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, { do_bad, SIGBUS, 0, "unknown 32" }, { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, - { do_bad, SIGBUS, 
0, "debug event" }, + { do_bad, SIGBUS, 0, "unknown 34" }, { do_bad, SIGBUS, 0, "unknown 35" }, { do_bad, SIGBUS, 0, "unknown 36" }, { do_bad, SIGBUS, 0, "unknown 37" }, @@ -433,21 +433,21 @@ static struct fault_info { { do_bad, SIGBUS, 0, "unknown 45" }, { do_bad, SIGBUS, 0, "unknown 46" }, { do_bad, SIGBUS, 0, "unknown 47" }, - { do_bad, SIGBUS, 0, "unknown 48" }, + { do_bad, SIGBUS, 0, "TLB conflict abort" }, { do_bad, SIGBUS, 0, "unknown 49" }, { do_bad, SIGBUS, 0, "unknown 50" }, { do_bad, SIGBUS, 0, "unknown 51" }, { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, - { do_bad, SIGBUS, 0, "unknown 53" }, + { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" }, { do_bad, SIGBUS, 0, "unknown 54" }, { do_bad, SIGBUS, 0, "unknown 55" }, { do_bad, SIGBUS, 0, "unknown 56" }, { do_bad, SIGBUS, 0, "unknown 57" }, - { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, + { do_bad, SIGBUS, 0, "unknown 58" }, { do_bad, SIGBUS, 0, "unknown 59" }, { do_bad, SIGBUS, 0, "unknown 60" }, - { do_bad, SIGBUS, 0, "unknown 61" }, - { do_bad, SIGBUS, 0, "unknown 62" }, + { do_bad, SIGBUS, 0, "section domain fault" }, + { do_bad, SIGBUS, 0, "page domain fault" }, { do_bad, SIGBUS, 0, "unknown 63" }, }; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e3f563c81c48..873e363048c6 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot); static void __init *early_alloc(unsigned long sz) { - void *ptr = __va(memblock_alloc(sz, sz)); - BUG_ON(!ptr); + phys_addr_t phys; + void *ptr; + + phys = memblock_alloc(sz, sz); + BUG_ON(!phys); + ptr = __va(phys); memset(ptr, 0, sz); return ptr; } @@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte) do { /* * Need to have the least restrictive permissions available - * permissions will be fixed up later. Default the new page - * range as contiguous ptes. + * permissions will be fixed up later */ - set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT)); + set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); pfn++; } while (pte++, i++, i < PTRS_PER_PTE); } -/* - * Given a PTE with the CONT bit set, determine where the CONT range - * starts, and clear the entire range of PTE CONT bits. 
- */ -static void clear_cont_pte_range(pte_t *pte, unsigned long addr) -{ - int i; - - pte -= CONT_RANGE_OFFSET(addr); - for (i = 0; i < CONT_PTES; i++) { - set_pte(pte, pte_mknoncont(*pte)); - pte++; - } - flush_tlb_all(); -} - -/* - * Given a range of PTEs set the pfn and provided page protection flags - */ -static void __populate_init_pte(pte_t *pte, unsigned long addr, - unsigned long end, phys_addr_t phys, - pgprot_t prot) -{ - unsigned long pfn = __phys_to_pfn(phys); - - do { - /* clear all the bits except the pfn, then apply the prot */ - set_pte(pte, pfn_pte(pfn, prot)); - pte++; - pfn++; - addr += PAGE_SIZE; - } while (addr != end); -} - static void alloc_init_pte(pmd_t *pmd, unsigned long addr, - unsigned long end, phys_addr_t phys, + unsigned long end, unsigned long pfn, pgprot_t prot, void *(*alloc)(unsigned long size)) { pte_t *pte; - unsigned long next; if (pmd_none(*pmd) || pmd_sect(*pmd)) { pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); @@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr, pte = pte_offset_kernel(pmd, addr); do { - next = min(end, (addr + CONT_SIZE) & CONT_MASK); - if (((addr | next | phys) & ~CONT_MASK) == 0) { - /* a block of CONT_PTES */ - __populate_init_pte(pte, addr, next, phys, - __pgprot(pgprot_val(prot) | PTE_CONT)); - } else { - /* - * If the range being split is already inside of a - * contiguous range but this PTE isn't going to be - * contiguous, then we want to unmark the adjacent - * ranges, then update the portion of the range we - * are interrested in. - */ - clear_cont_pte_range(pte, addr); - __populate_init_pte(pte, addr, next, phys, prot); - } - - pte += (next - addr) >> PAGE_SHIFT; - phys += next - addr; - addr = next; - } while (addr != end); + set_pte(pte, pfn_pte(pfn, prot)); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); } static void split_pud(pud_t *old_pud, pmd_t *pmd) @@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, } } } else { - alloc_init_pte(pmd, addr, next, phys, prot, alloc); + alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), + prot, alloc); } phys += next - addr; } while (pmd++, addr = next, addr != end); @@ -362,8 +313,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end) * for now. 
This will get more fine grained later once all memory * is mapped */ - unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); - unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); + unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE); + unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE); if (end < kernel_x_start) { create_mapping(start, __phys_to_virt(start), @@ -451,18 +402,18 @@ static void __init fixup_executable(void) { #ifdef CONFIG_DEBUG_RODATA /* now that we are actually fully mapped, make the start/end more fine grained */ - if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) { + if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) { unsigned long aligned_start = round_down(__pa(_stext), - SECTION_SIZE); + SWAPPER_BLOCK_SIZE); create_mapping(aligned_start, __phys_to_virt(aligned_start), __pa(_stext) - aligned_start, PAGE_KERNEL); } - if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) { + if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) { unsigned long aligned_end = round_up(__pa(__init_end), - SECTION_SIZE); + SWAPPER_BLOCK_SIZE); create_mapping(__pa(__init_end), (unsigned long)__init_end, aligned_end - __pa(__init_end), PAGE_KERNEL); @@ -475,7 +426,7 @@ void mark_rodata_ro(void) { create_mapping_late(__pa(_stext), (unsigned long)_stext, (unsigned long)_etext - (unsigned long)_stext, - PAGE_KERNEL_EXEC | PTE_RDONLY); + PAGE_KERNEL_ROX); } #endif diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index cf3c7d4a1b58..b162ad70effc 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -50,7 +50,7 @@ static const int bpf2a64[] = { [BPF_REG_8] = A64_R(21), [BPF_REG_9] = A64_R(22), /* read-only frame pointer to access stack */ - [BPF_REG_FP] = A64_FP, + [BPF_REG_FP] = A64_R(25), /* temporary register for internal BPF JIT */ [TMP_REG_1] = A64_R(23), [TMP_REG_2] = A64_R(24), @@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) /* Stack must be multiples of 16B */ #define STACK_ALIGN(sz) (((sz) + 15) & ~15) +#define _STACK_SIZE \ + (MAX_BPF_STACK \ + + 4 /* extra for skb_copy_bits buffer */) + +#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) + static void build_prologue(struct jit_ctx *ctx) { const u8 r6 = bpf2a64[BPF_REG_6]; @@ -150,10 +156,35 @@ static void build_prologue(struct jit_ctx *ctx) const u8 rx = bpf2a64[BPF_REG_X]; const u8 tmp1 = bpf2a64[TMP_REG_1]; const u8 tmp2 = bpf2a64[TMP_REG_2]; - int stack_size = MAX_BPF_STACK; - stack_size += 4; /* extra for skb_copy_bits buffer */ - stack_size = STACK_ALIGN(stack_size); + /* + * BPF prog stack layout + * + * high + * original A64_SP => 0:+-----+ BPF prologue + * |FP/LR| + * current A64_FP => -16:+-----+ + * | ... | callee saved registers + * +-----+ + * | | x25/x26 + * BPF fp register => -80:+-----+ <= (BPF_FP) + * | | + * | ... | BPF prog stack + * | | + * +-----+ <= (BPF_FP - MAX_BPF_STACK) + * |RSVD | JIT scratchpad + * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE) + * | | + * | ... 
| Function call stack + * | | + * +-----+ + * low + * + */ + + /* Save FP and LR registers to stay align with ARM64 AAPCS */ + emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); + emit(A64_MOV(1, A64_FP, A64_SP), ctx); /* Save callee-saved register */ emit(A64_PUSH(r6, r7, A64_SP), ctx); @@ -161,12 +192,15 @@ static void build_prologue(struct jit_ctx *ctx) if (ctx->tmp_used) emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); - /* Set up BPF stack */ - emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); + /* Save fp (x25) and x26. SP requires 16 bytes alignment */ + emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx); - /* Set up frame pointer */ + /* Set up BPF prog stack base register (x25) */ emit(A64_MOV(1, fp, A64_SP), ctx); + /* Set up function call stack */ + emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx); + /* Clear registers A and X */ emit_a64_mov_i64(ra, 0, ctx); emit_a64_mov_i64(rx, 0, ctx); @@ -182,13 +216,12 @@ static void build_epilogue(struct jit_ctx *ctx) const u8 fp = bpf2a64[BPF_REG_FP]; const u8 tmp1 = bpf2a64[TMP_REG_1]; const u8 tmp2 = bpf2a64[TMP_REG_2]; - int stack_size = MAX_BPF_STACK; - - stack_size += 4; /* extra for skb_copy_bits buffer */ - stack_size = STACK_ALIGN(stack_size); /* We're done with BPF stack */ - emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); + emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx); + + /* Restore fs (x25) and x26 */ + emit(A64_POP(fp, A64_R(26), A64_SP), ctx); /* Restore callee-saved register */ if (ctx->tmp_used) @@ -196,8 +229,8 @@ static void build_epilogue(struct jit_ctx *ctx) emit(A64_POP(r8, r9, A64_SP), ctx); emit(A64_POP(r6, r7, A64_SP), ctx); - /* Restore frame pointer */ - emit(A64_MOV(1, fp, A64_SP), ctx); + /* Restore FP/LR registers */ + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); /* Set return value */ emit(A64_MOV(1, A64_R(0), r0), ctx); @@ -557,7 +590,25 @@ emit_cond_jmp: case BPF_ST | BPF_MEM | BPF_H: case BPF_ST | BPF_MEM | BPF_B: case BPF_ST | BPF_MEM | BPF_DW: - goto notyet; + /* Load imm to a register then store it */ + ctx->tmp_used = 1; + emit_a64_mov_i(1, tmp2, off, ctx); + emit_a64_mov_i(1, tmp, imm, ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(A64_STR32(tmp, dst, tmp2), ctx); + break; + case BPF_H: + emit(A64_STRH(tmp, dst, tmp2), ctx); + break; + case BPF_B: + emit(A64_STRB(tmp, dst, tmp2), ctx); + break; + case BPF_DW: + emit(A64_STR64(tmp, dst, tmp2), ctx); + break; + } + break; /* STX: *(size *)(dst + off) = src */ case BPF_STX | BPF_MEM | BPF_W: @@ -624,7 +675,7 @@ emit_cond_jmp: return -EINVAL; } emit_a64_mov_i64(r3, size, ctx); - emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); + emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx); emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); emit(A64_MOV(1, A64_FP, A64_SP), ctx); @@ -758,7 +809,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog) if (bpf_jit_enable > 1) bpf_jit_dump(prog->len, image_size, 2, ctx.image); - bpf_flush_icache(ctx.image, ctx.image + ctx.idx); + bpf_flush_icache(header, ctx.image + ctx.idx); set_memory_ro((unsigned long)header, header->pages); prog->bpf_func = (void *)ctx.image; |