Diffstat (limited to 'arch/arm64')
111 files changed, 2402 insertions, 1606 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 42c090cf0292..29e75b47becd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -24,6 +24,7 @@ config ARM64 select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX + select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK if !PREEMPT @@ -42,8 +43,19 @@ config ARM64 select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT + select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT + select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT + select ARCH_INLINE_SPIN_LOCK if !PREEMPT + select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT + select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT + select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT + select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT + select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT + select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG @@ -74,6 +86,7 @@ config ARM64 select GENERIC_CPU_AUTOPROBE select GENERIC_EARLY_IOREMAP select GENERIC_IDLE_POLL_SETUP + select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL @@ -95,7 +108,9 @@ config ARM64 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT + select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_STACKLEAK select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE @@ -127,6 +142,7 @@ config ARM64 select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RCU_TABLE_FREE + select HAVE_RSEQ select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_KPROBES @@ -264,13 +280,6 @@ config ARCH_SUPPORTS_UPROBES config ARCH_PROC_KCORE_TEXT def_bool y -config MULTI_IRQ_HANDLER - def_bool y - -source "init/Kconfig" - -source "kernel/Kconfig.freezer" - source "arch/arm64/Kconfig.platforms" menu "Bus support" @@ -756,7 +765,6 @@ config HOLES_IN_ZONE def_bool y depends on NUMA -source kernel/Kconfig.preempt source kernel/Kconfig.hz config ARCH_SUPPORTS_DEBUG_PAGEALLOC @@ -775,6 +783,9 @@ config ARCH_SPARSEMEM_DEFAULT config ARCH_SELECT_MEMORY_MODEL def_bool ARCH_SPARSEMEM_ENABLE +config ARCH_FLATMEM_ENABLE + def_bool !NUMA + config HAVE_ARCH_PFN_VALID def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM @@ -791,8 +802,6 @@ config ARCH_WANT_HUGE_PMD_SHARE config ARCH_HAS_CACHE_LINE_SIZE def_bool y -source "mm/Kconfig" - config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" ---help--- @@ -1246,6 +1255,7 @@ config EFI bool "UEFI runtime support" depends on OF && !CPU_BIG_ENDIAN depends on KERNEL_MODE_NEON + select ARCH_SUPPORTS_ACPI select LIBFDT select UCS2_STRING select EFI_PARAMS_FROM_FDT @@ -1273,10 +1283,6 @@ config DMI endmenu -menu "Userspace binary formats" - -source "fs/Kconfig.binfmt" - config COMPAT bool "Kernel support for 32-bit EL0" depends on ARM64_4K_PAGES || EXPERT @@ -1300,8 +1306,6 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC -endmenu - menu "Power management options" source "kernel/power/Kconfig" @@ 
-1327,25 +1331,12 @@ source "drivers/cpufreq/Kconfig" endmenu -source "net/Kconfig" - -source "drivers/Kconfig" - source "drivers/firmware/Kconfig" source "drivers/acpi/Kconfig" -source "fs/Kconfig" - source "arch/arm64/kvm/Kconfig" -source "arch/arm64/Kconfig.debug" - -source "security/Kconfig" - -source "crypto/Kconfig" if CRYPTO source "arch/arm64/crypto/Kconfig" endif - -source "lib/Kconfig" diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index cc6bd559af85..69c9170bdd24 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -1,6 +1,3 @@ -menu "Kernel hacking" - -source "lib/Kconfig.debug" config ARM64_PTDUMP_CORE def_bool n @@ -97,5 +94,3 @@ config ARM64_RELOC_TEST tristate "Relocation testing module" source "drivers/hwtracing/coresight/Kconfig" - -endmenu diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 52df25bf4f8c..35f2e6e1be23 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -268,6 +268,7 @@ config ARCH_UNIPHIER bool "Socionext UniPhier SoC Family" select ARCH_HAS_RESET_CONTROLLER select PINCTRL + select RESET_CONTROLLER help This enables support for Socionext UniPhier SoC family. diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 45272266dafb..efe61a2e4b5e 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -10,7 +10,7 @@ # # Copyright (C) 1995-2001 by Russell King -LDFLAGS_vmlinux :=-p --no-undefined -X +LDFLAGS_vmlinux :=--no-undefined -X CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) GZFLAGS :=-9 @@ -60,15 +60,16 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ AS += -EB -LD += -EB -LDFLAGS += -maarch64linuxb +# Prefer the baremetal ELF build target, but not all toolchains include +# it so fall back to the standard linux version if needed. +LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb) UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian CHECKFLAGS += -D__AARCH64EL__ AS += -EL -LD += -EL -LDFLAGS += -maarch64linux +# Same as above, prefer ELF but fall back to linux target if needed. 
+LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux) UTS_MACHINE := aarch64 endif diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 4057197048dc..1a406a76c86a 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -482,9 +482,9 @@ status = "disabled"; }; - mdio_mux_iproc: mdio-mux@6602023c { + mdio_mux_iproc: mdio-mux@66020000 { compatible = "brcm,mdio-mux-iproc"; - reg = <0x6602023c 0x14>; + reg = <0x66020000 0x250>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index a79ea126723e..e283480bfc7e 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -296,9 +296,9 @@ #include "stingray-pinctrl.dtsi" - mdio_mux_iproc: mdio-mux@2023c { + mdio_mux_iproc: mdio-mux@20000 { compatible = "brcm,mdio-mux-iproc"; - reg = <0x0002023c 0x14>; + reg = <0x00020000 0x250>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi index ad6f46d8536b..263b972a6d1e 100644 --- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi +++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi @@ -11,13 +11,14 @@ fman0: fman@1a00000 { #size-cells = <1>; cell-index = <0>; compatible = "fsl,fman"; - ranges = <0x0 0x0 0x1a00000 0x100000>; - reg = <0x0 0x1a00000 0x0 0x100000>; + ranges = <0x0 0x0 0x1a00000 0xfe000>; + reg = <0x0 0x1a00000 0x0 0xfe000>; interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clockgen 3 0>; clock-names = "fmanclk"; fsl,qman-channel-range = <0x800 0x10>; + ptimer-handle = <&ptp_timer0>; muram@0 { compatible = "fsl,fman-muram"; @@ -73,9 +74,11 @@ fman0: fman@1a00000 { compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; }; +}; - ptp_timer0: ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; +ptp_timer0: ptp-timer@1afe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0x0 0x1afe000 0x0 0x1000>; + interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clockgen 3 0>; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi index fa79a5d8fb5b..f432b0a88c65 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi @@ -1009,6 +1009,24 @@ reset-gpios = <&gpio11 1 0 >; }; + /* UFS */ + ufs: ufs@ff3b0000 { + compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1"; + /* 0: HCI standard */ + /* 1: UFS SYS CTRL */ + reg = <0x0 0xff3b0000 0x0 0x1000>, + <0x0 0xff3b1000 0x0 0x1000>; + interrupt-parent = <&gic>; + interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>, + <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>; + clock-names = "ref_clk", "phy_clk"; + freq-table-hz = <0 0>, <0 0>; + /* offset: 0x84; bit: 12 */ + resets = <&crg_rst 0x84 12>; + reset-names = "rst"; + }; + /* SD */ dwmmc1: dwmmc1@ff37f000 { compatible = "hisilicon,hi3660-dw-mshc"; diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi index 9c10030a07f8..c33adefc3061 100644 --- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi @@ -1049,7 +1049,74 @@ num-pins = <2>; }; }; + p0_mbigen_alg_a:interrupt-controller@d0080000 { + 
compatible = "hisilicon,mbigen-v2"; + reg = <0x0 0xd0080000 0x0 0x10000>; + p0_mbigen_sec_a: intc_sec { + msi-parent = <&p0_its_dsa_a 0x40400>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <33>; + }; + p0_mbigen_smmu_alg_a: intc_smmu_alg { + msi-parent = <&p0_its_dsa_a 0x40b1b>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <3>; + }; + }; + p0_mbigen_alg_b:interrupt-controller@8,d0080000 { + compatible = "hisilicon,mbigen-v2"; + reg = <0x8 0xd0080000 0x0 0x10000>; + + p0_mbigen_sec_b: intc_sec { + msi-parent = <&p0_its_dsa_b 0x42400>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <33>; + }; + p0_mbigen_smmu_alg_b: intc_smmu_alg { + msi-parent = <&p0_its_dsa_b 0x42b1b>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <3>; + }; + }; + p1_mbigen_alg_a:interrupt-controller@400,d0080000 { + compatible = "hisilicon,mbigen-v2"; + reg = <0x400 0xd0080000 0x0 0x10000>; + + p1_mbigen_sec_a: intc_sec { + msi-parent = <&p1_its_dsa_a 0x44400>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <33>; + }; + p1_mbigen_smmu_alg_a: intc_smmu_alg { + msi-parent = <&p1_its_dsa_a 0x44b1b>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <3>; + }; + }; + p1_mbigen_alg_b:interrupt-controller@408,d0080000 { + compatible = "hisilicon,mbigen-v2"; + reg = <0x408 0xd0080000 0x0 0x10000>; + + p1_mbigen_sec_b: intc_sec { + msi-parent = <&p1_its_dsa_b 0x46400>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <33>; + }; + p1_mbigen_smmu_alg_b: intc_smmu_alg { + msi-parent = <&p1_its_dsa_b 0x46b1b>; + interrupt-controller; + #interrupt-cells = <2>; + num-pins = <3>; + }; + }; p0_mbigen_dsa_a: interrupt-controller@c0080000 { compatible = "hisilicon,mbigen-v2"; reg = <0x0 0xc0080000 0x0 0x10000>; @@ -1107,6 +1174,58 @@ hisilicon,broken-prefetch-cmd; status = "disabled"; }; + p0_smmu_alg_a: smmu_alg@d0040000 { + compatible = "arm,smmu-v3"; + reg = <0x0 0xd0040000 0x0 0x20000>; + interrupt-parent = <&p0_mbigen_smmu_alg_a>; + interrupts = <733 1>, + <734 1>, + <735 1>; + interrupt-names = "eventq", "gerror", "priq"; + #iommu-cells = <1>; + dma-coherent; + hisilicon,broken-prefetch-cmd; + /* smmu-cb-memtype = <0x0 0x1>;*/ + }; + p0_smmu_alg_b: smmu_alg@8,d0040000 { + compatible = "arm,smmu-v3"; + reg = <0x8 0xd0040000 0x0 0x20000>; + interrupt-parent = <&p0_mbigen_smmu_alg_b>; + interrupts = <733 1>, + <734 1>, + <735 1>; + interrupt-names = "eventq", "gerror", "priq"; + #iommu-cells = <1>; + dma-coherent; + hisilicon,broken-prefetch-cmd; + /* smmu-cb-memtype = <0x0 0x1>;*/ + }; + p1_smmu_alg_a: smmu_alg@400,d0040000 { + compatible = "arm,smmu-v3"; + reg = <0x400 0xd0040000 0x0 0x20000>; + interrupt-parent = <&p1_mbigen_smmu_alg_a>; + interrupts = <733 1>, + <734 1>, + <735 1>; + interrupt-names = "eventq", "gerror", "priq"; + #iommu-cells = <1>; + dma-coherent; + hisilicon,broken-prefetch-cmd; + /* smmu-cb-memtype = <0x0 0x1>;*/ + }; + p1_smmu_alg_b: smmu_alg@408,d0040000 { + compatible = "arm,smmu-v3"; + reg = <0x408 0xd0040000 0x0 0x20000>; + interrupt-parent = <&p1_mbigen_smmu_alg_b>; + interrupts = <733 1>, + <734 1>, + <735 1>; + interrupt-names = "eventq", "gerror", "priq"; + #iommu-cells = <1>; + dma-coherent; + hisilicon,broken-prefetch-cmd; + /* smmu-cb-memtype = <0x0 0x1>;*/ + }; soc { compatible = "simple-bus"; @@ -1603,5 +1722,170 @@ 0x0 0 0 4 &mbigen_pcie2_a 671 4>; status = "disabled"; }; + p0_sec_a: crypto@d2000000 { + compatible = "hisilicon,hip07-sec"; + reg = <0x0 0xd0000000 0x0 0x10000 + 0x0 
0xd2000000 0x0 0x10000 + 0x0 0xd2010000 0x0 0x10000 + 0x0 0xd2020000 0x0 0x10000 + 0x0 0xd2030000 0x0 0x10000 + 0x0 0xd2040000 0x0 0x10000 + 0x0 0xd2050000 0x0 0x10000 + 0x0 0xd2060000 0x0 0x10000 + 0x0 0xd2070000 0x0 0x10000 + 0x0 0xd2080000 0x0 0x10000 + 0x0 0xd2090000 0x0 0x10000 + 0x0 0xd20a0000 0x0 0x10000 + 0x0 0xd20b0000 0x0 0x10000 + 0x0 0xd20c0000 0x0 0x10000 + 0x0 0xd20d0000 0x0 0x10000 + 0x0 0xd20e0000 0x0 0x10000 + 0x0 0xd20f0000 0x0 0x10000 + 0x0 0xd2100000 0x0 0x10000>; + interrupt-parent = <&p0_mbigen_sec_a>; + iommus = <&p0_smmu_alg_a 0x600>; + dma-coherent; + interrupts = <576 4>, + <577 1>, <578 4>, + <579 1>, <580 4>, + <581 1>, <582 4>, + <583 1>, <584 4>, + <585 1>, <586 4>, + <587 1>, <588 4>, + <589 1>, <590 4>, + <591 1>, <592 4>, + <593 1>, <594 4>, + <595 1>, <596 4>, + <597 1>, <598 4>, + <599 1>, <600 4>, + <601 1>, <602 4>, + <603 1>, <604 4>, + <605 1>, <606 4>, + <607 1>, <608 4>; + }; + p0_sec_b: crypto@8,d2000000 { + compatible = "hisilicon,hip07-sec"; + reg = <0x8 0xd0000000 0x0 0x10000 + 0x8 0xd2000000 0x0 0x10000 + 0x8 0xd2010000 0x0 0x10000 + 0x8 0xd2020000 0x0 0x10000 + 0x8 0xd2030000 0x0 0x10000 + 0x8 0xd2040000 0x0 0x10000 + 0x8 0xd2050000 0x0 0x10000 + 0x8 0xd2060000 0x0 0x10000 + 0x8 0xd2070000 0x0 0x10000 + 0x8 0xd2080000 0x0 0x10000 + 0x8 0xd2090000 0x0 0x10000 + 0x8 0xd20a0000 0x0 0x10000 + 0x8 0xd20b0000 0x0 0x10000 + 0x8 0xd20c0000 0x0 0x10000 + 0x8 0xd20d0000 0x0 0x10000 + 0x8 0xd20e0000 0x0 0x10000 + 0x8 0xd20f0000 0x0 0x10000 + 0x8 0xd2100000 0x0 0x10000>; + interrupt-parent = <&p0_mbigen_sec_b>; + iommus = <&p0_smmu_alg_b 0x600>; + dma-coherent; + interrupts = <576 4>, + <577 1>, <578 4>, + <579 1>, <580 4>, + <581 1>, <582 4>, + <583 1>, <584 4>, + <585 1>, <586 4>, + <587 1>, <588 4>, + <589 1>, <590 4>, + <591 1>, <592 4>, + <593 1>, <594 4>, + <595 1>, <596 4>, + <597 1>, <598 4>, + <599 1>, <600 4>, + <601 1>, <602 4>, + <603 1>, <604 4>, + <605 1>, <606 4>, + <607 1>, <608 4>; + }; + p1_sec_a: crypto@400,d2000000 { + compatible = "hisilicon,hip07-sec"; + reg = <0x400 0xd0000000 0x0 0x10000 + 0x400 0xd2000000 0x0 0x10000 + 0x400 0xd2010000 0x0 0x10000 + 0x400 0xd2020000 0x0 0x10000 + 0x400 0xd2030000 0x0 0x10000 + 0x400 0xd2040000 0x0 0x10000 + 0x400 0xd2050000 0x0 0x10000 + 0x400 0xd2060000 0x0 0x10000 + 0x400 0xd2070000 0x0 0x10000 + 0x400 0xd2080000 0x0 0x10000 + 0x400 0xd2090000 0x0 0x10000 + 0x400 0xd20a0000 0x0 0x10000 + 0x400 0xd20b0000 0x0 0x10000 + 0x400 0xd20c0000 0x0 0x10000 + 0x400 0xd20d0000 0x0 0x10000 + 0x400 0xd20e0000 0x0 0x10000 + 0x400 0xd20f0000 0x0 0x10000 + 0x400 0xd2100000 0x0 0x10000>; + interrupt-parent = <&p1_mbigen_sec_a>; + iommus = <&p1_smmu_alg_a 0x600>; + dma-coherent; + interrupts = <576 4>, + <577 1>, <578 4>, + <579 1>, <580 4>, + <581 1>, <582 4>, + <583 1>, <584 4>, + <585 1>, <586 4>, + <587 1>, <588 4>, + <589 1>, <590 4>, + <591 1>, <592 4>, + <593 1>, <594 4>, + <595 1>, <596 4>, + <597 1>, <598 4>, + <599 1>, <600 4>, + <601 1>, <602 4>, + <603 1>, <604 4>, + <605 1>, <606 4>, + <607 1>, <608 4>; + }; + p1_sec_b: crypto@408,d2000000 { + compatible = "hisilicon,hip07-sec"; + reg = <0x408 0xd0000000 0x0 0x10000 + 0x408 0xd2000000 0x0 0x10000 + 0x408 0xd2010000 0x0 0x10000 + 0x408 0xd2020000 0x0 0x10000 + 0x408 0xd2030000 0x0 0x10000 + 0x408 0xd2040000 0x0 0x10000 + 0x408 0xd2050000 0x0 0x10000 + 0x408 0xd2060000 0x0 0x10000 + 0x408 0xd2070000 0x0 0x10000 + 0x408 0xd2080000 0x0 0x10000 + 0x408 0xd2090000 0x0 0x10000 + 0x408 0xd20a0000 0x0 0x10000 + 0x408 0xd20b0000 0x0 0x10000 + 0x408 0xd20c0000 
0x0 0x10000 + 0x408 0xd20d0000 0x0 0x10000 + 0x408 0xd20e0000 0x0 0x10000 + 0x408 0xd20f0000 0x0 0x10000 + 0x408 0xd2100000 0x0 0x10000>; + interrupt-parent = <&p1_mbigen_sec_b>; + iommus = <&p1_smmu_alg_b 0x600>; + dma-coherent; + interrupts = <576 4>, + <577 1>, <578 4>, + <579 1>, <580 4>, + <581 1>, <582 4>, + <583 1>, <584 4>, + <585 1>, <586 4>, + <587 1>, <588 4>, + <589 1>, <590 4>, + <591 1>, <592 4>, + <593 1>, <594 4>, + <595 1>, <596 4>, + <597 1>, <598 4>, + <599 1>, <600 4>, + <601 1>, <602 4>, + <603 1>, <604 4>, + <605 1>, <606 4>, + <607 1>, <608 4>; + }; + }; }; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f9a186f6af8a..f67e8d5e93ad 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -179,6 +179,7 @@ CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_DENALI_DT=y CONFIG_MTD_NAND_MARVELL=y +CONFIG_MTD_NAND_QCOM=y CONFIG_MTD_SPI_NOR=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m @@ -193,6 +194,7 @@ CONFIG_SCSI_HISI_SAS=y CONFIG_SCSI_HISI_SAS_PCI=y CONFIG_SCSI_UFSHCD=m CONFIG_SCSI_UFSHCD_PLATFORM=m +CONFIG_SCSI_UFS_HISI=y CONFIG_SCSI_UFS_QCOM=m CONFIG_ATA=y CONFIG_SATA_AHCI=y @@ -294,6 +296,8 @@ CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_SERIAL_MVEBU_UART=y CONFIG_SERIAL_DEV_BUS=y CONFIG_VIRTIO_CONSOLE=y +CONFIG_TCG_TPM=y +CONFIG_TCG_TIS_I2C_INFINEON=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y CONFIG_I2C_MUX_PCA954x=y @@ -348,10 +352,12 @@ CONFIG_POWER_RESET_MSM=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_BATTERY_SBS=m CONFIG_BATTERY_BQ27XXX=y CONFIG_SENSORS_ARM_SCPI=y CONFIG_SENSORS_LM90=m CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_RASPBERRYPI_HWMON=m CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y CONFIG_THERMAL_EMULATION=y @@ -364,16 +370,18 @@ CONFIG_TEGRA_BPMP_THERMAL=m CONFIG_QCOM_TSENS=y CONFIG_UNIPHIER_THERMAL=y CONFIG_WATCHDOG=y +CONFIG_ARM_SP805_WATCHDOG=y CONFIG_S3C2410_WATCHDOG=y CONFIG_MESON_GXBB_WATCHDOG=m CONFIG_MESON_WATCHDOG=m CONFIG_RENESAS_WDT=y CONFIG_UNIPHIER_WATCHDOG=y CONFIG_BCM2835_WDT=y +CONFIG_MFD_BD9571MWV=y CONFIG_MFD_AXP20X_RSB=y CONFIG_MFD_CROS_EC=y -CONFIG_MFD_CROS_EC_I2C=y -CONFIG_MFD_CROS_EC_SPI=y +CONFIG_CROS_EC_I2C=y +CONFIG_CROS_EC_SPI=y CONFIG_MFD_CROS_EC_CHARDEV=m CONFIG_MFD_EXYNOS_LPASS=m CONFIG_MFD_HI6421_PMIC=y @@ -384,6 +392,7 @@ CONFIG_MFD_RK808=y CONFIG_MFD_SEC_CORE=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_AXP20X=y +CONFIG_REGULATOR_BD9571MWV=y CONFIG_REGULATOR_FAN53555=y CONFIG_REGULATOR_GPIO=y CONFIG_REGULATOR_HI6421V530=y @@ -394,6 +403,7 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QCOM_SPMI=y CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_S2MPS11=y +CONFIG_REGULATOR_VCTRL=m CONFIG_RC_CORE=m CONFIG_RC_DECODERS=y CONFIG_RC_DEVICES=y @@ -406,6 +416,8 @@ CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y # CONFIG_DVB_NET is not set CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m CONFIG_VIDEO_SAMSUNG_S5P_MFC=m CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m @@ -447,9 +459,20 @@ CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SOC=y CONFIG_SND_BCM2835_SOC_I2S=m +CONFIG_SND_SOC_ROCKCHIP=m +CONFIG_SND_SOC_ROCKCHIP_I2S=m +CONFIG_SND_SOC_ROCKCHIP_SPDIF=m +CONFIG_SND_SOC_ROCKCHIP_RT5645=m +CONFIG_SND_SOC_RK3399_GRU_SOUND=m CONFIG_SND_SOC_SAMSUNG=y CONFIG_SND_SOC_RCAR=m CONFIG_SND_SOC_AK4613=m +CONFIG_SND_SOC_DA7219=m +CONFIG_SND_SOC_MAX98357A=m +CONFIG_SND_SOC_RL6231=m +CONFIG_SND_SOC_RT5514=m +CONFIG_SND_SOC_RT5514_SPI=m +CONFIG_SND_SOC_RT5645=m 
CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_AUDIO_GRAPH_CARD=m CONFIG_I2C_HID=m @@ -616,6 +639,7 @@ CONFIG_PHY_ROCKCHIP_INNO_USB2=y CONFIG_PHY_ROCKCHIP_PCIE=m CONFIG_PHY_ROCKCHIP_TYPEC=y CONFIG_PHY_TEGRA_XUSB=y +CONFIG_HISI_PMU=y CONFIG_QCOM_L2_PMU=y CONFIG_QCOM_L3_PMU=y CONFIG_QCOM_QFPROM=y diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index 88f5aef7934c..e3a375c4cb83 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -19,33 +19,24 @@ * u32 *macp, u8 const rk[], u32 rounds); */ ENTRY(ce_aes_ccm_auth_data) - frame_push 7 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x5 - - ldr w25, [x22] /* leftover from prev round? */ + ldr w8, [x3] /* leftover from prev round? */ ld1 {v0.16b}, [x0] /* load mac */ - cbz w25, 1f - sub w25, w25, #16 + cbz w8, 1f + sub w8, w8, #16 eor v1.16b, v1.16b, v1.16b -0: ldrb w7, [x20], #1 /* get 1 byte of input */ - subs w21, w21, #1 - add w25, w25, #1 +0: ldrb w7, [x1], #1 /* get 1 byte of input */ + subs w2, w2, #1 + add w8, w8, #1 ins v1.b[0], w7 ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */ beq 8f /* out of input? */ - cbnz w25, 0b + cbnz w8, 0b eor v0.16b, v0.16b, v1.16b -1: ld1 {v3.4s}, [x23] /* load first round key */ - prfm pldl1strm, [x20] - cmp w24, #12 /* which key size? */ - add x6, x23, #16 - sub w7, w24, #2 /* modified # of rounds */ +1: ld1 {v3.4s}, [x4] /* load first round key */ + prfm pldl1strm, [x1] + cmp w5, #12 /* which key size? */ + add x6, x4, #16 + sub w7, w5, #2 /* modified # of rounds */ bmi 2f bne 5f mov v5.16b, v3.16b @@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data) ld1 {v5.4s}, [x6], #16 /* load next round key */ bpl 3b aese v0.16b, v4.16b - subs w21, w21, #16 /* last data? */ + subs w2, w2, #16 /* last data? */ eor v0.16b, v0.16b, v5.16b /* final round */ bmi 6f - ld1 {v1.16b}, [x20], #16 /* load next input block */ + ld1 {v1.16b}, [x1], #16 /* load next input block */ eor v0.16b, v0.16b, v1.16b /* xor with mac */ - beq 6f - - if_will_cond_yield_neon - st1 {v0.16b}, [x19] /* store mac */ - do_cond_yield_neon - ld1 {v0.16b}, [x19] /* reload mac */ - endif_yield_neon - - b 1b -6: st1 {v0.16b}, [x19] /* store mac */ + bne 1b +6: st1 {v0.16b}, [x0] /* store mac */ beq 10f - adds w21, w21, #16 + adds w2, w2, #16 beq 10f - mov w25, w21 -7: ldrb w7, [x20], #1 + mov w8, w2 +7: ldrb w7, [x1], #1 umov w6, v0.b[0] eor w6, w6, w7 - strb w6, [x19], #1 - subs w21, w21, #1 + strb w6, [x0], #1 + subs w2, w2, #1 beq 10f ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ b 7b -8: mov w7, w25 - add w25, w25, #16 +8: mov w7, w8 + add w8, w8, #16 9: ext v1.16b, v1.16b, v1.16b, #1 adds w7, w7, #1 bne 9b eor v0.16b, v0.16b, v1.16b - st1 {v0.16b}, [x19] -10: str w25, [x22] - - frame_pop + st1 {v0.16b}, [x0] +10: str w8, [x3] ret ENDPROC(ce_aes_ccm_auth_data) @@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final) ENDPROC(ce_aes_ccm_final) .macro aes_ccm_do_crypt,enc - frame_push 8 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x5 - mov x25, x6 - - ldr x26, [x25, #8] /* load lower ctr */ - ld1 {v0.16b}, [x24] /* load mac */ -CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */ + ldr x8, [x6, #8] /* load lower ctr */ + ld1 {v0.16b}, [x5] /* load mac */ +CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */ 0: /* outer loop */ - ld1 {v1.8b}, [x25] /* load upper ctr */ - prfm pldl1strm, [x20] - add x26, x26, #1 - rev x9, x26 - cmp w23, #12 /* which key size? 
*/ - sub w7, w23, #2 /* get modified # of rounds */ + ld1 {v1.8b}, [x6] /* load upper ctr */ + prfm pldl1strm, [x1] + add x8, x8, #1 + rev x9, x8 + cmp w4, #12 /* which key size? */ + sub w7, w4, #2 /* get modified # of rounds */ ins v1.d[1], x9 /* no carry in lower ctr */ - ld1 {v3.4s}, [x22] /* load first round key */ - add x10, x22, #16 + ld1 {v3.4s}, [x3] /* load first round key */ + add x10, x3, #16 bmi 1f bne 4f mov v5.16b, v3.16b @@ -194,9 +165,9 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */ bpl 2b aese v0.16b, v4.16b aese v1.16b, v4.16b - subs w21, w21, #16 - bmi 7f /* partial block? */ - ld1 {v2.16b}, [x20], #16 /* load next input block */ + subs w2, w2, #16 + bmi 6f /* partial block? */ + ld1 {v2.16b}, [x1], #16 /* load next input block */ .if \enc == 1 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */ eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */ @@ -205,29 +176,18 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */ eor v1.16b, v2.16b, v5.16b /* final round enc */ .endif eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ - st1 {v1.16b}, [x19], #16 /* write output block */ - beq 5f - - if_will_cond_yield_neon - st1 {v0.16b}, [x24] /* store mac */ - do_cond_yield_neon - ld1 {v0.16b}, [x24] /* reload mac */ - endif_yield_neon - - b 0b -5: -CPU_LE( rev x26, x26 ) - st1 {v0.16b}, [x24] /* store mac */ - str x26, [x25, #8] /* store lsb end of ctr (BE) */ - -6: frame_pop - ret - -7: eor v0.16b, v0.16b, v5.16b /* final round mac */ + st1 {v1.16b}, [x0], #16 /* write output block */ + bne 0b +CPU_LE( rev x8, x8 ) + st1 {v0.16b}, [x5] /* store mac */ + str x8, [x6, #8] /* store lsb end of ctr (BE) */ +5: ret + +6: eor v0.16b, v0.16b, v5.16b /* final round mac */ eor v1.16b, v1.16b, v5.16b /* final round enc */ - st1 {v0.16b}, [x24] /* store mac */ - add w21, w21, #16 /* process partial tail block */ -8: ldrb w9, [x20], #1 /* get 1 byte of input */ + st1 {v0.16b}, [x5] /* store mac */ + add w2, w2, #16 /* process partial tail block */ +7: ldrb w9, [x1], #1 /* get 1 byte of input */ umov w6, v1.b[0] /* get top crypted ctr byte */ umov w7, v0.b[0] /* get top mac byte */ .if \enc == 1 @@ -237,13 +197,13 @@ CPU_LE( rev x26, x26 ) eor w9, w9, w6 eor w7, w7, w9 .endif - strb w9, [x19], #1 /* store out byte */ - strb w7, [x24], #1 /* store mac byte */ - subs w21, w21, #1 - beq 6b + strb w9, [x0], #1 /* store out byte */ + strb w7, [x5], #1 /* store mac byte */ + subs w2, w2, #1 + beq 5b ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */ ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */ - b 8b + b 7b .endm /* diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index e3e50950a863..adcb83eb683c 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -567,7 +567,6 @@ static struct shash_alg mac_algs[] = { { .base.cra_name = "cmac(aes)", .base.cra_driver_name = "cmac-aes-" MODE, .base.cra_priority = PRIO, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct mac_tfm_ctx) + 2 * AES_BLOCK_SIZE, @@ -583,7 +582,6 @@ static struct shash_alg mac_algs[] = { { .base.cra_name = "xcbc(aes)", .base.cra_driver_name = "xcbc-aes-" MODE, .base.cra_priority = PRIO, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct mac_tfm_ctx) + 2 * AES_BLOCK_SIZE, @@ -599,7 +597,6 @@ static struct shash_alg mac_algs[] = { { .base.cra_name = "cbcmac(aes)", .base.cra_driver_name = "cbcmac-aes-" MODE, .base.cra_priority = 
PRIO, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct mac_tfm_ctx), .base.cra_module = THIS_MODULE, diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S index dcffb9e77589..1b319b716d5e 100644 --- a/arch/arm64/crypto/ghash-ce-core.S +++ b/arch/arm64/crypto/ghash-ce-core.S @@ -1,7 +1,7 @@ /* * Accelerated GHASH implementation with ARMv8 PMULL instructions. * - * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org> + * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -46,6 +46,19 @@ ss3 .req v26 ss4 .req v27 + XL2 .req v8 + XM2 .req v9 + XH2 .req v10 + XL3 .req v11 + XM3 .req v12 + XH3 .req v13 + TT3 .req v14 + TT4 .req v15 + HH .req v16 + HH3 .req v17 + HH4 .req v18 + HH34 .req v19 + .text .arch armv8-a+crypto @@ -134,11 +147,25 @@ .endm .macro __pmull_pre_p64 + add x8, x3, #16 + ld1 {HH.2d-HH4.2d}, [x8] + + trn1 SHASH2.2d, SHASH.2d, HH.2d + trn2 T1.2d, SHASH.2d, HH.2d + eor SHASH2.16b, SHASH2.16b, T1.16b + + trn1 HH34.2d, HH3.2d, HH4.2d + trn2 T1.2d, HH3.2d, HH4.2d + eor HH34.16b, HH34.16b, T1.16b + movi MASK.16b, #0xe1 shl MASK.2d, MASK.2d, #57 .endm .macro __pmull_pre_p8 + ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 + eor SHASH2.16b, SHASH2.16b, SHASH.16b + // k00_16 := 0x0000000000000000_000000000000ffff // k32_48 := 0x00000000ffffffff_0000ffffffffffff movi k32_48.2d, #0xffffffff @@ -213,31 +240,88 @@ .endm .macro __pmull_ghash, pn - frame_push 5 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - -0: ld1 {SHASH.2d}, [x22] - ld1 {XL.2d}, [x20] - ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 - eor SHASH2.16b, SHASH2.16b, SHASH.16b + ld1 {SHASH.2d}, [x3] + ld1 {XL.2d}, [x1] __pmull_pre_\pn /* do the head block first, if supplied */ - cbz x23, 1f - ld1 {T1.2d}, [x23] - mov x23, xzr - b 2f + cbz x4, 0f + ld1 {T1.2d}, [x4] + mov x4, xzr + b 3f + +0: .ifc \pn, p64 + tbnz w0, #0, 2f // skip until #blocks is a + tbnz w0, #1, 2f // round multiple of 4 + +1: ld1 {XM3.16b-TT4.16b}, [x2], #64 + + sub w0, w0, #4 + + rev64 T1.16b, XM3.16b + rev64 T2.16b, XH3.16b + rev64 TT4.16b, TT4.16b + rev64 TT3.16b, TT3.16b + + ext IN1.16b, TT4.16b, TT4.16b, #8 + ext XL3.16b, TT3.16b, TT3.16b, #8 + + eor TT4.16b, TT4.16b, IN1.16b + pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1 + pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0 + pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0) + + eor TT3.16b, TT3.16b, XL3.16b + pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1 + pmull XL3.1q, HH.1d, XL3.1d // a0 * b0 + pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0) + + ext IN1.16b, T2.16b, T2.16b, #8 + eor XL2.16b, XL2.16b, XL3.16b + eor XH2.16b, XH2.16b, XH3.16b + eor XM2.16b, XM2.16b, XM3.16b + + eor T2.16b, T2.16b, IN1.16b + pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1 + pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0 + pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0) -1: ld1 {T1.2d}, [x21], #16 - sub w19, w19, #1 + eor XL2.16b, XL2.16b, XL3.16b + eor XH2.16b, XH2.16b, XH3.16b + eor XM2.16b, XM2.16b, XM3.16b -2: /* multiply XL by SHASH in GF(2^128) */ + ext IN1.16b, T1.16b, T1.16b, #8 + ext TT3.16b, XL.16b, XL.16b, #8 + eor XL.16b, XL.16b, IN1.16b + eor T1.16b, T1.16b, TT3.16b + + pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1 + eor T1.16b, T1.16b, XL.16b + pmull XL.1q, HH4.1d, XL.1d // a0 * b0 + pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0) + + eor XL.16b, XL.16b, 
XL2.16b + eor XH.16b, XH.16b, XH2.16b + eor XM.16b, XM.16b, XM2.16b + + eor T2.16b, XL.16b, XH.16b + ext T1.16b, XL.16b, XH.16b, #8 + eor XM.16b, XM.16b, T2.16b + + __pmull_reduce_p64 + + eor T2.16b, T2.16b, XH.16b + eor XL.16b, XL.16b, T2.16b + + cbz w0, 5f + b 1b + .endif + +2: ld1 {T1.2d}, [x2], #16 + sub w0, w0, #1 + +3: /* multiply XL by SHASH in GF(2^128) */ CPU_LE( rev64 T1.16b, T1.16b ) ext T2.16b, XL.16b, XL.16b, #8 @@ -250,7 +334,7 @@ CPU_LE( rev64 T1.16b, T1.16b ) __pmull_\pn XL, XL, SHASH // a0 * b0 __pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0) - eor T2.16b, XL.16b, XH.16b +4: eor T2.16b, XL.16b, XH.16b ext T1.16b, XL.16b, XH.16b, #8 eor XM.16b, XM.16b, T2.16b @@ -259,18 +343,9 @@ CPU_LE( rev64 T1.16b, T1.16b ) eor T2.16b, T2.16b, XH.16b eor XL.16b, XL.16b, T2.16b - cbz w19, 3f + cbnz w0, 0b - if_will_cond_yield_neon - st1 {XL.2d}, [x20] - do_cond_yield_neon - b 0b - endif_yield_neon - - b 1b - -3: st1 {XL.2d}, [x20] - frame_pop +5: st1 {XL.2d}, [x1] ret .endm @@ -286,9 +361,10 @@ ENTRY(pmull_ghash_update_p8) __pmull_ghash p8 ENDPROC(pmull_ghash_update_p8) - KS .req v8 - CTR .req v9 - INP .req v10 + KS0 .req v12 + KS1 .req v13 + INP0 .req v14 + INP1 .req v15 .macro load_round_keys, rounds, rk cmp \rounds, #12 @@ -322,142 +398,153 @@ ENDPROC(pmull_ghash_update_p8) .endm .macro pmull_gcm_do_crypt, enc - frame_push 10 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x5 - mov x25, x6 - mov x26, x7 - .if \enc == 1 - ldr x27, [sp, #96] // first stacked arg - .endif - - ldr x28, [x24, #8] // load lower counter -CPU_LE( rev x28, x28 ) - -0: mov x0, x25 - load_round_keys w26, x0 - ld1 {SHASH.2d}, [x23] - ld1 {XL.2d}, [x20] + ld1 {SHASH.2d}, [x4], #16 + ld1 {HH.2d}, [x4] + ld1 {XL.2d}, [x1] + ldr x8, [x5, #8] // load lower counter movi MASK.16b, #0xe1 - ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 + trn1 SHASH2.2d, SHASH.2d, HH.2d + trn2 T1.2d, SHASH.2d, HH.2d +CPU_LE( rev x8, x8 ) shl MASK.2d, MASK.2d, #57 - eor SHASH2.16b, SHASH2.16b, SHASH.16b + eor SHASH2.16b, SHASH2.16b, T1.16b .if \enc == 1 - ld1 {KS.16b}, [x27] + ldr x10, [sp] + ld1 {KS0.16b-KS1.16b}, [x10] .endif -1: ld1 {CTR.8b}, [x24] // load upper counter - ld1 {INP.16b}, [x22], #16 - rev x9, x28 - add x28, x28, #1 - sub w19, w19, #1 - ins CTR.d[1], x9 // set lower counter + cbnz x6, 4f + +0: ld1 {INP0.16b-INP1.16b}, [x3], #32 + + rev x9, x8 + add x11, x8, #1 + add x8, x8, #2 .if \enc == 1 - eor INP.16b, INP.16b, KS.16b // encrypt input - st1 {INP.16b}, [x21], #16 + eor INP0.16b, INP0.16b, KS0.16b // encrypt input + eor INP1.16b, INP1.16b, KS1.16b .endif - rev64 T1.16b, INP.16b + ld1 {KS0.8b}, [x5] // load upper counter + rev x11, x11 + sub w0, w0, #2 + mov KS1.8b, KS0.8b + ins KS0.d[1], x9 // set lower counter + ins KS1.d[1], x11 - cmp w26, #12 - b.ge 4f // AES-192/256? + rev64 T1.16b, INP1.16b -2: enc_round CTR, v21 + cmp w7, #12 + b.ge 2f // AES-192/256? 
- ext T2.16b, XL.16b, XL.16b, #8 +1: enc_round KS0, v21 ext IN1.16b, T1.16b, T1.16b, #8 - enc_round CTR, v22 + enc_round KS1, v21 + pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1 + + enc_round KS0, v22 + eor T1.16b, T1.16b, IN1.16b + + enc_round KS1, v22 + pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0 + enc_round KS0, v23 + pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0) + + enc_round KS1, v23 + rev64 T1.16b, INP0.16b + ext T2.16b, XL.16b, XL.16b, #8 + + enc_round KS0, v24 + ext IN1.16b, T1.16b, T1.16b, #8 eor T1.16b, T1.16b, T2.16b - eor XL.16b, XL.16b, IN1.16b - enc_round CTR, v23 + enc_round KS1, v24 + eor XL.16b, XL.16b, IN1.16b - pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1 + enc_round KS0, v25 eor T1.16b, T1.16b, XL.16b - enc_round CTR, v24 + enc_round KS1, v25 + pmull2 XH.1q, HH.2d, XL.2d // a1 * b1 + + enc_round KS0, v26 + pmull XL.1q, HH.1d, XL.1d // a0 * b0 - pmull XL.1q, SHASH.1d, XL.1d // a0 * b0 - pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0) + enc_round KS1, v26 + pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0) - enc_round CTR, v25 + enc_round KS0, v27 + eor XL.16b, XL.16b, XL2.16b + eor XH.16b, XH.16b, XH2.16b + enc_round KS1, v27 + eor XM.16b, XM.16b, XM2.16b ext T1.16b, XL.16b, XH.16b, #8 + + enc_round KS0, v28 eor T2.16b, XL.16b, XH.16b eor XM.16b, XM.16b, T1.16b - enc_round CTR, v26 - + enc_round KS1, v28 eor XM.16b, XM.16b, T2.16b - pmull T2.1q, XL.1d, MASK.1d - enc_round CTR, v27 + enc_round KS0, v29 + pmull T2.1q, XL.1d, MASK.1d + enc_round KS1, v29 mov XH.d[0], XM.d[1] mov XM.d[1], XL.d[0] - enc_round CTR, v28 - + aese KS0.16b, v30.16b eor XL.16b, XM.16b, T2.16b - enc_round CTR, v29 - + aese KS1.16b, v30.16b ext T2.16b, XL.16b, XL.16b, #8 - aese CTR.16b, v30.16b - + eor KS0.16b, KS0.16b, v31.16b pmull XL.1q, XL.1d, MASK.1d eor T2.16b, T2.16b, XH.16b - eor KS.16b, CTR.16b, v31.16b - + eor KS1.16b, KS1.16b, v31.16b eor XL.16b, XL.16b, T2.16b .if \enc == 0 - eor INP.16b, INP.16b, KS.16b - st1 {INP.16b}, [x21], #16 + eor INP0.16b, INP0.16b, KS0.16b + eor INP1.16b, INP1.16b, KS1.16b .endif - cbz w19, 3f + st1 {INP0.16b-INP1.16b}, [x2], #32 - if_will_cond_yield_neon - st1 {XL.2d}, [x20] - .if \enc == 1 - st1 {KS.16b}, [x27] - .endif - do_cond_yield_neon - b 0b - endif_yield_neon + cbnz w0, 0b - b 1b +CPU_LE( rev x8, x8 ) + st1 {XL.2d}, [x1] + str x8, [x5, #8] // store lower counter -3: st1 {XL.2d}, [x20] .if \enc == 1 - st1 {KS.16b}, [x27] + st1 {KS0.16b-KS1.16b}, [x10] .endif -CPU_LE( rev x28, x28 ) - str x28, [x24, #8] // store lower counter - - frame_pop ret -4: b.eq 5f // AES-192? - enc_round CTR, v17 - enc_round CTR, v18 -5: enc_round CTR, v19 - enc_round CTR, v20 - b 2b +2: b.eq 3f // AES-192? + enc_round KS0, v17 + enc_round KS1, v17 + enc_round KS0, v18 + enc_round KS1, v18 +3: enc_round KS0, v19 + enc_round KS1, v19 + enc_round KS0, v20 + enc_round KS1, v20 + b 1b + +4: load_round_keys w7, x6 + b 0b .endm /* diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 7cf0b1aa6ea8..6e9f33d14930 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -1,7 +1,7 @@ /* * Accelerated GHASH implementation with ARMv8 PMULL instructions. * - * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org> + * Copyright (C) 2014 - 2018 Linaro Ltd. 
<ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -33,9 +33,12 @@ MODULE_ALIAS_CRYPTO("ghash"); #define GCM_IV_SIZE 12 struct ghash_key { - u64 a; - u64 b; - be128 k; + u64 h[2]; + u64 h2[2]; + u64 h3[2]; + u64 h4[2]; + + be128 k; }; struct ghash_desc_ctx { @@ -113,6 +116,9 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, } } +/* avoid hogging the CPU for too long */ +#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE) + static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int len) { @@ -136,11 +142,16 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, blocks = len / GHASH_BLOCK_SIZE; len %= GHASH_BLOCK_SIZE; - ghash_do_update(blocks, ctx->digest, src, key, - partial ? ctx->buf : NULL); + do { + int chunk = min(blocks, MAX_BLOCKS); - src += blocks * GHASH_BLOCK_SIZE; - partial = 0; + ghash_do_update(chunk, ctx->digest, src, key, + partial ? ctx->buf : NULL); + + blocks -= chunk; + src += chunk * GHASH_BLOCK_SIZE; + partial = 0; + } while (unlikely(blocks > 0)); } if (len) memcpy(ctx->buf + partial, src, len); @@ -166,23 +177,36 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) return 0; } +static void ghash_reflect(u64 h[], const be128 *k) +{ + u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0; + + h[0] = (be64_to_cpu(k->b) << 1) | carry; + h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63); + + if (carry) + h[1] ^= 0xc200000000000000UL; +} + static int __ghash_setkey(struct ghash_key *key, const u8 *inkey, unsigned int keylen) { - u64 a, b; + be128 h; /* needed for the fallback */ memcpy(&key->k, inkey, GHASH_BLOCK_SIZE); - /* perform multiplication by 'x' in GF(2^128) */ - b = get_unaligned_be64(inkey); - a = get_unaligned_be64(inkey + 8); + ghash_reflect(key->h, &key->k); - key->a = (a << 1) | (b >> 63); - key->b = (b << 1) | (a >> 63); + h = key->k; + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h2, &h); - if (b >> 63) - key->b ^= 0xc200000000000000UL; + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h3, &h); + + gf128mul_lle(&h, &key->k); + ghash_reflect(key->h4, &h); return 0; } @@ -204,7 +228,6 @@ static struct shash_alg ghash_alg = { .base.cra_name = "ghash", .base.cra_driver_name = "ghash-ce", .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = GHASH_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ghash_key), .base.cra_module = THIS_MODULE, @@ -245,7 +268,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey, __aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){}, num_rounds(&ctx->aes_key)); - return __ghash_setkey(&ctx->ghash_key, key, sizeof(key)); + return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128)); } static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) @@ -349,9 +372,10 @@ static int gcm_encrypt(struct aead_request *req) struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead); struct skcipher_walk walk; u8 iv[AES_BLOCK_SIZE]; - u8 ks[AES_BLOCK_SIZE]; + u8 ks[2 * AES_BLOCK_SIZE]; u8 tag[AES_BLOCK_SIZE]; u64 dg[2] = {}; + int nrounds = num_rounds(&ctx->aes_key); int err; if (req->assoclen) @@ -360,39 +384,39 @@ static int gcm_encrypt(struct aead_request *req) memcpy(iv, req->iv, GCM_IV_SIZE); put_unaligned_be32(1, iv + GCM_IV_SIZE); - if (likely(may_use_simd())) { - kernel_neon_begin(); + err = skcipher_walk_aead_encrypt(&walk, req, false); - pmull_gcm_encrypt_block(tag, iv, 
ctx->aes_key.key_enc, - num_rounds(&ctx->aes_key)); + if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) { + u32 const *rk = NULL; + + kernel_neon_begin(); + pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds); put_unaligned_be32(2, iv + GCM_IV_SIZE); - pmull_gcm_encrypt_block(ks, iv, NULL, - num_rounds(&ctx->aes_key)); + pmull_gcm_encrypt_block(ks, iv, NULL, nrounds); put_unaligned_be32(3, iv + GCM_IV_SIZE); - kernel_neon_end(); + pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds); + put_unaligned_be32(4, iv + GCM_IV_SIZE); - err = skcipher_walk_aead_encrypt(&walk, req, false); + do { + int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2; - while (walk.nbytes >= AES_BLOCK_SIZE) { - int blocks = walk.nbytes / AES_BLOCK_SIZE; + if (rk) + kernel_neon_begin(); - kernel_neon_begin(); pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr, walk.src.virt.addr, &ctx->ghash_key, - iv, ctx->aes_key.key_enc, - num_rounds(&ctx->aes_key), ks); + iv, rk, nrounds, ks); kernel_neon_end(); err = skcipher_walk_done(&walk, - walk.nbytes % AES_BLOCK_SIZE); - } + walk.nbytes % (2 * AES_BLOCK_SIZE)); + + rk = ctx->aes_key.key_enc; + } while (walk.nbytes >= 2 * AES_BLOCK_SIZE); } else { - __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, - num_rounds(&ctx->aes_key)); + __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); put_unaligned_be32(2, iv + GCM_IV_SIZE); - err = skcipher_walk_aead_encrypt(&walk, req, false); - while (walk.nbytes >= AES_BLOCK_SIZE) { int blocks = walk.nbytes / AES_BLOCK_SIZE; u8 *dst = walk.dst.virt.addr; @@ -400,8 +424,7 @@ static int gcm_encrypt(struct aead_request *req) do { __aes_arm64_encrypt(ctx->aes_key.key_enc, - ks, iv, - num_rounds(&ctx->aes_key)); + ks, iv, nrounds); crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE); crypto_inc(iv, AES_BLOCK_SIZE); @@ -418,19 +441,28 @@ static int gcm_encrypt(struct aead_request *req) } if (walk.nbytes) __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, - num_rounds(&ctx->aes_key)); + nrounds); } /* handle the tail */ if (walk.nbytes) { u8 buf[GHASH_BLOCK_SIZE]; + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + u8 *head = NULL; crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks, walk.nbytes); - memcpy(buf, walk.dst.virt.addr, walk.nbytes); - memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes); - ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL); + if (walk.nbytes > GHASH_BLOCK_SIZE) { + head = dst; + dst += GHASH_BLOCK_SIZE; + nbytes %= GHASH_BLOCK_SIZE; + } + + memcpy(buf, dst, nbytes); + memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes); + ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head); err = skcipher_walk_done(&walk, 0); } @@ -453,10 +485,11 @@ static int gcm_decrypt(struct aead_request *req) struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead); unsigned int authsize = crypto_aead_authsize(aead); struct skcipher_walk walk; - u8 iv[AES_BLOCK_SIZE]; + u8 iv[2 * AES_BLOCK_SIZE]; u8 tag[AES_BLOCK_SIZE]; - u8 buf[GHASH_BLOCK_SIZE]; + u8 buf[2 * GHASH_BLOCK_SIZE]; u64 dg[2] = {}; + int nrounds = num_rounds(&ctx->aes_key); int err; if (req->assoclen) @@ -465,39 +498,53 @@ static int gcm_decrypt(struct aead_request *req) memcpy(iv, req->iv, GCM_IV_SIZE); put_unaligned_be32(1, iv + GCM_IV_SIZE); - if (likely(may_use_simd())) { - kernel_neon_begin(); + err = skcipher_walk_aead_decrypt(&walk, req, false); + + if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) { + u32 const *rk = NULL; - pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, - 
num_rounds(&ctx->aes_key)); + kernel_neon_begin(); + pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds); put_unaligned_be32(2, iv + GCM_IV_SIZE); - kernel_neon_end(); - err = skcipher_walk_aead_decrypt(&walk, req, false); + do { + int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2; + int rem = walk.total - blocks * AES_BLOCK_SIZE; - while (walk.nbytes >= AES_BLOCK_SIZE) { - int blocks = walk.nbytes / AES_BLOCK_SIZE; + if (rk) + kernel_neon_begin(); - kernel_neon_begin(); pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr, walk.src.virt.addr, &ctx->ghash_key, - iv, ctx->aes_key.key_enc, - num_rounds(&ctx->aes_key)); + iv, rk, nrounds); + + /* check if this is the final iteration of the loop */ + if (rem < (2 * AES_BLOCK_SIZE)) { + u8 *iv2 = iv + AES_BLOCK_SIZE; + + if (rem > AES_BLOCK_SIZE) { + memcpy(iv2, iv, AES_BLOCK_SIZE); + crypto_inc(iv2, AES_BLOCK_SIZE); + } + + pmull_gcm_encrypt_block(iv, iv, NULL, nrounds); + + if (rem > AES_BLOCK_SIZE) + pmull_gcm_encrypt_block(iv2, iv2, NULL, + nrounds); + } + kernel_neon_end(); err = skcipher_walk_done(&walk, - walk.nbytes % AES_BLOCK_SIZE); - } - if (walk.nbytes) - pmull_gcm_encrypt_block(iv, iv, NULL, - num_rounds(&ctx->aes_key)); + walk.nbytes % (2 * AES_BLOCK_SIZE)); + + rk = ctx->aes_key.key_enc; + } while (walk.nbytes >= 2 * AES_BLOCK_SIZE); } else { - __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, - num_rounds(&ctx->aes_key)); + __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); put_unaligned_be32(2, iv + GCM_IV_SIZE); - err = skcipher_walk_aead_decrypt(&walk, req, false); - while (walk.nbytes >= AES_BLOCK_SIZE) { int blocks = walk.nbytes / AES_BLOCK_SIZE; u8 *dst = walk.dst.virt.addr; @@ -508,8 +555,7 @@ static int gcm_decrypt(struct aead_request *req) do { __aes_arm64_encrypt(ctx->aes_key.key_enc, - buf, iv, - num_rounds(&ctx->aes_key)); + buf, iv, nrounds); crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE); crypto_inc(iv, AES_BLOCK_SIZE); @@ -522,14 +568,24 @@ static int gcm_decrypt(struct aead_request *req) } if (walk.nbytes) __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, - num_rounds(&ctx->aes_key)); + nrounds); } /* handle the tail */ if (walk.nbytes) { - memcpy(buf, walk.src.virt.addr, walk.nbytes); - memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes); - ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL); + const u8 *src = walk.src.virt.addr; + const u8 *head = NULL; + unsigned int nbytes = walk.nbytes; + + if (walk.nbytes > GHASH_BLOCK_SIZE) { + head = src; + src += GHASH_BLOCK_SIZE; + nbytes %= GHASH_BLOCK_SIZE; + } + + memcpy(buf, src, nbytes); + memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes); + ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head); crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv, walk.nbytes); @@ -554,7 +610,7 @@ static int gcm_decrypt(struct aead_request *req) static struct aead_alg gcm_aes_alg = { .ivsize = GCM_IV_SIZE, - .chunksize = AES_BLOCK_SIZE, + .chunksize = 2 * AES_BLOCK_SIZE, .maxauthsize = AES_BLOCK_SIZE, .setkey = gcm_setkey, .setauthsize = gcm_setauthsize, diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index efbeb3e0dcfb..17fac2889f56 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -99,7 +99,6 @@ static struct shash_alg alg = { .cra_name = "sha1", .cra_driver_name = "sha1-ce", .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/arm64/crypto/sha2-ce-glue.c 
b/arch/arm64/crypto/sha2-ce-glue.c index fd1ff2b13dfa..261f5195cab7 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -114,7 +114,6 @@ static struct shash_alg algs[] = { { .cra_name = "sha224", .cra_driver_name = "sha224-ce", .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -129,7 +128,6 @@ static struct shash_alg algs[] = { { .cra_name = "sha256", .cra_driver_name = "sha256-ce", .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c index e8880ccdc71f..4aedeaefd61f 100644 --- a/arch/arm64/crypto/sha256-glue.c +++ b/arch/arm64/crypto/sha256-glue.c @@ -67,8 +67,7 @@ static struct shash_alg algs[] = { { .descsize = sizeof(struct sha256_state), .base.cra_name = "sha256", .base.cra_driver_name = "sha256-arm64", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, + .base.cra_priority = 125, .base.cra_blocksize = SHA256_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { @@ -80,8 +79,7 @@ static struct shash_alg algs[] = { { .descsize = sizeof(struct sha256_state), .base.cra_name = "sha224", .base.cra_driver_name = "sha224-arm64", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, + .base.cra_priority = 125, .base.cra_blocksize = SHA224_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; @@ -153,7 +151,6 @@ static struct shash_alg neon_algs[] = { { .base.cra_name = "sha256", .base.cra_driver_name = "sha256-arm64-neon", .base.cra_priority = 150, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA256_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { @@ -166,7 +163,6 @@ static struct shash_alg neon_algs[] = { { .base.cra_name = "sha224", .base.cra_driver_name = "sha224-arm64-neon", .base.cra_priority = 150, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA224_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c index da8222e528bd..a336feac0f59 100644 --- a/arch/arm64/crypto/sha3-ce-glue.c +++ b/arch/arm64/crypto/sha3-ce-glue.c @@ -105,7 +105,6 @@ static struct shash_alg algs[] = { { .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-224", .base.cra_driver_name = "sha3-224-ce", - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA3_224_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, @@ -117,7 +116,6 @@ static struct shash_alg algs[] = { { .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-256", .base.cra_driver_name = "sha3-256-ce", - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA3_256_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, @@ -129,7 +127,6 @@ static struct shash_alg algs[] = { { .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-384", .base.cra_driver_name = "sha3-384-ce", - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA3_384_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, @@ -141,7 +138,6 @@ static struct shash_alg algs[] = { { .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-512", .base.cra_driver_name = "sha3-512-ce", - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA3_512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, diff --git a/arch/arm64/crypto/sha512-ce-glue.c 
b/arch/arm64/crypto/sha512-ce-glue.c index a77c8632a589..f2c5f28c622a 100644 --- a/arch/arm64/crypto/sha512-ce-glue.c +++ b/arch/arm64/crypto/sha512-ce-glue.c @@ -87,7 +87,6 @@ static struct shash_alg algs[] = { { .base.cra_name = "sha384", .base.cra_driver_name = "sha384-ce", .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { @@ -100,7 +99,6 @@ static struct shash_alg algs[] = { { .base.cra_name = "sha512", .base.cra_driver_name = "sha512-ce", .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c index 27db4851e380..325b23b43a9b 100644 --- a/arch/arm64/crypto/sha512-glue.c +++ b/arch/arm64/crypto/sha512-glue.c @@ -63,7 +63,6 @@ static struct shash_alg algs[] = { { .base.cra_name = "sha512", .base.cra_driver_name = "sha512-arm64", .base.cra_priority = 150, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { @@ -76,7 +75,6 @@ static struct shash_alg algs[] = { { .base.cra_name = "sha384", .base.cra_driver_name = "sha384-arm64", .base.cra_priority = 150, - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SHA384_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c index 3b4948f7e26f..88938a20d9b2 100644 --- a/arch/arm64/crypto/sm3-ce-glue.c +++ b/arch/arm64/crypto/sm3-ce-glue.c @@ -72,7 +72,6 @@ static struct shash_alg sm3_alg = { .descsize = sizeof(struct sm3_state), .base.cra_name = "sm3", .base.cra_driver_name = "sm3-ce", - .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, .base.cra_blocksize = SM3_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 3a9b84d39d71..6cd5d77b6b44 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h generic-y += msi.h generic-y += preempt.h generic-y += qrwlock.h +generic-y += qspinlock.h generic-y += rwsem.h generic-y += segment.h generic-y += serial.h diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 0db62a4cbce2..709208dfdc8b 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -12,10 +12,12 @@ #ifndef _ASM_ACPI_H #define _ASM_ACPI_H +#include <linux/efi.h> #include <linux/memblock.h> #include <linux/psci.h> #include <asm/cputype.h> +#include <asm/io.h> #include <asm/smp_plat.h> #include <asm/tlbflush.h> @@ -29,18 +31,22 @@ /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI +pgprot_t __acpi_get_mem_attribute(phys_addr_t addr); + /* ACPI table mapping after acpi_permanent_mmap is set */ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) { + /* For normal memory we already have a cacheable mapping. */ + if (memblock_is_map_memory(phys)) + return (void __iomem *)__phys_to_virt(phys); + /* - * EFI's reserve_regions() call adds memory with the WB attribute - * to memblock via early_init_dt_add_memory_arch(). + * We should still honor the memory's attribute here because + * crash dump kernel possibly excludes some ACPI (reclaim) + * regions from memblock list. 
*/ - if (!memblock_is_memory(phys)) - return ioremap(phys, size); - - return ioremap_cache(phys, size); + return __ioremap(phys, size, __acpi_get_mem_attribute(phys)); } #define acpi_os_ioremap acpi_os_ioremap @@ -129,15 +135,20 @@ static inline const char *acpi_get_enable_method(int cpu) * for compatibility. */ #define acpi_disable_cmcff 1 -pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr); +static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr) +{ + return __acpi_get_mem_attribute(addr); +} #endif /* CONFIG_ACPI_APEI */ #ifdef CONFIG_ACPI_NUMA int arm64_acpi_numa_init(void); -int acpi_numa_get_nid(unsigned int cpu, u64 hwid); +int acpi_numa_get_nid(unsigned int cpu); +void acpi_map_cpus_to_nodes(void); #else static inline int arm64_acpi_numa_init(void) { return -ENOSYS; } -static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; } +static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } +static inline void acpi_map_cpus_to_nodes(void) { } #endif /* CONFIG_ACPI_NUMA */ #define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index c0235e0ff849..9bca54dda75c 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -40,17 +40,6 @@ #include <asm/cmpxchg.h> -#define ___atomic_add_unless(v, a, u, sfx) \ -({ \ - typeof((v)->counter) c, old; \ - \ - c = atomic##sfx##_read(v); \ - while (c != (u) && \ - (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c; \ - }) - #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) @@ -61,21 +50,11 @@ #define atomic_add_return_release atomic_add_return_release #define atomic_add_return atomic_add_return -#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) -#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) -#define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) -#define atomic_inc_return(v) atomic_add_return(1, (v)) - #define atomic_sub_return_relaxed atomic_sub_return_relaxed #define atomic_sub_return_acquire atomic_sub_return_acquire #define atomic_sub_return_release atomic_sub_return_release #define atomic_sub_return atomic_sub_return -#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) -#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) -#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) -#define atomic_dec_return(v) atomic_sub_return(1, (v)) - #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #define atomic_fetch_add_acquire atomic_fetch_add_acquire #define atomic_fetch_add_release atomic_fetch_add_release @@ -119,13 +98,6 @@ cmpxchg_release(&((v)->counter), (old), (new)) #define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new)) -#define atomic_inc(v) atomic_add(1, (v)) -#define atomic_dec(v) atomic_sub(1, (v)) -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) -#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) -#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) -#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) -#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,) #define atomic_andnot atomic_andnot /* @@ -140,21 +112,11 @@ #define atomic64_add_return_release atomic64_add_return_release #define atomic64_add_return atomic64_add_return -#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) -#define 
atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) -#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) -#define atomic64_inc_return(v) atomic64_add_return(1, (v)) - #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #define atomic64_sub_return_acquire atomic64_sub_return_acquire #define atomic64_sub_return_release atomic64_sub_return_release #define atomic64_sub_return atomic64_sub_return -#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) -#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) -#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) -#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) - #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire #define atomic64_fetch_add_release atomic64_fetch_add_release @@ -195,16 +157,9 @@ #define atomic64_cmpxchg_release atomic_cmpxchg_release #define atomic64_cmpxchg atomic_cmpxchg -#define atomic64_inc(v) atomic64_add(1, (v)) -#define atomic64_dec(v) atomic64_sub(1, (v)) -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0) -#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0) -#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0) -#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u) #define atomic64_andnot atomic64_andnot -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#define atomic64_dec_if_positive atomic64_dec_if_positive #endif #endif diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index f11518af96a9..822a9192c551 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -128,6 +128,19 @@ do { \ __u.__val; \ }) +#define smp_cond_load_relaxed(ptr, cond_expr) \ +({ \ + typeof(ptr) __PTR = (ptr); \ + typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + __cmpwait_relaxed(__PTR, VAL); \ + } \ + VAL; \ +}) + #define smp_cond_load_acquire(ptr, cond_expr) \ ({ \ typeof(ptr) __PTR = (ptr); \ diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h index 9c19594ce7cb..10d536b1af74 100644 --- a/arch/arm64/include/asm/bitops.h +++ b/arch/arm64/include/asm/bitops.h @@ -17,22 +17,11 @@ #define __ASM_BITOPS_H #include <linux/compiler.h> -#include <asm/barrier.h> #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly #endif -/* - * Little endian assembly atomic bitops. 
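The smp_cond_load_relaxed() macro added to barrier.h above is the relaxed counterpart of smp_cond_load_acquire(): it repeatedly loads *ptr and parks the CPU in __cmpwait_relaxed() (WFE) until the condition holds, without imposing acquire ordering. A minimal usage sketch, assuming a kernel context; the function name below is illustrative and not part of this patch:

	/* Illustrative only: wait for another CPU to publish a non-zero flag.
	 * VAL is the name the macro gives to each freshly loaded value. */
	static inline void example_wait_for_flag(atomic_t *flag)
	{
		smp_cond_load_relaxed(&flag->counter, VAL != 0);
	}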
- */ -extern void set_bit(int nr, volatile unsigned long *p); -extern void clear_bit(int nr, volatile unsigned long *p); -extern void change_bit(int nr, volatile unsigned long *p); -extern int test_and_set_bit(int nr, volatile unsigned long *p); -extern int test_and_clear_bit(int nr, volatile unsigned long *p); -extern int test_and_change_bit(int nr, volatile unsigned long *p); - #include <asm-generic/bitops/builtin-__ffs.h> #include <asm-generic/bitops/builtin-ffs.h> #include <asm-generic/bitops/builtin-__fls.h> @@ -44,15 +33,11 @@ extern int test_and_change_bit(int nr, volatile unsigned long *p); #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/hweight.h> -#include <asm-generic/bitops/lock.h> +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/le.h> - -/* - * Ext2 is defined to use little-endian byte ordering. - */ -#define ext2_set_bit_atomic(lock, nr, p) test_and_set_bit_le(nr, p) -#define ext2_clear_bit_atomic(lock, nr, p) test_and_clear_bit_le(nr, p) +#include <asm-generic/bitops/ext2-atomic-setbit.h> #endif /* __ASM_BITOPS_H */ diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 5df5cfe1c143..5ee5bca8c24b 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -21,12 +21,16 @@ #define CTR_L1IP_SHIFT 14 #define CTR_L1IP_MASK 3 #define CTR_DMINLINE_SHIFT 16 +#define CTR_IMINLINE_SHIFT 0 #define CTR_ERG_SHIFT 20 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 #define CTR_IDC_SHIFT 28 #define CTR_DIC_SHIFT 29 +#define CTR_CACHE_MINLINE_MASK \ + (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT) + #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) #define ICACHE_POLICY_VPIPT 0 diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index d264a7274811..19844211a4e6 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -19,6 +19,7 @@ #ifndef __ASM_CACHEFLUSH_H #define __ASM_CACHEFLUSH_H +#include <linux/kgdb.h> #include <linux/mm.h> /* @@ -71,7 +72,7 @@ * - kaddr - page address * - size - region size */ -extern void flush_icache_range(unsigned long start, unsigned long end); +extern void __flush_icache_range(unsigned long start, unsigned long end); extern int invalidate_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); extern void __inval_dcache_area(void *addr, size_t len); @@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len); extern long __flush_cache_user_range(unsigned long start, unsigned long end); extern void sync_icache_aliases(void *kaddr, unsigned long len); +static inline void flush_icache_range(unsigned long start, unsigned long end) +{ + __flush_icache_range(start, end); + + /* + * IPI all online CPUs so that they undergo a context synchronization + * event and are forced to refetch the new instructions. + */ +#ifdef CONFIG_KGDB + /* + * KGDB performs cache maintenance with interrupts disabled, so we + * will deadlock trying to IPI the secondary CPUs. In theory, we can + * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that + * just means that KGDB will elide the maintenance altogether! As it + * turns out, KGDB uses IPIs to round-up the secondary CPUs during + * the patching operation, so we don't need extra IPIs here anyway. + * In which case, add a KGDB-specific bodge and return early. 
+ */ + if (kgdb_connected && irqs_disabled()) + return; +#endif + kick_all_cpus_sync(); +} + static inline void flush_cache_mm(struct mm_struct *mm) { } diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 8a699c708fc9..ae1f70450fb2 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -49,7 +49,9 @@ #define ARM64_HAS_CACHE_DIC 28 #define ARM64_HW_DBM 29 #define ARM64_SSBD 30 +#define ARM64_MISMATCHED_CACHE_TYPE 31 +#define ARM64_HAS_STAGE2_FWB 32 -#define ARM64_NCAPS 31 +#define ARM64_NCAPS 33 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 192d791f1103..7ed320895d1f 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, #define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__) #define efi_is_64bit() (true) +#define efi_table_attr(table, attr, instance) \ + ((table##_t *)instance)->attr + #define efi_call_proto(protocol, f, instance, ...) \ ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index fa92747a49c8..dd1ad3950ef5 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -16,13 +16,15 @@ #ifndef __ASM_FP_H #define __ASM_FP_H -#include <asm/ptrace.h> #include <asm/errno.h> +#include <asm/ptrace.h> #include <asm/processor.h> #include <asm/sigcontext.h> +#include <asm/sysreg.h> #ifndef __ASSEMBLY__ +#include <linux/build_bug.h> #include <linux/cache.h> #include <linux/init.h> #include <linux/stddef.h> @@ -102,6 +104,16 @@ extern int sve_set_vector_length(struct task_struct *task, extern int sve_set_current_vl(unsigned long arg); extern int sve_get_current_vl(void); +static inline void sve_user_disable(void) +{ + sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0); +} + +static inline void sve_user_enable(void) +{ + sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN); +} + /* * Probing and setup functions. * Calls to these functions must be serialised with one another. 
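The sve_user_enable()/sve_user_disable() helpers added to fpsimd.h above are thin sysreg_clear_set() wrappers around CPACR_EL1.ZEN_EL0EN, the bit that gates SVE use at EL0. A hedged sketch of the kind of caller they enable, assuming the usual SVE helpers; the function name is illustrative, not part of this patch:

	/*
	 * Illustrative only: drop a task's SVE state on entry to the kernel.
	 * Clearing CPACR_EL1.ZEN_EL0EN makes the next EL0 SVE instruction
	 * trap, so SVE can be re-enabled and repopulated lazily.
	 */
	static void example_sve_user_discard(void)
	{
		if (!system_supports_sve())
			return;

		clear_thread_flag(TIF_SVE);	/* task no longer owns live SVE state */
		sve_user_disable();		/* trap further EL0 SVE usage */
	}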
@@ -128,6 +140,9 @@ static inline int sve_get_current_vl(void) return -EINVAL; } +static inline void sve_user_disable(void) { BUILD_BUG(); } +static inline void sve_user_enable(void) { BUILD_BUG(); } + static inline void sve_init_vq_map(void) { } static inline void sve_update_vq_map(void) { } static inline int sve_verify_vq_map(void) { return 0; } diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index 41770766d964..6a53e59ced95 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg, struct task_struct; struct notifier_block; +struct perf_event_attr; struct perf_event; struct pmu; extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, int *gen_len, int *gen_type, int *offset); -extern int arch_check_bp_in_kernelspace(struct perf_event *bp); -extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); +extern int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data); diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index f62c56b1793f..c6802dea6cab 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base, s32 aarch64_get_branch_offset(u32 insn); u32 aarch64_set_branch_offset(u32 insn, s32 offset); -bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); - int aarch64_insn_patch_text_nosync(void *addr, u32 insn); int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index a0fee6985e6a..b2b0c6405eb0 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -8,8 +8,6 @@ struct pt_regs; -extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); - static inline int nr_legacy_irqs(void) { return 0; diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index 6deb8d726041..d5a44cf859e9 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h @@ -48,7 +48,6 @@ struct kprobe_ctlblk { unsigned long saved_irqflag; struct prev_kprobe prev_kprobe; struct kprobe_step_ctx ss_ctx; - struct pt_regs jprobe_saved_regs; }; void arch_remove_kprobe(struct kprobe *); diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 6dd285e979c9..aa45df752a16 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -23,6 +23,7 @@ #include <asm/types.h> /* Hyp Configuration Register (HCR) bits */ +#define HCR_FWB (UL(1) << 46) #define HCR_TEA (UL(1) << 37) #define HCR_TERR (UL(1) << 36) #define HCR_TLOR (UL(1) << 35) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 1dab3a984608..6106a85ae0be 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -63,6 +63,8 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) /* trap error record accesses */ vcpu->arch.hcr_el2 |= HCR_TERR; } + if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + vcpu->arch.hcr_el2 |= HCR_FWB; if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) vcpu->arch.hcr_el2 &= ~HCR_RW; @@ -81,6 +83,21 @@ 
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu) return (unsigned long *)&vcpu->arch.hcr_el2; } +static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 &= ~HCR_TWE; +} + +static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 |= HCR_TWE; +} + +static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.vsesr_el2; +} + static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr) { vcpu->arch.vsesr_el2 = vsesr; @@ -140,7 +157,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) { - *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; + *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT; } /* @@ -190,8 +207,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) u32 mode; if (vcpu_mode_is_32bit(vcpu)) { - mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; - return mode > COMPAT_PSR_MODE_USR; + mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; + return mode > PSR_AA32_MODE_USR; } mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; @@ -329,7 +346,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) { - *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT; + *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT; } else { u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); sctlr |= (1 << 25); @@ -340,7 +357,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) - return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT); + return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT); return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index fe8777b12f86..f26055f2306e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -350,6 +350,11 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, + struct kvm_vcpu_events *events); + +int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, + struct kvm_vcpu_events *events); #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); @@ -378,16 +383,23 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run, int kvm_perf_init(void); int kvm_perf_teardown(void); +void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); + struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); -void __kvm_set_tpidr_el2(u64 tpidr_el2); DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, unsigned long hyp_stack_ptr, unsigned long vector_ptr) { - u64 tpidr_el2; + /* + * Calculate the raw per-cpu offset without a translation from the + * kernel's mapping to the linear mapping, and store it in tpidr_el2 + * so that we can use adr_l to access per-cpu variables in EL2. + */ + u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) - + (u64)kvm_ksym_ref(kvm_host_cpu_state)); /* * Call initialization code, and switch to the full blown HYP code. @@ -396,17 +408,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, * cpus_have_const_cap() wrapper. 
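The vcpu_set_wfe_traps()/vcpu_clear_wfe_traps() helpers added to kvm_emulate.h above simply toggle HCR_EL2.TWE for a vCPU. A hedged sketch of how a caller might use them to trap guest WFE only when the host CPU is contended; the function name is illustrative and not taken from this patch:

	/* Illustrative only: trap WFE when other tasks want this CPU, so a
	 * spinning vCPU exits and can yield instead of burning cycles. */
	static void example_update_wfe_traps(struct kvm_vcpu *vcpu)
	{
		if (single_task_running())
			vcpu_clear_wfe_traps(vcpu);	/* uncontended: run WFE natively */
		else
			vcpu_set_wfe_traps(vcpu);	/* contended: trap and reschedule */
	}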
*/ BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); - __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr); - - /* - * Calculate the raw per-cpu offset without a translation from the - * kernel's mapping to the linear mapping, and store it in tpidr_el2 - * so that we can use adr_l to access per-cpu variables in EL2. - */ - tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state) - - (u64)kvm_ksym_ref(kvm_host_cpu_state); - - kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2); + __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2); } static inline bool kvm_arch_check_sve_has_vhe(void) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index fb9a7127bb75..d6fff7de5539 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -169,8 +169,12 @@ phys_addr_t kvm_get_idmap_vector(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); -#define kvm_set_pte(ptep, pte) set_pte(ptep, pte) -#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) +#define kvm_mk_pmd(ptep) \ + __pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE) +#define kvm_mk_pud(pmdp) \ + __pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE) +#define kvm_mk_pgd(pudp) \ + __pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE) static inline pte_t kvm_s2pte_mkwrite(pte_t pte) { @@ -267,6 +271,15 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) { void *va = page_address(pfn_to_page(pfn)); + /* + * With FWB, we ensure that the guest always accesses memory using + * cacheable attributes, and we don't have to clean to PoC when + * faulting in pages. Furthermore, FWB implies IDC, so cleaning to + * PoU is not required either in this case. + */ + if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + return; + kvm_flush_dcache_to_poc(va, size); } @@ -287,20 +300,26 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn, static inline void __kvm_flush_dcache_pte(pte_t pte) { - struct page *page = pte_page(pte); - kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE); + if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) { + struct page *page = pte_page(pte); + kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE); + } } static inline void __kvm_flush_dcache_pmd(pmd_t pmd) { - struct page *page = pmd_page(pmd); - kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE); + if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) { + struct page *page = pmd_page(pmd); + kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE); + } } static inline void __kvm_flush_dcache_pud(pud_t pud) { - struct page *page = pud_page(pud); - kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); + if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) { + struct page *page = pud_page(pud); + kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); + } } #define kvm_virt_to_phys(x) __pa_symbol(x) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 49d99214f43c..b96442960aea 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -155,6 +155,13 @@ #define MT_S2_NORMAL 0xf #define MT_S2_DEVICE_nGnRE 0x1 +/* + * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001 + * Stage-2 enforces Normal-WB and Device-nGnRE + */ +#define MT_S2_FWB_NORMAL 6 +#define MT_S2_FWB_DEVICE_nGnRE 1 + #ifdef CONFIG_ARM64_4K_PAGES #define IOREMAP_MAX_ORDER (PUD_SHIFT) #else diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h index f922eaf780f9..fb9d137256a6 100644 --- 
a/arch/arm64/include/asm/neon.h +++ b/arch/arm64/include/asm/neon.h @@ -19,11 +19,4 @@ void kernel_neon_begin(void); void kernel_neon_end(void); -/* - * Temporary macro to allow the crypto code to compile. Note that the - * semantics of kernel_neon_begin_partial() are now different from the - * original as it does not allow being called in an interrupt context. - */ -#define kernel_neon_begin_partial(num_regs) kernel_neon_begin() - #endif /* ! __ASM_NEON_H */ diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h index 01bc46d5b43a..626ad01e83bf 100644 --- a/arch/arm64/include/asm/numa.h +++ b/arch/arm64/include/asm/numa.h @@ -35,10 +35,14 @@ void __init numa_set_distance(int from, int to, int distance); void __init numa_free_distance(void); void __init early_map_cpu_to_node(unsigned int cpu, int nid); void numa_store_cpu_info(unsigned int cpu); +void numa_add_cpu(unsigned int cpu); +void numa_remove_cpu(unsigned int cpu); #else /* CONFIG_NUMA */ static inline void numa_store_cpu_info(unsigned int cpu) { } +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } static inline void arm64_numa_init(void) { } static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 108ecad7acc5..78b942c1bea4 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -67,8 +67,28 @@ #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) -#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY | PTE_S2_XN) -#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN) +#define PAGE_S2_MEMATTR(attr) \ + ({ \ + u64 __val; \ + if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) \ + __val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr); \ + else \ + __val = PTE_S2_MEMATTR(MT_S2_ ## attr); \ + __val; \ + }) + +#define PAGE_S2_XN \ + ({ \ + u64 __val; \ + if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) \ + __val = 0; \ + else \ + __val = PTE_S2_XN; \ + __val; \ + }) + +#define PAGE_S2 __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(NORMAL) | PTE_S2_RDONLY | PAGE_S2_XN) +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PAGE_S2_XN) #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index a73ae1e49200..79657ad91397 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -182,12 +182,12 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { start_thread_common(regs, pc); - regs->pstate = COMPAT_PSR_MODE_USR; + regs->pstate = PSR_AA32_MODE_USR; if (pc & 1) - regs->pstate |= COMPAT_PSR_T_BIT; + regs->pstate |= PSR_AA32_T_BIT; #ifdef __AARCH64EB__ - regs->pstate |= COMPAT_PSR_E_BIT; + regs->pstate |= PSR_AA32_E_BIT; #endif regs->compat_sp = sp; @@ -266,5 +266,20 @@ extern void __init minsigstksz_setup(void); #define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_GET_VL() sve_get_current_vl() +/* + * For CONFIG_GCC_PLUGIN_STACKLEAK + * + * These need to be macros because otherwise we 
get stuck in a nightmare + * of header definitions for the use of task_stack_page. + */ + +#define current_top_of_stack() \ +({ \ + struct stack_info _info; \ + BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \ + _info.high; \ +}) +#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL)) + #endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 6069d66e0bc2..177b851ca6d9 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -35,36 +35,39 @@ #define COMPAT_PTRACE_GETHBPREGS 29 #define COMPAT_PTRACE_SETHBPREGS 30 -/* AArch32 CPSR bits */ -#define COMPAT_PSR_MODE_MASK 0x0000001f -#define COMPAT_PSR_MODE_USR 0x00000010 -#define COMPAT_PSR_MODE_FIQ 0x00000011 -#define COMPAT_PSR_MODE_IRQ 0x00000012 -#define COMPAT_PSR_MODE_SVC 0x00000013 -#define COMPAT_PSR_MODE_ABT 0x00000017 -#define COMPAT_PSR_MODE_HYP 0x0000001a -#define COMPAT_PSR_MODE_UND 0x0000001b -#define COMPAT_PSR_MODE_SYS 0x0000001f -#define COMPAT_PSR_T_BIT 0x00000020 -#define COMPAT_PSR_F_BIT 0x00000040 -#define COMPAT_PSR_I_BIT 0x00000080 -#define COMPAT_PSR_A_BIT 0x00000100 -#define COMPAT_PSR_E_BIT 0x00000200 -#define COMPAT_PSR_J_BIT 0x01000000 -#define COMPAT_PSR_Q_BIT 0x08000000 -#define COMPAT_PSR_V_BIT 0x10000000 -#define COMPAT_PSR_C_BIT 0x20000000 -#define COMPAT_PSR_Z_BIT 0x40000000 -#define COMPAT_PSR_N_BIT 0x80000000 -#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ -#define COMPAT_PSR_GE_MASK 0x000f0000 +/* SPSR_ELx bits for exceptions taken from AArch32 */ +#define PSR_AA32_MODE_MASK 0x0000001f +#define PSR_AA32_MODE_USR 0x00000010 +#define PSR_AA32_MODE_FIQ 0x00000011 +#define PSR_AA32_MODE_IRQ 0x00000012 +#define PSR_AA32_MODE_SVC 0x00000013 +#define PSR_AA32_MODE_ABT 0x00000017 +#define PSR_AA32_MODE_HYP 0x0000001a +#define PSR_AA32_MODE_UND 0x0000001b +#define PSR_AA32_MODE_SYS 0x0000001f +#define PSR_AA32_T_BIT 0x00000020 +#define PSR_AA32_F_BIT 0x00000040 +#define PSR_AA32_I_BIT 0x00000080 +#define PSR_AA32_A_BIT 0x00000100 +#define PSR_AA32_E_BIT 0x00000200 +#define PSR_AA32_DIT_BIT 0x01000000 +#define PSR_AA32_Q_BIT 0x08000000 +#define PSR_AA32_V_BIT 0x10000000 +#define PSR_AA32_C_BIT 0x20000000 +#define PSR_AA32_Z_BIT 0x40000000 +#define PSR_AA32_N_BIT 0x80000000 +#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */ +#define PSR_AA32_GE_MASK 0x000f0000 #ifdef CONFIG_CPU_BIG_ENDIAN -#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT +#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT #else -#define COMPAT_PSR_ENDSTATE 0 +#define PSR_AA32_ENDSTATE 0 #endif +/* AArch32 CPSR bits, as seen in AArch32 */ +#define COMPAT_PSR_DIT_BIT 0x00200000 + /* * These are 'magic' values for PTRACE_PEEKUSR that return info about where a * process is located in memory. @@ -111,6 +114,30 @@ #define compat_sp_fiq regs[29] #define compat_lr_fiq regs[30] +static inline unsigned long compat_psr_to_pstate(const unsigned long psr) +{ + unsigned long pstate; + + pstate = psr & ~COMPAT_PSR_DIT_BIT; + + if (psr & COMPAT_PSR_DIT_BIT) + pstate |= PSR_AA32_DIT_BIT; + + return pstate; +} + +static inline unsigned long pstate_to_compat_psr(const unsigned long pstate) +{ + unsigned long psr; + + psr = pstate & ~PSR_AA32_DIT_BIT; + + if (pstate & PSR_AA32_DIT_BIT) + psr |= COMPAT_PSR_DIT_BIT; + + return psr; +} + /* * This struct defines the way the registers are stored on the stack during an * exception. 
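The compat_psr_to_pstate()/pstate_to_compat_psr() helpers above only relocate the DIT flag between its AArch32 CPSR position (bit 21, COMPAT_PSR_DIT_BIT) and its SPSR_ELx position for AArch32 exceptions (bit 24, PSR_AA32_DIT_BIT); every other bit passes through unchanged. A small worked illustration, with example values only:

	/* Illustrative only: DIT moves from bit 21 to bit 24 and back. */
	static void example_dit_round_trip(void)
	{
		unsigned long psr = PSR_AA32_MODE_USR | COMPAT_PSR_DIT_BIT;	/* 0x00200010 */
		unsigned long pstate = compat_psr_to_pstate(psr);		/* 0x01000010 */

		WARN_ON(!(pstate & PSR_AA32_DIT_BIT));
		WARN_ON(pstate_to_compat_psr(pstate) != psr);			/* round-trips */
	}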
Note that sizeof(struct pt_regs) has to be a multiple of 16 (for @@ -156,7 +183,7 @@ static inline void forget_syscall(struct pt_regs *regs) #ifdef CONFIG_COMPAT #define compat_thumb_mode(regs) \ - (((regs)->pstate & COMPAT_PSR_T_BIT)) + (((regs)->pstate & PSR_AA32_T_BIT)) #else #define compat_thumb_mode(regs) (0) #endif diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h index e073e6886685..ffe47d766c25 100644 --- a/arch/arm64/include/asm/sdei.h +++ b/arch/arm64/include/asm/sdei.h @@ -40,15 +40,18 @@ asmlinkage unsigned long __sdei_handler(struct pt_regs *regs, unsigned long sdei_arch_get_entry_point(int conduit); #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x) -bool _on_sdei_stack(unsigned long sp); -static inline bool on_sdei_stack(unsigned long sp) +struct stack_info; + +bool _on_sdei_stack(unsigned long sp, struct stack_info *info); +static inline bool on_sdei_stack(unsigned long sp, + struct stack_info *info) { if (!IS_ENABLED(CONFIG_VMAP_STACK)) return false; if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) return false; if (in_nmi()) - return _on_sdei_stack(sp); + return _on_sdei_stack(sp, info); return false; } diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h index fa8b3fe932e6..6495cc51246f 100644 --- a/arch/arm64/include/asm/simd.h +++ b/arch/arm64/include/asm/simd.h @@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy); static __must_check inline bool may_use_simd(void) { /* - * The raw_cpu_read() is racy if called with preemption enabled. - * This is not a bug: kernel_neon_busy is only set when - * preemption is disabled, so we cannot migrate to another CPU - * while it is set, nor can we migrate to a CPU where it is set. - * So, if we find it clear on some CPU then we're guaranteed to - * find it clear on any CPU we could migrate to. - * - * If we are in between kernel_neon_begin()...kernel_neon_end(), - * the flag will be set, but preemption is also disabled, so we - * can't migrate to another CPU and spuriously see it become - * false. + * kernel_neon_busy is only set while preemption is disabled, + * and is clear whenever preemption is enabled. Since + * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy + * cannot change under our feet -- if it's set we cannot be + * migrated, and if it's clear we cannot be migrated to a CPU + * where it is set. */ return !in_irq() && !irqs_disabled() && !in_nmi() && - !raw_cpu_read(kernel_neon_busy); + !this_cpu_read(kernel_neon_busy); } #else /* ! CONFIG_KERNEL_MODE_NEON */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 26c5bd7d88d8..38116008d18b 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -16,123 +16,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#include <asm/lse.h> -#include <asm/spinlock_types.h> -#include <asm/processor.h> - -/* - * Spinlock implementation. - * - * The memory barriers are implicit with the load-acquire and store-release - * instructions. - */ - -static inline void arch_spin_lock(arch_spinlock_t *lock) -{ - unsigned int tmp; - arch_spinlock_t lockval, newval; - - asm volatile( - /* Atomically increment the next ticket. */ - ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ -" prfm pstl1strm, %3\n" -"1: ldaxr %w0, %3\n" -" add %w1, %w0, %w5\n" -" stxr %w2, %w1, %3\n" -" cbnz %w2, 1b\n", - /* LSE atomics */ -" mov %w2, %w5\n" -" ldadda %w2, %w0, %3\n" - __nops(3) - ) - - /* Did we get the lock? 
*/ -" eor %w1, %w0, %w0, ror #16\n" -" cbz %w1, 3f\n" - /* - * No: spin on the owner. Send a local event to avoid missing an - * unlock before the exclusive load. - */ -" sevl\n" -"2: wfe\n" -" ldaxrh %w2, %4\n" -" eor %w1, %w2, %w0, lsr #16\n" -" cbnz %w1, 2b\n" - /* We got the lock. Critical section starts here. */ -"3:" - : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock) - : "Q" (lock->owner), "I" (1 << TICKET_SHIFT) - : "memory"); -} - -static inline int arch_spin_trylock(arch_spinlock_t *lock) -{ - unsigned int tmp; - arch_spinlock_t lockval; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - " prfm pstl1strm, %2\n" - "1: ldaxr %w0, %2\n" - " eor %w1, %w0, %w0, ror #16\n" - " cbnz %w1, 2f\n" - " add %w0, %w0, %3\n" - " stxr %w1, %w0, %2\n" - " cbnz %w1, 1b\n" - "2:", - /* LSE atomics */ - " ldr %w0, %2\n" - " eor %w1, %w0, %w0, ror #16\n" - " cbnz %w1, 1f\n" - " add %w1, %w0, %3\n" - " casa %w0, %w1, %2\n" - " sub %w1, %w1, %3\n" - " eor %w1, %w1, %w0\n" - "1:") - : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) - : "I" (1 << TICKET_SHIFT) - : "memory"); - - return !tmp; -} - -static inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - unsigned long tmp; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - " ldrh %w1, %0\n" - " add %w1, %w1, #1\n" - " stlrh %w1, %0", - /* LSE atomics */ - " mov %w1, #1\n" - " staddlh %w1, %0\n" - __nops(1)) - : "=Q" (lock->owner), "=&r" (tmp) - : - : "memory"); -} - -static inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - return lock.owner == lock.next; -} - -static inline int arch_spin_is_locked(arch_spinlock_t *lock) -{ - return !arch_spin_value_unlocked(READ_ONCE(*lock)); -} - -static inline int arch_spin_is_contended(arch_spinlock_t *lock) -{ - arch_spinlock_t lockval = READ_ONCE(*lock); - return (lockval.next - lockval.owner) > 1; -} -#define arch_spin_is_contended arch_spin_is_contended - #include <asm/qrwlock.h> +#include <asm/qspinlock.h> /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h index 6b856012c51b..a157ff465e27 100644 --- a/arch/arm64/include/asm/spinlock_types.h +++ b/arch/arm64/include/asm/spinlock_types.h @@ -20,22 +20,7 @@ # error "please don't include this file directly" #endif -#include <linux/types.h> - -#define TICKET_SHIFT 16 - -typedef struct { -#ifdef __AARCH64EB__ - u16 next; - u16 owner; -#else - u16 owner; - u16 next; -#endif -} __aligned(4) arch_spinlock_t; - -#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 } - +#include <asm-generic/qspinlock_types.h> #include <asm-generic/qrwlock_types.h> #endif diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 902f9edacbea..e86737b7c924 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -32,6 +32,21 @@ struct stackframe { #endif }; +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, + STACK_TYPE_IRQ, + STACK_TYPE_OVERFLOW, + STACK_TYPE_SDEI_NORMAL, + STACK_TYPE_SDEI_CRITICAL, +}; + +struct stack_info { + unsigned long low; + unsigned long high; + enum stack_type type; +}; + extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data); @@ -39,7 +54,8 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk); DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); -static inline bool 
on_irq_stack(unsigned long sp) +static inline bool on_irq_stack(unsigned long sp, + struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr); unsigned long high = low + IRQ_STACK_SIZE; @@ -47,46 +63,79 @@ static inline bool on_irq_stack(unsigned long sp) if (!low) return false; - return (low <= sp && sp < high); + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_IRQ; + } + + return true; } -static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp) +static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, + struct stack_info *info) { unsigned long low = (unsigned long)task_stack_page(tsk); unsigned long high = low + THREAD_SIZE; - return (low <= sp && sp < high); + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_TASK; + } + + return true; } #ifdef CONFIG_VMAP_STACK DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); -static inline bool on_overflow_stack(unsigned long sp) +static inline bool on_overflow_stack(unsigned long sp, + struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack); unsigned long high = low + OVERFLOW_STACK_SIZE; - return (low <= sp && sp < high); + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_OVERFLOW; + } + + return true; } #else -static inline bool on_overflow_stack(unsigned long sp) { return false; } +static inline bool on_overflow_stack(unsigned long sp, + struct stack_info *info) { return false; } #endif + /* * We can only safely access per-cpu stacks from current in a non-preemptible * context. */ -static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp) +static inline bool on_accessible_stack(struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) { - if (on_task_stack(tsk, sp)) + if (on_task_stack(tsk, sp, info)) return true; if (tsk != current || preemptible()) return false; - if (on_irq_stack(sp)) + if (on_irq_stack(sp, info)) return true; - if (on_overflow_stack(sp)) + if (on_overflow_stack(sp, info)) return true; - if (on_sdei_stack(sp)) + if (on_sdei_stack(sp, info)) return true; return false; diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 709a574468f0..ad8be16a39c9 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -20,7 +20,13 @@ #include <linux/compat.h> #include <linux/err.h> -extern const void *sys_call_table[]; +typedef long (*syscall_fn_t)(struct pt_regs *regs); + +extern const syscall_fn_t sys_call_table[]; + +#ifdef CONFIG_COMPAT +extern const syscall_fn_t compat_sys_call_table[]; +#endif static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h new file mode 100644 index 000000000000..a4477e515b79 --- /dev/null +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * syscall_wrapper.h - arm64 specific wrappers to syscall definitions + * + * Based on arch/x86/include/asm_syscall_wrapper.h + */ + +#ifndef __ASM_SYSCALL_WRAPPER_H +#define __ASM_SYSCALL_WRAPPER_H + +#define SC_ARM64_REGS_TO_ARGS(x, ...) 
\ + __MAP(x,__SC_ARGS \ + ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \ + ,,regs->regs[3],,regs->regs[4],,regs->regs[5]) + +#ifdef CONFIG_COMPAT + +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__arm64_compat_sys##name, ERRNO); \ + static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs) \ + { \ + return __se_compat_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \ + } \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#define COMPAT_SYSCALL_DEFINE0(sname) \ + asmlinkage long __arm64_compat_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ + asmlinkage long __arm64_compat_sys_##sname(void) + +#define COND_SYSCALL_COMPAT(name) \ + cond_syscall(__arm64_compat_sys_##name); + +#define COMPAT_SYS_NI(name) \ + SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers); + +#endif /* CONFIG_COMPAT */ + +#define __SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ + return ret; \ + } \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#ifndef SYSCALL_DEFINE0 +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __arm64_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ + asmlinkage long __arm64_sys_##sname(void) +#endif + +#ifndef COND_SYSCALL +#define COND_SYSCALL(name) cond_syscall(__arm64_sys_##name) +#endif + +#ifndef SYS_NI +#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers); +#endif + +#endif /* __ASM_SYSCALL_WRAPPER_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index a8f84812c6e8..c1470931b897 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -314,6 +314,8 @@ #define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) #define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3) #define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) +#define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6) +#define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7) #define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) #define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) #define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2) @@ -436,7 +438,8 @@ #define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \ (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \ - (1 << 27) | (1 << 30) | (1 << 31)) + (1 << 27) | (1 << 30) | (1 << 31) | \ + (0xffffffffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL2 SCTLR_ELx_EE @@ -452,9 +455,9 @@ SCTLR_ELx_SA | SCTLR_ELx_I | 
SCTLR_ELx_WXN | \ ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) -/* Check all the bits are accounted for */ -#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0) - +#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff +#error "Inconsistent SCTLR_EL2 set/clear bits" +#endif /* SCTLR_EL1 specific flags. */ #define SCTLR_EL1_UCI (1 << 26) @@ -473,7 +476,8 @@ #define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \ (1 << 29)) #define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \ - (1 << 27) | (1 << 30) | (1 << 31)) + (1 << 27) | (1 << 30) | (1 << 31) | \ + (0xffffffffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) @@ -492,8 +496,9 @@ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ SCTLR_EL1_RES0) -/* Check all the bits are accounted for */ -#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0) +#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff +#error "Inconsistent SCTLR_EL1 set/clear bits" +#endif /* id_aa64isar0 */ #define ID_AA64ISAR0_TS_SHIFT 52 @@ -576,6 +581,7 @@ #define ID_AA64MMFR1_VMIDBITS_16 2 /* id_aa64mmfr2 */ +#define ID_AA64MMFR2_FWB_SHIFT 40 #define ID_AA64MMFR2_AT_SHIFT 32 #define ID_AA64MMFR2_LVA_SHIFT 16 #define ID_AA64MMFR2_IESB_SHIFT 12 @@ -739,19 +745,6 @@ asm( write_sysreg(__scs_new, sysreg); \ } while (0) -static inline void config_sctlr_el1(u32 clear, u32 set) -{ - u32 val; - - SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS; - SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS; - - val = read_sysreg(sctlr_el1); - val &= ~clear; - val |= set; - write_sysreg(val, sctlr_el1); -} - #endif #endif /* __ASM_SYSREG_H */ diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index ffdaea7954bb..0ad1cf233470 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -37,7 +37,7 @@ static inline void __tlb_remove_table(void *_table) static inline void tlb_flush(struct mmu_gather *tlb) { - struct vm_area_struct vma = { .vm_mm = tlb->mm, }; + struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0); /* * The ASID allocator will either invalidate the ASID or mark diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index dfc61d73f740..a4a1901140ee 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -218,6 +218,13 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, dsb(ish); } +static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) +{ + unsigned long addr = __TLBI_VADDR(kaddr, 0); + + __tlbi(vaae1is, addr); + dsb(ish); +} #endif #endif diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index df48212f767b..49a0fee4f89b 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -11,7 +11,7 @@ struct cpu_topology { int llc_id; cpumask_t thread_sibling; cpumask_t core_sibling; - cpumask_t llc_siblings; + cpumask_t llc_sibling; }; extern struct cpu_topology cpu_topology[NR_CPUS]; @@ -20,9 +20,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS]; #define topology_core_id(cpu) (cpu_topology[cpu].core_id) #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); +void remove_cpu_topology(unsigned int cpuid); const struct 
cpumask *cpu_coregroup_mask(int cpu); #ifdef CONFIG_NUMA diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index a0baa9af5487..e0d0f5b856e7 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -43,7 +43,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 398 +#define __NR_compat_syscalls 399 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index ef292160748c..2cd6dcf8d246 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -260,7 +260,7 @@ __SYSCALL(117, sys_ni_syscall) #define __NR_fsync 118 __SYSCALL(__NR_fsync, sys_fsync) #define __NR_sigreturn 119 -__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper) +__SYSCALL(__NR_sigreturn, compat_sys_sigreturn) #define __NR_clone 120 __SYSCALL(__NR_clone, sys_clone) #define __NR_setdomainname 121 @@ -368,7 +368,7 @@ __SYSCALL(__NR_getresgid, sys_getresgid16) #define __NR_prctl 172 __SYSCALL(__NR_prctl, sys_prctl) #define __NR_rt_sigreturn 173 -__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper) +__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn) #define __NR_rt_sigaction 174 __SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction) #define __NR_rt_sigprocmask 175 @@ -382,9 +382,9 @@ __SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo) #define __NR_rt_sigsuspend 179 __SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend) #define __NR_pread64 180 -__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper) +__SYSCALL(__NR_pread64, compat_sys_aarch32_pread64) #define __NR_pwrite64 181 -__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper) +__SYSCALL(__NR_pwrite64, compat_sys_aarch32_pwrite64) #define __NR_chown 182 __SYSCALL(__NR_chown, sys_chown16) #define __NR_getcwd 183 @@ -406,11 +406,11 @@ __SYSCALL(__NR_vfork, sys_vfork) #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */ #define __NR_mmap2 192 -__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper) +__SYSCALL(__NR_mmap2, compat_sys_aarch32_mmap2) #define __NR_truncate64 193 -__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper) +__SYSCALL(__NR_truncate64, compat_sys_aarch32_truncate64) #define __NR_ftruncate64 194 -__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper) +__SYSCALL(__NR_ftruncate64, compat_sys_aarch32_ftruncate64) #define __NR_stat64 195 __SYSCALL(__NR_stat64, sys_stat64) #define __NR_lstat64 196 @@ -472,7 +472,7 @@ __SYSCALL(223, sys_ni_syscall) #define __NR_gettid 224 __SYSCALL(__NR_gettid, sys_gettid) #define __NR_readahead 225 -__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper) +__SYSCALL(__NR_readahead, compat_sys_aarch32_readahead) #define __NR_setxattr 226 __SYSCALL(__NR_setxattr, sys_setxattr) #define __NR_lsetxattr 227 @@ -554,15 +554,15 @@ __SYSCALL(__NR_clock_getres, compat_sys_clock_getres) #define __NR_clock_nanosleep 265 __SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep) #define __NR_statfs64 266 -__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper) +__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64) #define __NR_fstatfs64 267 -__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper) +__SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64) #define __NR_tgkill 268 __SYSCALL(__NR_tgkill, sys_tgkill) #define __NR_utimes 269 __SYSCALL(__NR_utimes, compat_sys_utimes) #define 
__NR_arm_fadvise64_64 270 -__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper) +__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64) #define __NR_pciconfig_iobase 271 __SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase) #define __NR_pciconfig_read 272 @@ -704,7 +704,7 @@ __SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list) #define __NR_splice 340 __SYSCALL(__NR_splice, sys_splice) #define __NR_sync_file_range2 341 -__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper) +__SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2) #define __NR_tee 342 __SYSCALL(__NR_tee, sys_tee) #define __NR_vmsplice 343 @@ -726,7 +726,7 @@ __SYSCALL(__NR_timerfd_create, sys_timerfd_create) #define __NR_eventfd 351 __SYSCALL(__NR_eventfd, sys_eventfd) #define __NR_fallocate 352 -__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper) +__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate) #define __NR_timerfd_settime 353 __SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime) #define __NR_timerfd_gettime 354 @@ -817,6 +817,8 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) __SYSCALL(__NR_pkey_free, sys_pkey_free) #define __NR_statx 397 __SYSCALL(__NR_statx, sys_statx) +#define __NR_rseq 398 +__SYSCALL(__NR_rseq, sys_rseq) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 4e76630dd655..97c3478ee6e7 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -39,6 +39,7 @@ #define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_VCPU_EVENTS #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -154,6 +155,18 @@ struct kvm_sync_regs { struct kvm_arch_memory_slot { }; +/* for KVM_GET/SET_VCPU_EVENTS */ +struct kvm_vcpu_events { + struct { + __u8 serror_pending; + __u8 serror_has_esr; + /* Align it to 8 bytes */ + __u8 pad[6]; + __u64 serror_esr; + } exception; + __u32 reserved[12]; +}; + /* If you need to interpret the index values, here is the key: */ #define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 #define KVM_REG_ARM_COPROC_SHIFT 16 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 0025f8691046..95ac7374d723 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -18,7 +18,8 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ hyp-stub.o psci.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ - smp.o smp_spin_table.o topology.o smccc-call.o + smp.o smp_spin_table.o topology.o smccc-call.o \ + syscall.o extra-$(CONFIG_EFI) := efi-entry.o @@ -27,7 +28,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE $(call if_changed,objcopy) arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ - sys_compat.o entry32.o + sys_compat.o arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index 7b09487ff8fb..ed46dc188b22 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -18,6 +18,7 @@ #include <linux/acpi.h> #include <linux/bootmem.h> #include <linux/cpumask.h> +#include <linux/efi.h> #include <linux/efi-bgrt.h> #include <linux/init.h> #include <linux/irq.h> @@ -29,13 +30,9 @@ #include <asm/cputype.h> #include <asm/cpu_ops.h> +#include <asm/pgtable.h> #include <asm/smp_plat.h> -#ifdef 
CONFIG_ACPI_APEI -# include <linux/efi.h> -# include <asm/pgtable.h> -#endif - int acpi_noirq = 1; /* skip ACPI IRQ initialization */ int acpi_disabled = 1; EXPORT_SYMBOL(acpi_disabled); @@ -239,8 +236,7 @@ done: } } -#ifdef CONFIG_ACPI_APEI -pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr) +pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) { /* * According to "Table 8 Map: EFI memory types to AArch64 memory @@ -261,4 +257,3 @@ pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr) return __pgprot(PROT_NORMAL_NC); return __pgprot(PROT_DEVICE_nGnRnE); } -#endif diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c index d190a7b231bf..4f4f1815e047 100644 --- a/arch/arm64/kernel/acpi_numa.c +++ b/arch/arm64/kernel/acpi_numa.c @@ -26,36 +26,73 @@ #include <linux/module.h> #include <linux/topology.h> -#include <acpi/processor.h> #include <asm/numa.h> -static int cpus_in_srat; +static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE }; -struct __node_cpu_hwid { - u32 node_id; /* logical node containing this CPU */ - u64 cpu_hwid; /* MPIDR for this CPU */ -}; +int __init acpi_numa_get_nid(unsigned int cpu) +{ + return acpi_early_node_map[cpu]; +} + +static inline int get_cpu_for_acpi_id(u32 uid) +{ + int cpu; + + for (cpu = 0; cpu < nr_cpu_ids; cpu++) + if (uid == get_acpi_id_for_cpu(cpu)) + return cpu; -static struct __node_cpu_hwid early_node_cpu_hwid[NR_CPUS] = { -[0 ... NR_CPUS - 1] = {NUMA_NO_NODE, PHYS_CPUID_INVALID} }; + return -EINVAL; +} -int acpi_numa_get_nid(unsigned int cpu, u64 hwid) +static int __init acpi_parse_gicc_pxm(struct acpi_subtable_header *header, + const unsigned long end) { - int i; + struct acpi_srat_gicc_affinity *pa; + int cpu, pxm, node; - for (i = 0; i < cpus_in_srat; i++) { - if (hwid == early_node_cpu_hwid[i].cpu_hwid) - return early_node_cpu_hwid[i].node_id; - } + if (srat_disabled()) + return -EINVAL; + + pa = (struct acpi_srat_gicc_affinity *)header; + if (!pa) + return -EINVAL; + + if (!(pa->flags & ACPI_SRAT_GICC_ENABLED)) + return 0; - return NUMA_NO_NODE; + pxm = pa->proximity_domain; + node = pxm_to_node(pxm); + + /* + * If we can't map the UID to a logical cpu this + * means that the UID is not part of possible cpus + * so we do not need a NUMA mapping for it, skip + * the SRAT entry and keep parsing. 
+ */ + cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid); + if (cpu < 0) + return 0; + + acpi_early_node_map[cpu] = node; + pr_info("SRAT: PXM %d -> MPIDR 0x%llx -> Node %d\n", pxm, + cpu_logical_map(cpu), node); + + return 0; +} + +void __init acpi_map_cpus_to_nodes(void) +{ + acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GICC_AFFINITY, + acpi_parse_gicc_pxm, 0); } /* Callback for Proximity Domain -> ACPI processor UID mapping */ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { int pxm, node; - phys_cpuid_t mpidr; if (srat_disabled()) return; @@ -70,12 +107,6 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) if (!(pa->flags & ACPI_SRAT_GICC_ENABLED)) return; - if (cpus_in_srat >= NR_CPUS) { - pr_warn_once("SRAT: cpu_to_node_map[%d] is too small, may not be able to use all cpus\n", - NR_CPUS); - return; - } - pxm = pa->proximity_domain; node = acpi_map_pxm_to_node(pxm); @@ -85,20 +116,7 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) return; } - mpidr = acpi_map_madt_entry(pa->acpi_processor_uid); - if (mpidr == PHYS_CPUID_INVALID) { - pr_err("SRAT: PXM %d with ACPI ID %d has no valid MPIDR in MADT\n", - pxm, pa->acpi_processor_uid); - bad_srat(); - return; - } - - early_node_cpu_hwid[cpus_in_srat].node_id = node; - early_node_cpu_hwid[cpus_in_srat].cpu_hwid = mpidr; node_set(node, numa_nodes_parsed); - cpus_in_srat++; - pr_info("SRAT: PXM %d -> MPIDR 0x%Lx -> Node %d\n", - pxm, mpidr, node); } int __init arm64_acpi_numa_init(void) diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 36fb069fd049..b5d603992d40 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -47,11 +47,11 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) unsigned long replptr; if (kernel_text_address(pc)) - return 1; + return true; replptr = (unsigned long)ALT_REPL_PTR(alt); if (pc >= replptr && pc <= (replptr + alt->alt_len)) - return 0; + return false; /* * Branching into *another* alternate sequence is doomed, and diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index d4707abb2f16..92be1d12d590 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -441,8 +441,8 @@ static struct undef_hook swp_hooks[] = { { .instr_mask = 0x0fb00ff0, .instr_val = 0x01000090, - .pstate_mask = COMPAT_PSR_MODE_MASK, - .pstate_val = COMPAT_PSR_MODE_USR, + .pstate_mask = PSR_AA32_MODE_MASK, + .pstate_val = PSR_AA32_MODE_USR, .fn = swp_handler }, { } @@ -511,9 +511,9 @@ ret: static int cp15_barrier_set_hw_mode(bool enable) { if (enable) - config_sctlr_el1(0, SCTLR_EL1_CP15BEN); + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_CP15BEN); else - config_sctlr_el1(SCTLR_EL1_CP15BEN, 0); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_CP15BEN, 0); return 0; } @@ -521,15 +521,15 @@ static struct undef_hook cp15_barrier_hooks[] = { { .instr_mask = 0x0fff0fdf, .instr_val = 0x0e070f9a, - .pstate_mask = COMPAT_PSR_MODE_MASK, - .pstate_val = COMPAT_PSR_MODE_USR, + .pstate_mask = PSR_AA32_MODE_MASK, + .pstate_val = PSR_AA32_MODE_USR, .fn = cp15barrier_handler, }, { .instr_mask = 0x0fff0fff, .instr_val = 0x0e070f95, - .pstate_mask = COMPAT_PSR_MODE_MASK, - .pstate_val = COMPAT_PSR_MODE_USR, + .pstate_mask = PSR_AA32_MODE_MASK, + .pstate_val = PSR_AA32_MODE_USR, .fn = cp15barrier_handler, }, { } @@ -548,9 +548,9 @@ static int setend_set_hw_mode(bool enable) 
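Stepping back to the acpi_numa.c rework above: acpi_map_cpus_to_nodes() fills acpi_early_node_map[] from the SRAT GICC affinity entries, and acpi_numa_get_nid() now takes a logical CPU number rather than an MPIDR. A hedged sketch of the kind of consumer this enables, assuming the existing early_map_cpu_to_node() helper; the function name is illustrative, not part of this patch:

	/* Illustrative only: propagate the SRAT-derived node of each possible
	 * CPU into the early cpu-to-node map once SRAT has been parsed. */
	static void __init example_map_cpu_nodes(void)
	{
		unsigned int cpu;

		acpi_map_cpus_to_nodes();

		for_each_possible_cpu(cpu)
			early_map_cpu_to_node(cpu, acpi_numa_get_nid(cpu));
	}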
return -EINVAL; if (enable) - config_sctlr_el1(SCTLR_EL1_SED, 0); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, 0); else - config_sctlr_el1(0, SCTLR_EL1_SED); + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_SED); return 0; } @@ -562,10 +562,10 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian) if (big_endian) { insn = "setend be"; - regs->pstate |= COMPAT_PSR_E_BIT; + regs->pstate |= PSR_AA32_E_BIT; } else { insn = "setend le"; - regs->pstate &= ~COMPAT_PSR_E_BIT; + regs->pstate &= ~PSR_AA32_E_BIT; } trace_instruction_emulation(insn, regs->pc); @@ -593,16 +593,16 @@ static struct undef_hook setend_hooks[] = { { .instr_mask = 0xfffffdff, .instr_val = 0xf1010000, - .pstate_mask = COMPAT_PSR_MODE_MASK, - .pstate_val = COMPAT_PSR_MODE_USR, + .pstate_mask = PSR_AA32_MODE_MASK, + .pstate_val = PSR_AA32_MODE_USR, .fn = a32_setend_handler, }, { /* Thumb mode */ .instr_mask = 0x0000fff7, .instr_val = 0x0000b650, - .pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK), - .pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR), + .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK), + .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR), .fn = t16_setend_handler, }, {} diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h index 6c2b1b4f57c9..fad90e4935fb 100644 --- a/arch/arm64/kernel/cpu-reset.h +++ b/arch/arm64/kernel/cpu-reset.h @@ -16,13 +16,14 @@ void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry, unsigned long arg0, unsigned long arg1, unsigned long arg2); -static inline void __noreturn cpu_soft_restart(unsigned long el2_switch, - unsigned long entry, unsigned long arg0, unsigned long arg1, - unsigned long arg2) +static inline void __noreturn cpu_soft_restart(unsigned long entry, + unsigned long arg0, + unsigned long arg1, + unsigned long arg2) { typeof(__cpu_soft_restart) *restart; - el2_switch = el2_switch && !is_kernel_in_hyp_mode() && + unsigned long el2_switch = !is_kernel_in_hyp_mode() && is_hyp_mode_available(); restart = (void *)__pa_symbol(__cpu_soft_restart); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 1d2b6d768efe..dec10898d688 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -65,19 +65,24 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) } static bool -has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, - int scope) +has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, + int scope) { + u64 mask = CTR_CACHE_MINLINE_MASK; + + /* Skip matching the min line sizes for cache type check */ + if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE) + mask ^= arm64_ftr_reg_ctrel0.strict_mask; + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); - return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) != - (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask); + return (read_cpuid_cachetype() & mask) != + (arm64_ftr_reg_ctrel0.sys_val & mask); } static void cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) { - /* Clear SCTLR_EL1.UCT */ - config_sctlr_el1(SCTLR_EL1_UCT, 0); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); } atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); @@ -101,7 +106,7 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, for (i = 0; i < SZ_2K; i += 0x80) memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); - flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); + 
__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); } static void __install_bp_hardening_cb(bp_hardening_cb_t fn, @@ -613,7 +618,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "Mismatched cache line size", .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE, - .matches = has_mismatched_cache_line_size, + .matches = has_mismatched_cache_type, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .cpu_enable = cpu_enable_trap_ctr_access, + }, + { + .desc = "Mismatched cache type", + .capability = ARM64_MISMATCHED_CACHE_TYPE, + .matches = has_mismatched_cache_type, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .cpu_enable = cpu_enable_trap_ctr_access, }, @@ -649,7 +661,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .cpu_enable = enable_smccc_arch_workaround_1, ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus), }, @@ -658,7 +669,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "EL2 vector hardening", .capability = ARM64_HARDEN_EL2_VECTORS, - .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), }, #endif diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f24892a40d2c..e238b7932096 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -192,6 +192,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0), @@ -214,7 +215,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = { * If we have differing I-cache policies, report it as the weakest - VIPT. 
*/ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */ - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -1026,6 +1027,14 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) } #endif +static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused) +{ + u64 val = read_sysreg_s(SYS_CLIDR_EL1); + + /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */ + WARN_ON(val & (7 << 27 | 7 << 21)); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -1182,6 +1191,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cache_dic, }, + { + .desc = "Stage-2 Force Write-Back", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_HAS_STAGE2_FWB, + .sys_reg = SYS_ID_AA64MMFR2_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64MMFR2_FWB_SHIFT, + .min_field_value = 1, + .matches = has_cpuid_feature, + .cpu_enable = cpu_has_fwb, + }, #ifdef CONFIG_ARM64_HW_AFDBM { /* @@ -1351,9 +1371,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, static void update_cpu_capabilities(u16 scope_mask) { - __update_cpu_capabilities(arm64_features, scope_mask, "detected:"); __update_cpu_capabilities(arm64_errata, scope_mask, "enabling workaround for"); + __update_cpu_capabilities(arm64_features, scope_mask, "detected:"); } static int __enable_cpu_capability(void *arg) @@ -1408,8 +1428,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps, static void __init enable_cpu_capabilities(u16 scope_mask) { - __enable_cpu_capabilities(arm64_features, scope_mask); __enable_cpu_capabilities(arm64_errata, scope_mask); + __enable_cpu_capabilities(arm64_features, scope_mask); } /* @@ -1723,7 +1743,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn) static struct undef_hook mrs_hook = { .instr_mask = 0xfff00000, .instr_val = 0xd5300000, - .pstate_mask = COMPAT_PSR_MODE_MASK, + .pstate_mask = PSR_AA32_MODE_MASK, .pstate_val = PSR_MODE_EL0t, .fn = emulate_mrs, }; diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 28ad8799406f..09dbea221a27 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -41,19 +41,9 @@ * Context tracking subsystem. Used to instrument transitions * between user and kernel mode. */ - .macro ct_user_exit, syscall = 0 + .macro ct_user_exit #ifdef CONFIG_CONTEXT_TRACKING bl context_tracking_user_exit - .if \syscall == 1 - /* - * Save/restore needed during syscalls. Restore syscall arguments from - * the values already saved on stack during kernel_entry. - */ - ldp x0, x1, [sp] - ldp x2, x3, [sp, #S_X2] - ldp x4, x5, [sp, #S_X4] - ldp x6, x7, [sp, #S_X6] - .endif #endif .endm @@ -63,6 +53,12 @@ #endif .endm + .macro clear_gp_regs + .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 + mov x\n, xzr + .endr + .endm + /* * Bad Abort numbers *----------------- @@ -140,20 +136,21 @@ alternative_else_nop_endif // This macro corrupts x0-x3. It is the caller's duty // to save/restore them if required. 
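The ARM64_HAS_STAGE2_FWB entry added above is detected by has_cpuid_feature(), which reads the named ID register, extracts the unsigned 4-bit field at field_pos and compares it against min_field_value. A minimal standalone sketch of that comparison, with a hardcoded sample value standing in for ID_AA64MMFR2_EL1 and helper names that are illustrative rather than the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ID_AA64MMFR2_FWB_SHIFT 40	/* assumed field position, 4 bits wide */

/* Extract an unsigned 4-bit ID register field at the given shift. */
static unsigned int extract_unsigned_field(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}

/* The feature is present when the field is at least the minimum value. */
static bool has_feature(uint64_t reg, unsigned int shift, unsigned int min)
{
	return extract_unsigned_field(reg, shift) >= min;
}

int main(void)
{
	/* Pretend ID_AA64MMFR2_EL1 reports FWB == 1. */
	uint64_t id_aa64mmfr2 = 1ULL << ID_AA64MMFR2_FWB_SHIFT;

	printf("Stage-2 FWB supported: %d\n",
	       has_feature(id_aa64mmfr2, ID_AA64MMFR2_FWB_SHIFT, 1));
	return 0;
}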
- .macro apply_ssbd, state, targ, tmp1, tmp2 + .macro apply_ssbd, state, tmp1, tmp2 #ifdef CONFIG_ARM64_SSBD alternative_cb arm64_enable_wa2_handling - b \targ + b .L__asm_ssbd_skip\@ alternative_cb_end ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 - cbz \tmp2, \targ + cbz \tmp2, .L__asm_ssbd_skip\@ ldr \tmp2, [tsk, #TSK_TI_FLAGS] - tbnz \tmp2, #TIF_SSBD, \targ + tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 mov w1, #\state alternative_cb arm64_update_smccc_conduit nop // Patched to SMC/HVC #0 alternative_cb_end +.L__asm_ssbd_skip\@: #endif .endm @@ -178,20 +175,14 @@ alternative_cb_end stp x28, x29, [sp, #16 * 14] .if \el == 0 + clear_gp_regs mrs x21, sp_el0 ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear, ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug disable_step_tsk x19, x20 // exceptions when scheduling. - apply_ssbd 1, 1f, x22, x23 - -#ifdef CONFIG_ARM64_SSBD - ldp x0, x1, [sp, #16 * 0] - ldp x2, x3, [sp, #16 * 1] -#endif -1: + apply_ssbd 1, x22, x23 - mov x29, xzr // fp pointed to user-space .else add x21, sp, #S_FRAME_SIZE get_thread_info tsk @@ -331,8 +322,7 @@ alternative_if ARM64_WORKAROUND_845719 alternative_else_nop_endif #endif 3: - apply_ssbd 0, 5f, x0, x1 -5: + apply_ssbd 0, x0, x1 .endif msr elr_el1, x21 // set up the return data @@ -720,14 +710,9 @@ el0_sync_compat: b.ge el0_dbg b el0_inv el0_svc_compat: - /* - * AArch32 syscall handling - */ - ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags - adrp stbl, compat_sys_call_table // load compat syscall table pointer - mov wscno, w7 // syscall number in w7 (r7) - mov wsc_nr, #__NR_compat_syscalls - b el0_svc_naked + mov x0, sp + bl el0_svc_compat_handler + b ret_to_user .align 6 el0_irq_compat: @@ -896,25 +881,6 @@ el0_error_naked: b ret_to_user ENDPROC(el0_error) - -/* - * This is the fast syscall return path. We do as little as possible here, - * and this includes saving x0 back into the kernel stack. - */ -ret_fast_syscall: - disable_daif - str x0, [sp, #S_X0] // returned x0 - ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing - and x2, x1, #_TIF_SYSCALL_WORK - cbnz x2, ret_fast_syscall_trace - and x2, x1, #_TIF_WORK_MASK - cbnz x2, work_pending - enable_step_tsk x1, x2 - kernel_exit 0 -ret_fast_syscall_trace: - enable_daif - b __sys_trace_return_skipped // we already saved x0 - /* * Ok, we need to do extra processing, enter the slow path. */ @@ -936,6 +902,9 @@ ret_to_user: cbnz x2, work_pending finish_ret_to_user: enable_step_tsk x1, x2 +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + bl stackleak_erase +#endif kernel_exit 0 ENDPROC(ret_to_user) @@ -944,85 +913,10 @@ ENDPROC(ret_to_user) */ .align 6 el0_svc: - ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags - adrp stbl, sys_call_table // load syscall table pointer - mov wscno, w8 // syscall number in w8 - mov wsc_nr, #__NR_syscalls - -#ifdef CONFIG_ARM64_SVE -alternative_if_not ARM64_SVE - b el0_svc_naked -alternative_else_nop_endif - tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set: - bic x16, x16, #_TIF_SVE // discard SVE state - str x16, [tsk, #TSK_TI_FLAGS] - - /* - * task_fpsimd_load() won't be called to update CPACR_EL1 in - * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only - * happens if a context switch or kernel_neon_begin() or context - * modification (sigreturn, ptrace) intervenes. 
- * So, ensure that CPACR_EL1 is already correct for the fast-path case: - */ - mrs x9, cpacr_el1 - bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0 - msr cpacr_el1, x9 // synchronised by eret to el0 -#endif - -el0_svc_naked: // compat entry point - stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number - enable_daif - ct_user_exit 1 - - tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks - b.ne __sys_trace - cmp wscno, wsc_nr // check upper syscall limit - b.hs ni_sys - mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number - ldr x16, [stbl, xscno, lsl #3] // address in the syscall table - blr x16 // call sys_* routine - b ret_fast_syscall -ni_sys: - mov x0, sp - bl do_ni_syscall - b ret_fast_syscall -ENDPROC(el0_svc) - - /* - * This is the really slow path. We're going to be doing context - * switches, and waiting for our parent to respond. - */ -__sys_trace: - cmp wscno, #NO_SYSCALL // user-issued syscall(-1)? - b.ne 1f - mov x0, #-ENOSYS // set default errno if so - str x0, [sp, #S_X0] -1: mov x0, sp - bl syscall_trace_enter - cmp w0, #NO_SYSCALL // skip the syscall? - b.eq __sys_trace_return_skipped - mov wscno, w0 // syscall number (possibly new) - mov x1, sp // pointer to regs - cmp wscno, wsc_nr // check upper syscall limit - b.hs __ni_sys_trace - ldp x0, x1, [sp] // restore the syscall args - ldp x2, x3, [sp, #S_X2] - ldp x4, x5, [sp, #S_X4] - ldp x6, x7, [sp, #S_X6] - ldr x16, [stbl, xscno, lsl #3] // address in the syscall table - blr x16 // call sys_* routine - -__sys_trace_return: - str x0, [sp, #S_X0] // save returned x0 -__sys_trace_return_skipped: mov x0, sp - bl syscall_trace_exit + bl el0_svc_handler b ret_to_user - -__ni_sys_trace: - mov x0, sp - bl do_ni_syscall - b __sys_trace_return +ENDPROC(el0_svc) .popsection // .entry.text @@ -1138,14 +1032,6 @@ __entry_tramp_data_start: #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ /* - * Special system call wrappers. - */ -ENTRY(sys_rt_sigreturn_wrapper) - mov x0, sp - b sys_rt_sigreturn -ENDPROC(sys_rt_sigreturn_wrapper) - -/* * Register switch for AArch64. The callee-saved registers need to be saved * and restored. On entry: * x0 = previous task_struct (must be preserved across the switch) diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S deleted file mode 100644 index f332d5d1f6b4..000000000000 --- a/arch/arm64/kernel/entry32.S +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Compat system call wrappers - * - * Copyright (C) 2012 ARM Ltd. - * Authors: Will Deacon <will.deacon@arm.com> - * Catalin Marinas <catalin.marinas@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/linkage.h> -#include <linux/const.h> - -#include <asm/assembler.h> -#include <asm/asm-offsets.h> -#include <asm/errno.h> -#include <asm/page.h> - -/* - * System call wrappers for the AArch32 compatibility layer. 
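With the hand-written el0_svc dispatch removed from entry.S above, syscall number checking and table indexing move into C (el0_svc_handler, dispatching through the syscall_fn_t table reworked later in this series). The real handler is not part of these hunks; the sketch below only illustrates the bounds-checked table dispatch the deleted assembly performed (compare against the syscall count, index the table, fall back to a "not implemented" handler), with hypothetical stand-ins for pt_regs and the table contents:

#include <stdio.h>

#define NR_SYSCALLS	4
#define ENOSYS		38

struct fake_regs { long regs[31]; int syscallno; };

typedef long (*syscall_fn_t)(struct fake_regs *regs);

static long sys_ni_syscall(struct fake_regs *regs) { (void)regs; return -ENOSYS; }
static long sys_hello(struct fake_regs *regs)      { (void)regs; return 42; }

/* Hypothetical table: unimplemented entries fall back to sys_ni_syscall. */
static const syscall_fn_t table[NR_SYSCALLS] = {
	[0] = sys_ni_syscall,
	[1] = sys_hello,
	[2] = sys_ni_syscall,
	[3] = sys_ni_syscall,
};

/* Bounds-checked dispatch, mirroring the removed cmp/b.hs/ldr/blr sequence. */
static long invoke_syscall(struct fake_regs *regs, unsigned int scno)
{
	if (scno >= NR_SYSCALLS)
		return sys_ni_syscall(regs);
	return table[scno](regs);
}

int main(void)
{
	struct fake_regs regs = { .syscallno = 1 };

	printf("ret = %ld\n", invoke_syscall(&regs, regs.syscallno));
	return 0;
}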
- */ - -ENTRY(compat_sys_sigreturn_wrapper) - mov x0, sp - b compat_sys_sigreturn -ENDPROC(compat_sys_sigreturn_wrapper) - -ENTRY(compat_sys_rt_sigreturn_wrapper) - mov x0, sp - b compat_sys_rt_sigreturn -ENDPROC(compat_sys_rt_sigreturn_wrapper) - -ENTRY(compat_sys_statfs64_wrapper) - mov w3, #84 - cmp w1, #88 - csel w1, w3, w1, eq - b compat_sys_statfs64 -ENDPROC(compat_sys_statfs64_wrapper) - -ENTRY(compat_sys_fstatfs64_wrapper) - mov w3, #84 - cmp w1, #88 - csel w1, w3, w1, eq - b compat_sys_fstatfs64 -ENDPROC(compat_sys_fstatfs64_wrapper) - -/* - * Note: off_4k (w5) is always in units of 4K. If we can't do the - * requested offset because it is not page-aligned, we return -EINVAL. - */ -ENTRY(compat_sys_mmap2_wrapper) -#if PAGE_SHIFT > 12 - tst w5, #~PAGE_MASK >> 12 - b.ne 1f - lsr w5, w5, #PAGE_SHIFT - 12 -#endif - b sys_mmap_pgoff -1: mov x0, #-EINVAL - ret -ENDPROC(compat_sys_mmap2_wrapper) - -/* - * Wrappers for AArch32 syscalls that either take 64-bit parameters - * in registers or that take 32-bit parameters which require sign - * extension. - */ -ENTRY(compat_sys_pread64_wrapper) - regs_to_64 x3, x4, x5 - b sys_pread64 -ENDPROC(compat_sys_pread64_wrapper) - -ENTRY(compat_sys_pwrite64_wrapper) - regs_to_64 x3, x4, x5 - b sys_pwrite64 -ENDPROC(compat_sys_pwrite64_wrapper) - -ENTRY(compat_sys_truncate64_wrapper) - regs_to_64 x1, x2, x3 - b sys_truncate -ENDPROC(compat_sys_truncate64_wrapper) - -ENTRY(compat_sys_ftruncate64_wrapper) - regs_to_64 x1, x2, x3 - b sys_ftruncate -ENDPROC(compat_sys_ftruncate64_wrapper) - -ENTRY(compat_sys_readahead_wrapper) - regs_to_64 x1, x2, x3 - mov w2, w4 - b sys_readahead -ENDPROC(compat_sys_readahead_wrapper) - -ENTRY(compat_sys_fadvise64_64_wrapper) - mov w6, w1 - regs_to_64 x1, x2, x3 - regs_to_64 x2, x4, x5 - mov w3, w6 - b sys_fadvise64_64 -ENDPROC(compat_sys_fadvise64_64_wrapper) - -ENTRY(compat_sys_sync_file_range2_wrapper) - regs_to_64 x2, x2, x3 - regs_to_64 x3, x4, x5 - b sys_sync_file_range2 -ENDPROC(compat_sys_sync_file_range2_wrapper) - -ENTRY(compat_sys_fallocate_wrapper) - regs_to_64 x2, x2, x3 - regs_to_64 x3, x4, x5 - b sys_fallocate -ENDPROC(compat_sys_fallocate_wrapper) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 84c68b14f1b2..58c53bc96928 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -159,25 +159,6 @@ static void sve_free(struct task_struct *task) __sve_free(task); } -static void change_cpacr(u64 val, u64 mask) -{ - u64 cpacr = read_sysreg(CPACR_EL1); - u64 new = (cpacr & ~mask) | val; - - if (new != cpacr) - write_sysreg(new, CPACR_EL1); -} - -static void sve_user_disable(void) -{ - change_cpacr(0, CPACR_EL1_ZEN_EL0EN); -} - -static void sve_user_enable(void) -{ - change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN); -} - /* * TIF_SVE controls whether a task can use SVE without trapping while * in userspace, and also the way a task's FPSIMD/SVE state is stored diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 413dbe530da8..8c9644376326 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -343,14 +343,13 @@ static int get_hbp_len(u8 hbp_len) /* * Check whether bp virtual address is in kernel space. 
*/ -int arch_check_bp_in_kernelspace(struct perf_event *bp) +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - va = info->address; - len = get_hbp_len(info->ctrl.len); + va = hw->address; + len = get_hbp_len(hw->ctrl.len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } @@ -421,53 +420,53 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, /* * Construct an arch_hw_breakpoint from a perf_event. */ -static int arch_build_bp_info(struct perf_event *bp) +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); - /* Type */ - switch (bp->attr.bp_type) { + switch (attr->bp_type) { case HW_BREAKPOINT_X: - info->ctrl.type = ARM_BREAKPOINT_EXECUTE; + hw->ctrl.type = ARM_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R: - info->ctrl.type = ARM_BREAKPOINT_LOAD; + hw->ctrl.type = ARM_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W: - info->ctrl.type = ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW: - info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; break; default: return -EINVAL; } /* Len */ - switch (bp->attr.bp_len) { + switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: - info->ctrl.len = ARM_BREAKPOINT_LEN_1; + hw->ctrl.len = ARM_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - info->ctrl.len = ARM_BREAKPOINT_LEN_2; + hw->ctrl.len = ARM_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_3: - info->ctrl.len = ARM_BREAKPOINT_LEN_3; + hw->ctrl.len = ARM_BREAKPOINT_LEN_3; break; case HW_BREAKPOINT_LEN_4: - info->ctrl.len = ARM_BREAKPOINT_LEN_4; + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_5: - info->ctrl.len = ARM_BREAKPOINT_LEN_5; + hw->ctrl.len = ARM_BREAKPOINT_LEN_5; break; case HW_BREAKPOINT_LEN_6: - info->ctrl.len = ARM_BREAKPOINT_LEN_6; + hw->ctrl.len = ARM_BREAKPOINT_LEN_6; break; case HW_BREAKPOINT_LEN_7: - info->ctrl.len = ARM_BREAKPOINT_LEN_7; + hw->ctrl.len = ARM_BREAKPOINT_LEN_7; break; case HW_BREAKPOINT_LEN_8: - info->ctrl.len = ARM_BREAKPOINT_LEN_8; + hw->ctrl.len = ARM_BREAKPOINT_LEN_8; break; default: return -EINVAL; @@ -478,37 +477,37 @@ static int arch_build_bp_info(struct perf_event *bp) * AArch32 also requires breakpoints of length 2 for Thumb. * Watchpoints can be of length 1, 2, 4 or 8 bytes. */ - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) { if (is_compat_bp(bp)) { - if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 && - info->ctrl.len != ARM_BREAKPOINT_LEN_4) + if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && + hw->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; - } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) { + } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) { /* * FIXME: Some tools (I'm looking at you perf) assume * that breakpoints should be sizeof(long). This * is nonsense. For now, we fix up the parameter * but we should probably return -EINVAL instead. */ - info->ctrl.len = ARM_BREAKPOINT_LEN_4; + hw->ctrl.len = ARM_BREAKPOINT_LEN_4; } } /* Address */ - info->address = bp->attr.bp_addr; + hw->address = attr->bp_addr; /* * Privilege * Note that we disallow combined EL0/EL1 breakpoints because * that would complicate the stepping code. 
*/ - if (arch_check_bp_in_kernelspace(bp)) - info->ctrl.privilege = AARCH64_BREAKPOINT_EL1; + if (arch_check_bp_in_kernelspace(hw)) + hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1; else - info->ctrl.privilege = AARCH64_BREAKPOINT_EL0; + hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0; /* Enabled? */ - info->ctrl.enabled = !bp->attr.disabled; + hw->ctrl.enabled = !attr->disabled; return 0; } @@ -516,14 +515,15 @@ static int arch_build_bp_info(struct perf_event *bp) /* * Validate the arch-specific HW Breakpoint register settings. */ -int arch_validate_hwbkpt_settings(struct perf_event *bp) +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) { - struct arch_hw_breakpoint *info = counter_arch_bp(bp); int ret; u64 alignment_mask, offset; /* Build the arch_hw_breakpoint. */ - ret = arch_build_bp_info(bp); + ret = arch_build_bp_info(bp, attr, hw); if (ret) return ret; @@ -537,42 +537,42 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * that here. */ if (is_compat_bp(bp)) { - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; else alignment_mask = 0x3; - offset = info->address & alignment_mask; + offset = hw->address & alignment_mask; switch (offset) { case 0: /* Aligned */ break; case 1: /* Allow single byte watchpoint. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; case 2: /* Allow halfword watchpoints and breakpoints. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) + if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; default: return -EINVAL; } } else { - if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) + if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) alignment_mask = 0x3; else alignment_mask = 0x7; - offset = info->address & alignment_mask; + offset = hw->address & alignment_mask; } - info->address &= ~alignment_mask; - info->ctrl.len <<= offset; + hw->address &= ~alignment_mask; + hw->ctrl.len <<= offset; /* * Disallow per-task kernel breakpoints since these would * complicate the stepping code. 
*/ - if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) + if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) return -EINVAL; return 0; diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 816d03c4c913..2b3413549734 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -149,20 +149,6 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn) return __aarch64_insn_write(addr, cpu_to_le32(insn)); } -static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) -{ - if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS) - return false; - - return aarch64_insn_is_b(insn) || - aarch64_insn_is_bl(insn) || - aarch64_insn_is_svc(insn) || - aarch64_insn_is_hvc(insn) || - aarch64_insn_is_smc(insn) || - aarch64_insn_is_brk(insn) || - aarch64_insn_is_nop(insn); -} - bool __kprobes aarch64_insn_uses_literal(u32 insn) { /* ldr/ldrsw (literal), prfm */ @@ -189,22 +175,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn) aarch64_insn_is_bcond(insn); } -/* - * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a - * Section B2.6.5 "Concurrent modification and execution of instructions": - * Concurrent modification and execution of instructions can lead to the - * resulting instruction performing any behavior that can be achieved by - * executing any sequence of instructions that can be executed from the - * same Exception level, except where the instruction before modification - * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC, - * or SMC instruction. - */ -bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn) -{ - return __aarch64_insn_hotpatch_safe(old_insn) && - __aarch64_insn_hotpatch_safe(new_insn); -} - int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) { u32 *tp = addr; @@ -216,8 +186,8 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) ret = aarch64_insn_write(tp, insn); if (ret == 0) - flush_icache_range((uintptr_t)tp, - (uintptr_t)tp + AARCH64_INSN_SIZE); + __flush_icache_range((uintptr_t)tp, + (uintptr_t)tp + AARCH64_INSN_SIZE); return ret; } @@ -239,11 +209,6 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) for (i = 0; ret == 0 && i < pp->insn_cnt; i++) ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], pp->new_insns[i]); - /* - * aarch64_insn_patch_text_nosync() calls flush_icache_range(), - * which ends with "dsb; isb" pair guaranteeing global - * visibility. - */ /* Notify other processors with an additional increment. */ atomic_inc(&pp->cpu_count); } else { @@ -255,8 +220,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) return ret; } -static -int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt) +int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) { struct aarch64_insn_patch patch = { .text_addrs = addrs, @@ -272,34 +236,6 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt) cpu_online_mask); } -int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) -{ - int ret; - u32 insn; - - /* Unsafe to patch multiple instructions without synchronizaiton */ - if (cnt == 1) { - ret = aarch64_insn_read(addrs[0], &insn); - if (ret) - return ret; - - if (aarch64_insn_hotpatch_safe(insn, insns[0])) { - /* - * ARMv8 architecture doesn't guarantee all CPUs see - * the new instruction after returning from function - * aarch64_insn_patch_text_nosync(). 
So send IPIs to - * all other CPUs to achieve instruction - * synchronization. - */ - ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]); - kick_all_cpus_sync(); - return ret; - } - } - - return aarch64_insn_patch_text_sync(addrs, insns, cnt); -} - static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type, u32 *maskp, int *shiftp) { diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 60e5fc661f74..780a12f59a8f 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -42,16 +42,6 @@ int arch_show_interrupts(struct seq_file *p, int prec) return 0; } -void (*handle_arch_irq)(struct pt_regs *) = NULL; - -void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) -{ - if (handle_arch_irq) - return; - - handle_arch_irq = handle_irq; -} - #ifdef CONFIG_VMAP_STACK static void init_irq_stacks(void) { diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c index c2dd1ad3e648..e0756416e567 100644 --- a/arch/arm64/kernel/jump_label.c +++ b/arch/arm64/kernel/jump_label.c @@ -36,7 +36,7 @@ void arch_jump_label_transform(struct jump_entry *entry, insn = aarch64_insn_gen_nop(); } - aarch64_insn_patch_text(&addr, &insn, 1); + aarch64_insn_patch_text_nosync(addr, insn); } void arch_jump_label_transform_static(struct jump_entry *entry, diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index f76ea92dff91..f6a5c6bc1434 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -184,8 +184,15 @@ void machine_kexec(struct kimage *kimage) /* Flush the reboot_code_buffer in preparation for its execution. */ __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size); - flush_icache_range((uintptr_t)reboot_code_buffer, - arm64_relocate_new_kernel_size); + + /* + * Although we've killed off the secondary CPUs, we don't update + * the online mask if we're handling a crash kernel and consequently + * need to avoid flush_icache_range(), which will attempt to IPI + * the offline CPUs. Therefore, we must use the __* variant here. + */ + __flush_icache_range((uintptr_t)reboot_code_buffer, + arm64_relocate_new_kernel_size); /* Flush the kimage list and its buffers. */ kexec_list_flush(kimage); @@ -207,8 +214,7 @@ void machine_kexec(struct kimage *kimage) * relocation is complete. */ - cpu_soft_restart(kimage != kexec_crash_image, - reboot_code_buffer_phys, kimage->head, kimage->start, 0); + cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0); BUG(); /* Should never get here. 
*/ } @@ -361,4 +367,5 @@ void arch_crash_save_vmcoreinfo(void) kimage_voffset); vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n", PHYS_OFFSET); + vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); } diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 33147aacdafd..8e38d5267f22 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -25,6 +25,7 @@ #include <asm/virt.h> #include <linux/acpi.h> +#include <linux/clocksource.h> #include <linux/of.h> #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> @@ -446,9 +447,16 @@ static struct attribute_group armv8_pmuv3_events_attr_group = { }; PMU_FORMAT_ATTR(event, "config:0-15"); +PMU_FORMAT_ATTR(long, "config1:0"); + +static inline bool armv8pmu_event_is_64bit(struct perf_event *event) +{ + return event->attr.config1 & 0x1; +} static struct attribute *armv8_pmuv3_format_attrs[] = { &format_attr_event.attr, + &format_attr_long.attr, NULL, }; @@ -466,6 +474,21 @@ static struct attribute_group armv8_pmuv3_format_attr_group = { (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) /* + * We must chain two programmable counters for 64 bit events, + * except when we have allocated the 64bit cycle counter (for CPU + * cycles event). This must be called only when the event has + * a counter allocated. + */ +static inline bool armv8pmu_event_is_chained(struct perf_event *event) +{ + int idx = event->hw.idx; + + return !WARN_ON(idx < 0) && + armv8pmu_event_is_64bit(event) && + (idx != ARMV8_IDX_CYCLE_COUNTER); +} + +/* * ARMv8 low level PMU access */ @@ -503,34 +526,68 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx)); } -static inline int armv8pmu_select_counter(int idx) +static inline void armv8pmu_select_counter(int idx) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); write_sysreg(counter, pmselr_el0); isb(); +} - return idx; +static inline u32 armv8pmu_read_evcntr(int idx) +{ + armv8pmu_select_counter(idx); + return read_sysreg(pmxevcntr_el0); +} + +static inline u64 armv8pmu_read_hw_counter(struct perf_event *event) +{ + int idx = event->hw.idx; + u64 val = 0; + + val = armv8pmu_read_evcntr(idx); + if (armv8pmu_event_is_chained(event)) + val = (val << 32) | armv8pmu_read_evcntr(idx - 1); + return val; } -static inline u32 armv8pmu_read_counter(struct perf_event *event) +static inline u64 armv8pmu_read_counter(struct perf_event *event) { struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - u32 value = 0; + u64 value = 0; if (!armv8pmu_counter_valid(cpu_pmu, idx)) pr_err("CPU%u reading wrong counter %d\n", smp_processor_id(), idx); else if (idx == ARMV8_IDX_CYCLE_COUNTER) value = read_sysreg(pmccntr_el0); - else if (armv8pmu_select_counter(idx) == idx) - value = read_sysreg(pmxevcntr_el0); + else + value = armv8pmu_read_hw_counter(event); return value; } -static inline void armv8pmu_write_counter(struct perf_event *event, u32 value) +static inline void armv8pmu_write_evcntr(int idx, u32 value) +{ + armv8pmu_select_counter(idx); + write_sysreg(value, pmxevcntr_el0); +} + +static inline void armv8pmu_write_hw_counter(struct perf_event *event, + u64 value) +{ + int idx = event->hw.idx; + + if (armv8pmu_event_is_chained(event)) { + armv8pmu_write_evcntr(idx, upper_32_bits(value)); + armv8pmu_write_evcntr(idx - 1, lower_32_bits(value)); + } else { + armv8pmu_write_evcntr(idx, value); + } +} + +static inline void armv8pmu_write_counter(struct perf_event 
*event, u64 value) { struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; @@ -541,22 +598,43 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value) smp_processor_id(), idx); else if (idx == ARMV8_IDX_CYCLE_COUNTER) { /* - * Set the upper 32bits as this is a 64bit counter but we only - * count using the lower 32bits and we want an interrupt when - * it overflows. + * The cycles counter is really a 64-bit counter. + * When treating it as a 32-bit counter, we only count + * the lower 32 bits, and set the upper 32-bits so that + * we get an interrupt upon 32-bit overflow. */ - u64 value64 = 0xffffffff00000000ULL | value; - - write_sysreg(value64, pmccntr_el0); - } else if (armv8pmu_select_counter(idx) == idx) - write_sysreg(value, pmxevcntr_el0); + if (!armv8pmu_event_is_64bit(event)) + value |= 0xffffffff00000000ULL; + write_sysreg(value, pmccntr_el0); + } else + armv8pmu_write_hw_counter(event, value); } static inline void armv8pmu_write_evtype(int idx, u32 val) { - if (armv8pmu_select_counter(idx) == idx) { - val &= ARMV8_PMU_EVTYPE_MASK; - write_sysreg(val, pmxevtyper_el0); + armv8pmu_select_counter(idx); + val &= ARMV8_PMU_EVTYPE_MASK; + write_sysreg(val, pmxevtyper_el0); +} + +static inline void armv8pmu_write_event_type(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + /* + * For chained events, the low counter is programmed to count + * the event of interest and the high counter is programmed + * with CHAIN event code with filters set to count at all ELs. + */ + if (armv8pmu_event_is_chained(event)) { + u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN | + ARMV8_PMU_INCLUDE_EL2; + + armv8pmu_write_evtype(idx - 1, hwc->config_base); + armv8pmu_write_evtype(idx, chain_evt); + } else { + armv8pmu_write_evtype(idx, hwc->config_base); } } @@ -567,6 +645,16 @@ static inline int armv8pmu_enable_counter(int idx) return idx; } +static inline void armv8pmu_enable_event_counter(struct perf_event *event) +{ + int idx = event->hw.idx; + + armv8pmu_enable_counter(idx); + if (armv8pmu_event_is_chained(event)) + armv8pmu_enable_counter(idx - 1); + isb(); +} + static inline int armv8pmu_disable_counter(int idx) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); @@ -574,6 +662,16 @@ static inline int armv8pmu_disable_counter(int idx) return idx; } +static inline void armv8pmu_disable_event_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (armv8pmu_event_is_chained(event)) + armv8pmu_disable_counter(idx - 1); + armv8pmu_disable_counter(idx); +} + static inline int armv8pmu_enable_intens(int idx) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); @@ -581,6 +679,11 @@ static inline int armv8pmu_enable_intens(int idx) return idx; } +static inline int armv8pmu_enable_event_irq(struct perf_event *event) +{ + return armv8pmu_enable_intens(event->hw.idx); +} + static inline int armv8pmu_disable_intens(int idx) { u32 counter = ARMV8_IDX_TO_COUNTER(idx); @@ -593,6 +696,11 @@ static inline int armv8pmu_disable_intens(int idx) return idx; } +static inline int armv8pmu_disable_event_irq(struct perf_event *event) +{ + return armv8pmu_disable_intens(event->hw.idx); +} + static inline u32 armv8pmu_getreset_flags(void) { u32 value; @@ -610,10 +718,8 @@ static inline u32 armv8pmu_getreset_flags(void) static void armv8pmu_enable_event(struct perf_event *event) { unsigned long flags; - struct hw_perf_event *hwc = &event->hw; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); 
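The armv8pmu_read_hw_counter()/armv8pmu_write_hw_counter() helpers above build a 64-bit event count out of two adjacent 32-bit counters: the odd index holds the high half (programmed with the CHAIN event) and the even index below it holds the low half. A standalone sketch of that composition, using a plain array in place of the PMXEVCNTR accesses:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_counters[32];	/* stand-in for the hardware counters */

static uint32_t read_evcntr(int idx)            { return fake_counters[idx]; }
static void write_evcntr(int idx, uint32_t val) { fake_counters[idx] = val; }

/* Read a chained pair: high half in idx, low half in idx - 1. */
static uint64_t read_chained(int idx)
{
	return ((uint64_t)read_evcntr(idx) << 32) | read_evcntr(idx - 1);
}

/* Write a chained pair by splitting the 64-bit value. */
static void write_chained(int idx, uint64_t val)
{
	write_evcntr(idx, (uint32_t)(val >> 32));	/* upper_32_bits() */
	write_evcntr(idx - 1, (uint32_t)val);		/* lower_32_bits() */
}

int main(void)
{
	write_chained(3, 0x123456789abcdef0ULL);
	printf("0x%" PRIx64 "\n", read_chained(3));	/* prints the value back */
	return 0;
}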
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - int idx = hwc->idx; /* * Enable counter and interrupt, and set the counter to count @@ -624,22 +730,22 @@ static void armv8pmu_enable_event(struct perf_event *event) /* * Disable counter */ - armv8pmu_disable_counter(idx); + armv8pmu_disable_event_counter(event); /* * Set event (if destined for PMNx counters). */ - armv8pmu_write_evtype(idx, hwc->config_base); + armv8pmu_write_event_type(event); /* * Enable interrupt for this counter */ - armv8pmu_enable_intens(idx); + armv8pmu_enable_event_irq(event); /* * Enable counter */ - armv8pmu_enable_counter(idx); + armv8pmu_enable_event_counter(event); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } @@ -647,10 +753,8 @@ static void armv8pmu_enable_event(struct perf_event *event) static void armv8pmu_disable_event(struct perf_event *event) { unsigned long flags; - struct hw_perf_event *hwc = &event->hw; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - int idx = hwc->idx; /* * Disable counter and interrupt @@ -660,16 +764,38 @@ static void armv8pmu_disable_event(struct perf_event *event) /* * Disable counter */ - armv8pmu_disable_counter(idx); + armv8pmu_disable_event_counter(event); /* * Disable interrupt for this counter */ - armv8pmu_disable_intens(idx); + armv8pmu_disable_event_irq(event); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } +static void armv8pmu_start(struct arm_pmu *cpu_pmu) +{ + unsigned long flags; + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + /* Enable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static void armv8pmu_stop(struct arm_pmu *cpu_pmu) +{ + unsigned long flags; + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + /* Disable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) { u32 pmovsr; @@ -694,6 +820,11 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) */ regs = get_irq_regs(); + /* + * Stop the PMU while processing the counter overflows + * to prevent skews in group events. + */ + armv8pmu_stop(cpu_pmu); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -718,6 +849,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) if (perf_event_overflow(event, &data, regs)) cpu_pmu->disable(event); } + armv8pmu_start(cpu_pmu); /* * Handle the pending perf events. 
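The interrupt handler change above now brackets overflow processing with armv8pmu_stop()/armv8pmu_start(), so counters in the same group do not keep running (and skew) while one of them is being updated. A rough sketch of that shape, with trivial stand-ins for the PMU state and the per-counter overflow flags:

#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS 6

static int pmu_enabled;

static void pmu_stop(void)  { pmu_enabled = 0; }
static void pmu_start(void) { pmu_enabled = 1; }

/* Handle one overflow interrupt: flags has one bit per counter. */
static void handle_overflow(uint32_t flags)
{
	pmu_stop();				/* freeze all counters */

	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!(flags & (1u << idx)))
			continue;
		/* real code would update the perf_event bound to idx here */
		printf("counter %d overflowed\n", idx);
	}

	pmu_start();				/* resume counting */
}

int main(void)
{
	pmu_start();
	handle_overflow(0x5);	/* counters 0 and 2 overflowed */
	return 0;
}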
@@ -731,32 +863,42 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) return IRQ_HANDLED; } -static void armv8pmu_start(struct arm_pmu *cpu_pmu) +static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc, + struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + int idx; - raw_spin_lock_irqsave(&events->pmu_lock, flags); - /* Enable all counters */ - armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx ++) { + if (!test_and_set_bit(idx, cpuc->used_mask)) + return idx; + } + return -EAGAIN; } -static void armv8pmu_stop(struct arm_pmu *cpu_pmu) +static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc, + struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + int idx; - raw_spin_lock_irqsave(&events->pmu_lock, flags); - /* Disable all counters */ - armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + /* + * Chaining requires two consecutive event counters, where + * the lower idx must be even. + */ + for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) { + if (!test_and_set_bit(idx, cpuc->used_mask)) { + /* Check if the preceding even counter is available */ + if (!test_and_set_bit(idx - 1, cpuc->used_mask)) + return idx; + /* Release the Odd counter */ + clear_bit(idx, cpuc->used_mask); + } + } + return -EAGAIN; } static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { - int idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT; @@ -770,13 +912,20 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, /* * Otherwise use events counters */ - for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { - if (!test_and_set_bit(idx, cpuc->used_mask)) - return idx; - } + if (armv8pmu_event_is_64bit(event)) + return armv8pmu_get_chain_idx(cpuc, cpu_pmu); + else + return armv8pmu_get_single_idx(cpuc, cpu_pmu); +} - /* The counters are all in use. 
*/ - return -EAGAIN; +static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) +{ + int idx = event->hw.idx; + + clear_bit(idx, cpuc->used_mask); + if (armv8pmu_event_is_chained(event)) + clear_bit(idx - 1, cpuc->used_mask); } /* @@ -851,6 +1000,9 @@ static int __armv8_pmuv3_map_event(struct perf_event *event, &armv8_pmuv3_perf_cache_map, ARMV8_PMU_EVTYPE_EVENT); + if (armv8pmu_event_is_64bit(event)) + event->hw.flags |= ARMPMU_EVT_64BIT; + /* Onl expose micro/arch events supported by this PMU */ if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS) && test_bit(hw_event_id, armpmu->pmceid_bitmap)) { @@ -957,10 +1109,10 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->read_counter = armv8pmu_read_counter, cpu_pmu->write_counter = armv8pmu_write_counter, cpu_pmu->get_event_idx = armv8pmu_get_event_idx, + cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx, cpu_pmu->start = armv8pmu_start, cpu_pmu->stop = armv8pmu_stop, cpu_pmu->reset = armv8pmu_reset, - cpu_pmu->max_period = (1LLU << 32) - 1, cpu_pmu->set_event_filter = armv8pmu_set_event_filter; return 0; @@ -1127,3 +1279,32 @@ static int __init armv8_pmu_driver_init(void) return arm_pmu_acpi_probe(armv8_pmuv3_init); } device_initcall(armv8_pmu_driver_init) + +void arch_perf_update_userpage(struct perf_event *event, + struct perf_event_mmap_page *userpg, u64 now) +{ + u32 freq; + u32 shift; + + /* + * Internal timekeeping for enabled/running/stopped times + * is always computed with the sched_clock. + */ + freq = arch_timer_get_rate(); + userpg->cap_user_time = 1; + + clocks_calc_mult_shift(&userpg->time_mult, &shift, freq, + NSEC_PER_SEC, 0); + /* + * time_shift is not expected to be greater than 31 due to + * the original published conversion algorithm shifting a + * 32-bit value (now specifies a 64-bit value) - refer + * perf_event_mmap_page documentation in perf_event.h. + */ + if (shift == 32) { + shift = 31; + userpg->time_mult >>= 1; + } + userpg->time_shift = (u16)shift; + userpg->time_offset = -now; +} diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index d849d9804011..e78c3ef04d95 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, break; case KPROBE_HIT_SS: case KPROBE_REENTER: - pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr); + pr_warn("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); break; @@ -395,9 +395,9 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) /* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a - * pre-handler and it returned non-zero, it prepped - * for calling the break_handler below on re-entry, - * so get out doing nothing more here. + * pre-handler and it returned non-zero, it will + * modify the execution path and no need to single + * stepping. Let's just reset current kprobe and exit. * * pre_handler can hit a breakpoint and can step thru * before return, keep PSTATE D-flag enabled until @@ -405,16 +405,8 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) */ if (!p->pre_handler || !p->pre_handler(p, regs)) { setup_singlestep(p, regs, kcb, 0); - return; - } - } - } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) == - BRK64_OPCODE_KPROBES) && cur_kprobe) { - /* We probably hit a jprobe. Call its break handler. 
*/ - if (cur_kprobe->break_handler && - cur_kprobe->break_handler(cur_kprobe, regs)) { - setup_singlestep(cur_kprobe, regs, kcb, 0); - return; + } else + reset_current_kprobe(); } } /* @@ -465,74 +457,6 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) return DBG_HOOK_HANDLED; } -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct jprobe *jp = container_of(p, struct jprobe, kp); - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - kcb->jprobe_saved_regs = *regs; - /* - * Since we can't be sure where in the stack frame "stacked" - * pass-by-value arguments are stored we just don't try to - * duplicate any of the stack. Do not use jprobes on functions that - * use more than 64 bytes (after padding each to an 8 byte boundary) - * of arguments, or pass individual arguments larger than 16 bytes. - */ - - instruction_pointer_set(regs, (unsigned long) jp->entry); - preempt_disable(); - pause_graph_tracing(); - return 1; -} - -void __kprobes jprobe_return(void) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - - /* - * Jprobe handler return by entering break exception, - * encoded same as kprobe, but with following conditions - * -a special PC to identify it from the other kprobes. - * -restore stack addr to original saved pt_regs - */ - asm volatile(" mov sp, %0 \n" - "jprobe_return_break: brk %1 \n" - : - : "r" (kcb->jprobe_saved_regs.sp), - "I" (BRK64_ESR_KPROBES) - : "memory"); - - unreachable(); -} - -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -{ - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - long stack_addr = kcb->jprobe_saved_regs.sp; - long orig_sp = kernel_stack_pointer(regs); - struct jprobe *jp = container_of(p, struct jprobe, kp); - extern const char jprobe_return_break[]; - - if (instruction_pointer(regs) != (u64) jprobe_return_break) - return 0; - - if (orig_sp != stack_addr) { - struct pt_regs *saved_regs = - (struct pt_regs *)kcb->jprobe_saved_regs.sp; - pr_err("current sp %lx does not match saved sp %lx\n", - orig_sp, stack_addr); - pr_err("Saved registers for jprobe %p\n", jp); - __show_regs(saved_regs); - pr_err("Current registers\n"); - __show_regs(regs); - BUG(); - } - unpause_graph_tracing(); - *regs = kcb->jprobe_saved_regs; - preempt_enable_no_resched(); - return 1; -} - bool arch_within_kprobe_blacklist(unsigned long addr) { if ((addr >= (unsigned long)__kprobes_text_start && diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index e10bc363f533..7f1628effe6d 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -177,16 +177,16 @@ static void print_pstate(struct pt_regs *regs) if (compat_user_mode(regs)) { printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n", pstate, - pstate & COMPAT_PSR_N_BIT ? 'N' : 'n', - pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z', - pstate & COMPAT_PSR_C_BIT ? 'C' : 'c', - pstate & COMPAT_PSR_V_BIT ? 'V' : 'v', - pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q', - pstate & COMPAT_PSR_T_BIT ? "T32" : "A32", - pstate & COMPAT_PSR_E_BIT ? "BE" : "LE", - pstate & COMPAT_PSR_A_BIT ? 'A' : 'a', - pstate & COMPAT_PSR_I_BIT ? 'I' : 'i', - pstate & COMPAT_PSR_F_BIT ? 'F' : 'f'); + pstate & PSR_AA32_N_BIT ? 'N' : 'n', + pstate & PSR_AA32_Z_BIT ? 'Z' : 'z', + pstate & PSR_AA32_C_BIT ? 'C' : 'c', + pstate & PSR_AA32_V_BIT ? 'V' : 'v', + pstate & PSR_AA32_Q_BIT ? 'Q' : 'q', + pstate & PSR_AA32_T_BIT ? "T32" : "A32", + pstate & PSR_AA32_E_BIT ? "BE" : "LE", + pstate & PSR_AA32_A_BIT ? 'A' : 'a', + pstate & PSR_AA32_I_BIT ? 
'I' : 'i', + pstate & PSR_AA32_F_BIT ? 'F' : 'f'); } else { printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n", pstate, @@ -493,3 +493,25 @@ void arch_setup_new_exec(void) { current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0; } + +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK +void __used stackleak_check_alloca(unsigned long size) +{ + unsigned long stack_left; + unsigned long current_sp = current_stack_pointer; + struct stack_info info; + + BUG_ON(!on_accessible_stack(current, current_sp, &info)); + + stack_left = current_sp - info.low; + + /* + * There's a good chance we're almost out of stack space if this + * is true. Using panic() over BUG() is more likely to give + * reliable debugging output. + */ + if (size >= stack_left) + panic("alloca() over the kernel stack boundary\n"); +} +EXPORT_SYMBOL(stackleak_check_alloca); +#endif diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 5c338ce5a7fa..6219486fa25f 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -132,7 +132,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return ((addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || - on_irq_stack(addr); + on_irq_stack(addr, NULL); } /** @@ -277,19 +277,22 @@ static int ptrace_hbp_set_event(unsigned int note_type, switch (note_type) { case NT_ARM_HW_BREAK: - if (idx < ARM_MAX_BRP) { - tsk->thread.debug.hbp_break[idx] = bp; - err = 0; - } + if (idx >= ARM_MAX_BRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_BRP); + tsk->thread.debug.hbp_break[idx] = bp; + err = 0; break; case NT_ARM_HW_WATCH: - if (idx < ARM_MAX_WRP) { - tsk->thread.debug.hbp_watch[idx] = bp; - err = 0; - } + if (idx >= ARM_MAX_WRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_WRP); + tsk->thread.debug.hbp_watch[idx] = bp; + err = 0; break; } +out: return err; } @@ -1076,6 +1079,7 @@ static int compat_gpr_get(struct task_struct *target, break; case 16: reg = task_pt_regs(target)->pstate; + reg = pstate_to_compat_psr(reg); break; case 17: reg = task_pt_regs(target)->orig_x0; @@ -1143,6 +1147,7 @@ static int compat_gpr_set(struct task_struct *target, newregs.pc = reg; break; case 16: + reg = compat_psr_to_pstate(reg); newregs.pstate = reg; break; case 17: @@ -1629,7 +1634,7 @@ static void tracehook_report_syscall(struct pt_regs *regs, regs->regs[regno] = saved_reg; } -asmlinkage int syscall_trace_enter(struct pt_regs *regs) +int syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); @@ -1647,7 +1652,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) return regs->syscallno; } -asmlinkage void syscall_trace_exit(struct pt_regs *regs) +void syscall_trace_exit(struct pt_regs *regs) { audit_syscall_exit(regs); @@ -1656,18 +1661,24 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); + + rseq_syscall(regs); } /* - * Bits which are always architecturally RES0 per ARM DDI 0487A.h + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a + * We also take into account DIT (bit 24), which is not yet documented, and + * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be + * allocated an EL0 meaning in future. * Userspace cannot use these until they have an architectural meaning. + * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. 
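The ptrace_hbp_set_event() change above replaces the open-coded "idx < ARM_MAX_BRP" guard with an early-out plus array_index_nospec(), so the index actually used for the table store is also bounded under speculation. The kernel helper is not available outside the kernel; the sketch below shows the same check-then-clamp pattern with a plain, non-speculation-safe clamp standing in for array_index_nospec():

#include <stddef.h>
#include <stdio.h>

#define MAX_SLOTS 16

static int slots[MAX_SLOTS];

/* Plain stand-in for array_index_nospec(): architectural clamp only. */
static size_t clamp_index(size_t idx, size_t size)
{
	return idx < size ? idx : 0;
}

static int set_slot(size_t idx, int val)
{
	if (idx >= MAX_SLOTS)
		return -1;			/* reject out-of-range indices */

	idx = clamp_index(idx, MAX_SLOTS);	/* bound the value actually used */
	slots[idx] = val;
	return 0;
}

int main(void)
{
	printf("%d\n", set_slot(3, 42));	/* 0: accepted */
	printf("%d\n", set_slot(99, 7));	/* -1: rejected */
	return 0;
}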
* We also reserve IL for the kernel; SS is handled dynamically. */ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \ - GENMASK_ULL(5, 5)) + (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20)) + (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) static int valid_compat_regs(struct user_pt_regs *regs) { @@ -1675,15 +1686,15 @@ static int valid_compat_regs(struct user_pt_regs *regs) if (!system_supports_mixed_endian_el0()) { if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) - regs->pstate |= COMPAT_PSR_E_BIT; + regs->pstate |= PSR_AA32_E_BIT; else - regs->pstate &= ~COMPAT_PSR_E_BIT; + regs->pstate &= ~PSR_AA32_E_BIT; } if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) && - (regs->pstate & COMPAT_PSR_A_BIT) == 0 && - (regs->pstate & COMPAT_PSR_I_BIT) == 0 && - (regs->pstate & COMPAT_PSR_F_BIT) == 0) { + (regs->pstate & PSR_AA32_A_BIT) == 0 && + (regs->pstate & PSR_AA32_I_BIT) == 0 && + (regs->pstate & PSR_AA32_F_BIT) == 0) { return 1; } @@ -1691,11 +1702,11 @@ static int valid_compat_regs(struct user_pt_regs *regs) * Force PSR to a valid 32-bit EL0t, preserving the same bits as * arch/arm. */ - regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT | - COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT | - COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK | - COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT | - COMPAT_PSR_T_BIT; + regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT | + PSR_AA32_C_BIT | PSR_AA32_V_BIT | + PSR_AA32_Q_BIT | PSR_AA32_IT_MASK | + PSR_AA32_GE_MASK | PSR_AA32_E_BIT | + PSR_AA32_T_BIT; regs->pstate |= PSR_MODE32_BIT; return 0; diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 6b8d90d5ceae..5ba4465e44f0 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -13,6 +13,7 @@ #include <asm/mmu.h> #include <asm/ptrace.h> #include <asm/sections.h> +#include <asm/stacktrace.h> #include <asm/sysreg.h> #include <asm/vmap_stack.h> @@ -88,23 +89,52 @@ static int init_sdei_stacks(void) return err; } -bool _on_sdei_stack(unsigned long sp) +static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) { - unsigned long low, high; + unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); + unsigned long high = low + SDEI_STACK_SIZE; - if (!IS_ENABLED(CONFIG_VMAP_STACK)) + if (sp < low || sp >= high) return false; - low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); - high = low + SDEI_STACK_SIZE; + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_SDEI_NORMAL; + } - if (low <= sp && sp < high) + return true; +} + +static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info) +{ + unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); + unsigned long high = low + SDEI_STACK_SIZE; + + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_SDEI_CRITICAL; + } + + return true; +} + +bool _on_sdei_stack(unsigned long sp, struct stack_info *info) +{ + if (!IS_ENABLED(CONFIG_VMAP_STACK)) + return false; + + if (on_sdei_critical_stack(sp, info)) return true; - low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); - high = low + SDEI_STACK_SIZE; + if (on_sdei_normal_stack(sp, info)) + return true; - return (low <= sp && sp < high); + return false; } unsigned long sdei_arch_get_entry_point(int conduit) 
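The SDEI rework above turns the open-coded range checks into on_sdei_normal_stack()/on_sdei_critical_stack(), each of which tests sp against [low, high) and reports the bounds and stack type through an optional struct stack_info. A minimal sketch of that pattern (field and type names approximate the ones in the hunk, not the full arm64 definitions):

#include <stdbool.h>
#include <stdio.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/* Return true if sp is in [low, high); optionally describe the stack. */
static bool on_stack(unsigned long sp, unsigned long low, unsigned long high,
		     enum stack_type type, struct stack_info *info)
{
	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

int main(void)
{
	struct stack_info info = { 0 };

	if (on_stack(0x1080, 0x1000, 0x2000, STACK_TYPE_SDEI_NORMAL, &info))
		printf("sp on stack type %d, [%#lx, %#lx)\n",
		       info.type, info.low, info.high);
	return 0;
}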
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 30ad2f085d1f..5b4fac434c84 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -241,6 +241,44 @@ static void __init request_standard_resources(void) } } +static int __init reserve_memblock_reserved_regions(void) +{ + phys_addr_t start, end, roundup_end = 0; + struct resource *mem, *res; + u64 i; + + for_each_reserved_mem_region(i, &start, &end) { + if (end <= roundup_end) + continue; /* done already */ + + start = __pfn_to_phys(PFN_DOWN(start)); + end = __pfn_to_phys(PFN_UP(end)) - 1; + roundup_end = end; + + res = kzalloc(sizeof(*res), GFP_ATOMIC); + if (WARN_ON(!res)) + return -ENOMEM; + res->start = start; + res->end = end; + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + + mem = request_resource_conflict(&iomem_resource, res); + /* + * We expected memblock_reserve() regions to conflict with + * memory created by request_standard_resources(). + */ + if (WARN_ON_ONCE(!mem)) + continue; + kfree(res); + + reserve_region_with_split(mem, start, end, "reserved"); + } + + return 0; +} +arch_initcall(reserve_memblock_reserved_regions); + u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; void __init setup_arch(char **cmdline_p) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 511af13e8d8f..5dcc942906db 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -539,8 +539,9 @@ static int restore_sigframe(struct pt_regs *regs, return err; } -asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) +SYSCALL_DEFINE0(rt_sigreturn) { + struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ @@ -802,6 +803,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) int usig = ksig->sig; int ret; + rseq_signal_deliver(ksig, regs); + /* * Set up the stack frame */ @@ -910,7 +913,7 @@ static void do_signal(struct pt_regs *regs) } asmlinkage void do_notify_resume(struct pt_regs *regs, - unsigned int thread_flags) + unsigned long thread_flags) { /* * The assembly code enters us with IRQs off, but it hasn't @@ -940,6 +943,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, if (thread_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } if (thread_flags & _TIF_FOREIGN_FPSTATE) diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 77b91f478995..24b09003f821 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -243,6 +243,7 @@ static int compat_restore_sigframe(struct pt_regs *regs, int err; sigset_t set; struct compat_aux_sigframe __user *aux; + unsigned long psr; err = get_sigset_t(&set, &sf->uc.uc_sigmask); if (err == 0) { @@ -266,7 +267,9 @@ static int compat_restore_sigframe(struct pt_regs *regs, __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); - __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); + __get_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err); + + regs->pstate = compat_psr_to_pstate(psr); /* * Avoid compat_sys_sigreturn() restarting. 
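The reserve_memblock_reserved_regions() helper added in the setup.c hunk above widens each reserved memblock region to whole pages before inserting it into the resource tree: the start is rounded down via PFN_DOWN() and the end rounded up via PFN_UP(), then made inclusive. A small sketch of that rounding, assuming 4 KiB pages and plain stand-ins for the pfn helpers:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Stand-ins for PFN_DOWN()/PFN_UP()/__pfn_to_phys(). */
static unsigned long pfn_down(unsigned long addr)   { return addr >> PAGE_SHIFT; }
static unsigned long pfn_up(unsigned long addr)     { return (addr + PAGE_SIZE - 1) >> PAGE_SHIFT; }
static unsigned long pfn_to_phys(unsigned long pfn) { return pfn << PAGE_SHIFT; }

int main(void)
{
	unsigned long start = 0x80001234;	/* unaligned reserved region */
	unsigned long end   = 0x80005678;	/* exclusive end address */

	/* Same rounding as the new setup.c helper: page-aligned, inclusive end. */
	unsigned long res_start = pfn_to_phys(pfn_down(start));
	unsigned long res_end   = pfn_to_phys(pfn_up(end)) - 1;

	printf("reserved: [%#lx, %#lx]\n", res_start, res_end);
	return 0;
}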
@@ -282,8 +285,9 @@ static int compat_restore_sigframe(struct pt_regs *regs, return err; } -asmlinkage int compat_sys_sigreturn(struct pt_regs *regs) +COMPAT_SYSCALL_DEFINE0(sigreturn) { + struct pt_regs *regs = current_pt_regs(); struct compat_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ @@ -312,8 +316,9 @@ badframe: return 0; } -asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs) +COMPAT_SYSCALL_DEFINE0(rt_sigreturn) { + struct pt_regs *regs = current_pt_regs(); struct compat_rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ @@ -372,22 +377,22 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, { compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); compat_ulong_t retcode; - compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT); + compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT); int thumb; /* Check if the handler is written for ARM or Thumb */ thumb = handler & 1; if (thumb) - spsr |= COMPAT_PSR_T_BIT; + spsr |= PSR_AA32_T_BIT; else - spsr &= ~COMPAT_PSR_T_BIT; + spsr &= ~PSR_AA32_T_BIT; /* The IT state must be cleared for both ARM and Thumb-2 */ - spsr &= ~COMPAT_PSR_IT_MASK; + spsr &= ~PSR_AA32_IT_MASK; /* Restore the original endianness */ - spsr |= COMPAT_PSR_ENDSTATE; + spsr |= PSR_AA32_ENDSTATE; if (ka->sa.sa_flags & SA_RESTORER) { retcode = ptr_to_compat(ka->sa.sa_restorer); @@ -414,6 +419,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { struct compat_aux_sigframe __user *aux; + unsigned long psr = pstate_to_compat_psr(regs->pstate); int err = 0; __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); @@ -432,7 +438,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); - __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); + __put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err); __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); /* set the compat FSR WnR */ diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 2faa9863d2e5..25fcd22a4bb2 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -225,6 +225,7 @@ asmlinkage notrace void secondary_start_kernel(void) notify_cpu_starting(cpu); store_cpu_topology(cpu); + numa_add_cpu(cpu); /* * OK, now it's safe to let the boot CPU continue. Wait for @@ -278,6 +279,9 @@ int __cpu_disable(void) if (ret) return ret; + remove_cpu_topology(cpu); + numa_remove_cpu(cpu); + /* * Take this CPU offline. Once we clear this, we can't return, * and we must not schedule until we're ready to give up the cpu. 
@@ -518,7 +522,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) } bootcpu_valid = true; cpu_madt_gicc[0] = *processor; - early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid)); return; } @@ -541,8 +544,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) */ acpi_set_mailbox_entry(cpu_count, processor); - early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid)); - cpu_count++; } @@ -562,8 +563,34 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, return 0; } + +static void __init acpi_parse_and_init_cpus(void) +{ + int i; + + /* + * do a walk of MADT to determine how many CPUs + * we have including disabled CPUs, and get information + * we need for SMP init. + */ + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + acpi_parse_gic_cpu_interface, 0); + + /* + * In ACPI, SMP and CPU NUMA information is provided in separate + * static tables, namely the MADT and the SRAT. + * + * Thus, it is simpler to first create the cpu logical map through + * an MADT walk and then map the logical cpus to their node ids + * as separate steps. + */ + acpi_map_cpus_to_nodes(); + + for (i = 0; i < nr_cpu_ids; i++) + early_map_cpu_to_node(i, acpi_numa_get_nid(i)); +} #else -#define acpi_table_parse_madt(...) do { } while (0) +#define acpi_parse_and_init_cpus(...) do { } while (0) #endif /* @@ -636,13 +663,7 @@ void __init smp_init_cpus(void) if (acpi_disabled) of_parse_and_init_cpus(); else - /* - * do a walk of MADT to determine how many CPUs - * we have including disabled CPUs, and get information - * we need for SMP init - */ - acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, - acpi_parse_gic_cpu_interface, 0); + acpi_parse_and_init_cpus(); if (cpu_count > nr_cpu_ids) pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n", @@ -679,6 +700,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) this_cpu = smp_processor_id(); store_cpu_topology(this_cpu); numa_store_cpu_info(this_cpu); + numa_add_cpu(this_cpu); /* * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index d5718a060672..4989f7ea1e59 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -50,7 +50,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) if (!tsk) tsk = current; - if (!on_accessible_stack(tsk, fp)) + if (!on_accessible_stack(tsk, fp, NULL)) return -EINVAL; frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c index 72981bae10eb..b44065fb1616 100644 --- a/arch/arm64/kernel/sys.c +++ b/arch/arm64/kernel/sys.c @@ -25,11 +25,13 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/syscalls.h> + #include <asm/cpufeature.h> +#include <asm/syscall.h> -asmlinkage long sys_mmap(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, off_t off) +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, off_t, off) { if (offset_in_page(off) != 0) return -EINVAL; @@ -42,24 +44,25 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality) if (personality(personality) == PER_LINUX32 && !system_supports_32bit_el0()) return -EINVAL; - return sys_personality(personality); + return ksys_personality(personality); } /* * Wrappers to pass the pt_regs argument. 
*/ -asmlinkage long sys_rt_sigreturn_wrapper(void); -#define sys_rt_sigreturn sys_rt_sigreturn_wrapper #define sys_personality sys_arm64_personality +asmlinkage long sys_ni_syscall(const struct pt_regs *); +#define __arm64_sys_ni_syscall sys_ni_syscall + #undef __SYSCALL -#define __SYSCALL(nr, sym) [nr] = sym, +#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); +#include <asm/unistd.h> -/* - * The sys_call_table array must be 4K aligned to be accessible from - * kernel/entry.S. - */ -void * const sys_call_table[__NR_syscalls] __aligned(4096) = { - [0 ... __NR_syscalls - 1] = sys_ni_syscall, +#undef __SYSCALL +#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, + +const syscall_fn_t sys_call_table[__NR_syscalls] = { + [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, #include <asm/unistd.h> }; diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c index a40b1343b819..0f8bcb7de700 100644 --- a/arch/arm64/kernel/sys32.c +++ b/arch/arm64/kernel/sys32.c @@ -22,31 +22,128 @@ */ #define __COMPAT_SYSCALL_NR +#include <linux/compat.h> #include <linux/compiler.h> #include <linux/syscalls.h> -asmlinkage long compat_sys_sigreturn_wrapper(void); -asmlinkage long compat_sys_rt_sigreturn_wrapper(void); -asmlinkage long compat_sys_statfs64_wrapper(void); -asmlinkage long compat_sys_fstatfs64_wrapper(void); -asmlinkage long compat_sys_pread64_wrapper(void); -asmlinkage long compat_sys_pwrite64_wrapper(void); -asmlinkage long compat_sys_truncate64_wrapper(void); -asmlinkage long compat_sys_ftruncate64_wrapper(void); -asmlinkage long compat_sys_readahead_wrapper(void); -asmlinkage long compat_sys_fadvise64_64_wrapper(void); -asmlinkage long compat_sys_sync_file_range2_wrapper(void); -asmlinkage long compat_sys_fallocate_wrapper(void); -asmlinkage long compat_sys_mmap2_wrapper(void); +#include <asm/syscall.h> -#undef __SYSCALL -#define __SYSCALL(nr, sym) [nr] = sym, +asmlinkage long compat_sys_sigreturn(void); +asmlinkage long compat_sys_rt_sigreturn(void); + +COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname, + compat_size_t, sz, struct compat_statfs64 __user *, buf) +{ + /* + * 32-bit ARM applies an OABI compatibility fixup to statfs64 and + * fstatfs64 regardless of whether OABI is in use, and therefore + * arbitrary binaries may rely upon it, so we must do the same. + * For more details, see commit: + * + * 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and + * fstatfs64") + */ + if (sz == 88) + sz = 84; + + return kcompat_sys_statfs64(pathname, sz, buf); +} + +COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz, + struct compat_statfs64 __user *, buf) +{ + /* see aarch32_statfs64 */ + if (sz == 88) + sz = 84; + + return kcompat_sys_fstatfs64(fd, sz, buf); +} /* - * The sys_call_table array must be 4K aligned to be accessible from - * kernel/entry.S. + * Note: off_4k is always in units of 4K. If we can't do the + * requested offset because it is not page-aligned, we return -EINVAL. */ -void * const compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = { - [0 ... 
__NR_compat_syscalls - 1] = sys_ni_syscall, +COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, unsigned long, off_4k) +{ + if (off_4k & (~PAGE_MASK >> 12)) + return -EINVAL; + + off_4k >>= (PAGE_SHIFT - 12); + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k); +} + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define arg_u32p(name) u32, name##_hi, u32, name##_lo +#else +#define arg_u32p(name) u32, name##_lo, u32, name##_hi +#endif + +#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo) + +COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf, + size_t, count, u32, __pad, arg_u32p(pos)) +{ + return ksys_pread64(fd, buf, count, arg_u64(pos)); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd, + const char __user *, buf, size_t, count, u32, __pad, + arg_u32p(pos)) +{ + return ksys_pwrite64(fd, buf, count, arg_u64(pos)); +} + +COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname, + u32, __pad, arg_u32p(length)) +{ + return ksys_truncate(pathname, arg_u64(length)); +} + +COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad, + arg_u32p(length)) +{ + return ksys_ftruncate(fd, arg_u64(length)); +} + +COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad, + arg_u32p(offset), size_t, count) +{ + return ksys_readahead(fd, arg_u64(offset), count); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice, + arg_u32p(offset), arg_u32p(len)) +{ + return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags, + arg_u32p(offset), arg_u32p(nbytes)) +{ + return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes), + flags); +} + +COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode, + arg_u32p(offset), arg_u32p(len)) +{ + return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len)); +} + +asmlinkage long sys_ni_syscall(const struct pt_regs *); +#define __arm64_sys_ni_syscall sys_ni_syscall + +#undef __SYSCALL +#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *); +#include <asm/unistd32.h> + +#undef __SYSCALL +#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym, + +const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = { + [0 ... 
__NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall, #include <asm/unistd32.h> }; diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c new file mode 100644 index 000000000000..032d22312881 --- /dev/null +++ b/arch/arm64/kernel/syscall.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/compiler.h> +#include <linux/context_tracking.h> +#include <linux/errno.h> +#include <linux/nospec.h> +#include <linux/ptrace.h> +#include <linux/syscalls.h> + +#include <asm/daifflags.h> +#include <asm/fpsimd.h> +#include <asm/syscall.h> +#include <asm/thread_info.h> +#include <asm/unistd.h> + +long compat_arm_syscall(struct pt_regs *regs); + +long sys_ni_syscall(void); + +asmlinkage long do_ni_syscall(struct pt_regs *regs) +{ +#ifdef CONFIG_COMPAT + long ret; + if (is_compat_task()) { + ret = compat_arm_syscall(regs); + if (ret != -ENOSYS) + return ret; + } +#endif + + return sys_ni_syscall(); +} + +static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn) +{ + return syscall_fn(regs); +} + +static void invoke_syscall(struct pt_regs *regs, unsigned int scno, + unsigned int sc_nr, + const syscall_fn_t syscall_table[]) +{ + long ret; + + if (scno < sc_nr) { + syscall_fn_t syscall_fn; + syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; + ret = __invoke_syscall(regs, syscall_fn); + } else { + ret = do_ni_syscall(regs); + } + + regs->regs[0] = ret; +} + +static inline bool has_syscall_work(unsigned long flags) +{ + return unlikely(flags & _TIF_SYSCALL_WORK); +} + +int syscall_trace_enter(struct pt_regs *regs); +void syscall_trace_exit(struct pt_regs *regs); + +static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, + const syscall_fn_t syscall_table[]) +{ + unsigned long flags = current_thread_info()->flags; + + regs->orig_x0 = regs->regs[0]; + regs->syscallno = scno; + + local_daif_restore(DAIF_PROCCTX); + user_exit(); + + if (has_syscall_work(flags)) { + /* set default errno for user-issued syscall(-1) */ + if (scno == NO_SYSCALL) + regs->regs[0] = -ENOSYS; + scno = syscall_trace_enter(regs); + if (scno == NO_SYSCALL) + goto trace_exit; + } + + invoke_syscall(regs, scno, sc_nr, syscall_table); + + /* + * The tracing status may have changed under our feet, so we have to + * check again. However, if we were tracing entry, then we always trace + * exit regardless, as the old entry assembly did. + */ + if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) { + local_daif_mask(); + flags = current_thread_info()->flags; + if (!has_syscall_work(flags)) { + /* + * We're off to userspace, where interrupts are + * always enabled after we restore the flags from + * the SPSR. + */ + trace_hardirqs_on(); + return; + } + local_daif_restore(DAIF_PROCCTX); + } + +trace_exit: + syscall_trace_exit(regs); +} + +static inline void sve_user_discard(void) +{ + if (!system_supports_sve()) + return; + + clear_thread_flag(TIF_SVE); + + /* + * task_fpsimd_load() won't be called to update CPACR_EL1 in + * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only + * happens if a context switch or kernel_neon_begin() or context + * modification (sigreturn, ptrace) intervenes. + * So, ensure that CPACR_EL1 is already correct for the fast-path case. 
+ */ + sve_user_disable(); +} + +asmlinkage void el0_svc_handler(struct pt_regs *regs) +{ + sve_user_discard(); + el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table); +} + +#ifdef CONFIG_COMPAT +asmlinkage void el0_svc_compat_handler(struct pt_regs *regs) +{ + el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls, + compat_sys_call_table); +} +#endif diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index f845a8617812..0825c4a856e3 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -215,11 +215,16 @@ EXPORT_SYMBOL_GPL(cpu_topology); const struct cpumask *cpu_coregroup_mask(int cpu) { - const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling; + const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); + /* Find the smaller of NUMA, core or LLC siblings */ + if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { + /* not numa in package, lets use the package siblings */ + core_mask = &cpu_topology[cpu].core_sibling; + } if (cpu_topology[cpu].llc_id != -1) { - if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask)) - core_mask = &cpu_topology[cpu].llc_siblings; + if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) + core_mask = &cpu_topology[cpu].llc_sibling; } return core_mask; @@ -231,27 +236,25 @@ static void update_siblings_masks(unsigned int cpuid) int cpu; /* update core and thread sibling masks */ - for_each_possible_cpu(cpu) { + for_each_online_cpu(cpu) { cpu_topo = &cpu_topology[cpu]; if (cpuid_topo->llc_id == cpu_topo->llc_id) { - cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings); - cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings); + cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); + cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); } if (cpuid_topo->package_id != cpu_topo->package_id) continue; cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); - if (cpu != cpuid) - cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); + cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); if (cpuid_topo->core_id != cpu_topo->core_id) continue; cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); - if (cpu != cpuid) - cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); + cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); } } @@ -293,6 +296,19 @@ topology_populated: update_siblings_masks(cpuid); } +static void clear_cpu_topology(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpumask_clear(&cpu_topo->llc_sibling); + cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); + + cpumask_clear(&cpu_topo->core_sibling); + cpumask_set_cpu(cpu, &cpu_topo->core_sibling); + cpumask_clear(&cpu_topo->thread_sibling); + cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); +} + static void __init reset_cpu_topology(void) { unsigned int cpu; @@ -303,18 +319,26 @@ static void __init reset_cpu_topology(void) cpu_topo->thread_id = -1; cpu_topo->core_id = 0; cpu_topo->package_id = -1; - cpu_topo->llc_id = -1; - cpumask_clear(&cpu_topo->llc_siblings); - cpumask_set_cpu(cpu, &cpu_topo->llc_siblings); - cpumask_clear(&cpu_topo->core_sibling); - cpumask_set_cpu(cpu, &cpu_topo->core_sibling); - cpumask_clear(&cpu_topo->thread_sibling); - cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); + clear_cpu_topology(cpu); } } +void remove_cpu_topology(unsigned int cpu) +{ + int sibling; + + for_each_cpu(sibling, topology_core_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); + for_each_cpu(sibling, topology_sibling_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); + for_each_cpu(sibling, 
topology_llc_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); + + clear_cpu_topology(cpu); +} + #ifdef CONFIG_ACPI /* * Propagate the topology information of the processor_topology_node tree to the diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index d399d459397b..039e9ff379cc 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -411,7 +411,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) { - config_sctlr_el1(SCTLR_EL1_UCI, 0); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); } #define __user_cache_maint(insn, address, res) \ @@ -547,22 +547,6 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) do_undefinstr(regs); } -long compat_arm_syscall(struct pt_regs *regs); - -asmlinkage long do_ni_syscall(struct pt_regs *regs) -{ -#ifdef CONFIG_COMPAT - long ret; - if (is_compat_task()) { - ret = compat_arm_syscall(regs); - if (ret != -ENOSYS) - return ret; - } -#endif - - return sys_ni_syscall(); -} - static const char *esr_class_str[] = { [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC", [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized", diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S index b82c85e5d972..e20483b104d9 100644 --- a/arch/arm64/kernel/vdso/note.S +++ b/arch/arm64/kernel/vdso/note.S @@ -22,7 +22,10 @@ #include <linux/uts.h> #include <linux/version.h> #include <linux/elfnote.h> +#include <linux/build-salt.h> ELFNOTE_START(Linux, 0, "a") .long LINUX_VERSION_CODE ELFNOTE_END + +BUILD_SALT diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 56a0260ceb11..07256b08226c 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -107,14 +107,14 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) } if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) { - u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK; + u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK; switch (mode) { - case COMPAT_PSR_MODE_USR: - case COMPAT_PSR_MODE_FIQ: - case COMPAT_PSR_MODE_IRQ: - case COMPAT_PSR_MODE_SVC: - case COMPAT_PSR_MODE_ABT: - case COMPAT_PSR_MODE_UND: + case PSR_AA32_MODE_USR: + case PSR_AA32_MODE_FIQ: + case PSR_AA32_MODE_IRQ: + case PSR_AA32_MODE_SVC: + case PSR_AA32_MODE_ABT: + case PSR_AA32_MODE_UND: case PSR_MODE_EL0t: case PSR_MODE_EL1t: case PSR_MODE_EL1h: @@ -289,6 +289,39 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return -EINVAL; } +int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, + struct kvm_vcpu_events *events) +{ + events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE); + events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN); + + if (events->exception.serror_pending && events->exception.serror_has_esr) + events->exception.serror_esr = vcpu_get_vsesr(vcpu); + + return 0; +} + +int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, + struct kvm_vcpu_events *events) +{ + bool serror_pending = events->exception.serror_pending; + bool has_esr = events->exception.serror_has_esr; + + if (serror_pending && has_esr) { + if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) + return -EINVAL; + + if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK)) + kvm_set_sei_esr(vcpu, events->exception.serror_esr); + else + return -EINVAL; + } else if (serror_pending) { + kvm_inject_vabt(vcpu); + } + + return 0; +} + int __attribute_const__ kvm_target_cpu(void) { unsigned long implementor = read_cpuid_implementor(); 
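[Editor's note — illustrative sketch, not part of the patch.] The guest.c hunk above wires up __kvm_arm_vcpu_get_events()/__kvm_arm_vcpu_set_events(), which back the generic KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS vcpu ioctls on arm64 and let userspace pend a virtual SError, optionally with a specific ESR (ISS bits only) when the host has the RAS extension. A rough VMM-side sketch of how that might be driven is shown below; the vm_fd/vcpu_fd handles, the helper name and its error handling are assumptions, while the ioctl, capability and field names follow the KVM UAPI as extended by this series (they require headers from a kernel that carries these changes).

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Pend an SError on a vcpu. If the host advertises
     * KVM_CAP_ARM_INJECT_SERROR_ESR (RAS-capable), also supply an ESR;
     * the kernel only accepts ISS bits, as enforced in
     * __kvm_arm_vcpu_set_events() above.
     */
    static int inject_serror(int vm_fd, int vcpu_fd, unsigned long long esr_iss)
    {
            struct kvm_vcpu_events events;

            memset(&events, 0, sizeof(events));
            events.exception.serror_pending = 1;

            if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_INJECT_SERROR_ESR) > 0) {
                    events.exception.serror_has_esr = 1;
                    events.exception.serror_esr = esr_iss;   /* ISS bits only */
            }

            if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0) {
                    perror("KVM_SET_VCPU_EVENTS");
                    return -1;
            }
            return 0;
    }

Without a valid ESR the call degrades to plain kvm_inject_vabt(), matching the serror_pending-only path in the set_events handler.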
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 6fd91b31a131..ea9225160786 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -57,6 +57,7 @@ __invalid: * x0: HYP pgd * x1: HYP stack * x2: HYP vectors + * x3: per-CPU offset */ __do_hyp_init: /* Check for a stub HVC call */ @@ -119,9 +120,8 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE) mov sp, x1 msr vbar_el2, x2 - /* copy tpidr_el1 into tpidr_el2 for use by HYP */ - mrs x1, tpidr_el1 - msr tpidr_el2, x1 + /* Set tpidr_el2 for use by HYP */ + msr tpidr_el2, x3 /* Hello, World! */ eret diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 4313f7475333..2fabc2dc1966 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -3,7 +3,8 @@ # Makefile for Kernel-based Virtual Machine module, HYP part # -ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING +ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \ + $(DISABLE_STACKLEAK_PLUGIN) KVM=../../../../virt/kvm diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 35bc16832efe..9ce223944983 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -288,8 +288,3 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) vcpu->arch.sysregs_loaded_on_cpu = false; } - -void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2) -{ - asm("msr tpidr_el2, %0": : "r" (tpidr_el2)); -} diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c index 39be799d0417..215c7c0eb3b0 100644 --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c @@ -27,7 +27,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) - return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT); + return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT); return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE); } diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index d8e71659ba7e..a55e91dfcf8f 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -164,9 +164,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) inject_undef64(vcpu); } -static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr) +void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr) { - vcpu_set_vsesr(vcpu, esr); + vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK); *vcpu_hcr(vcpu) |= HCR_VSE; } @@ -184,5 +184,5 @@ static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr) */ void kvm_inject_vabt(struct kvm_vcpu *vcpu) { - pend_guest_serror(vcpu, ESR_ELx_ISV); + kvm_set_sei_esr(vcpu, ESR_ELx_ISV); } diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c index eefe403a2e63..7a5173ea2276 100644 --- a/arch/arm64/kvm/regmap.c +++ b/arch/arm64/kvm/regmap.c @@ -112,22 +112,22 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = { unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) { unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; - unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; + unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; switch (mode) { - case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC: + case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC: mode &= ~PSR_MODE32_BIT; /* 0 ... 
3 */ break; - case COMPAT_PSR_MODE_ABT: + case PSR_AA32_MODE_ABT: mode = 4; break; - case COMPAT_PSR_MODE_UND: + case PSR_AA32_MODE_UND: mode = 5; break; - case COMPAT_PSR_MODE_SYS: + case PSR_AA32_MODE_SYS: mode = 0; /* SYS maps to USR */ break; @@ -143,13 +143,13 @@ unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) */ static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu) { - unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; + unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; switch (mode) { - case COMPAT_PSR_MODE_SVC: return KVM_SPSR_SVC; - case COMPAT_PSR_MODE_ABT: return KVM_SPSR_ABT; - case COMPAT_PSR_MODE_UND: return KVM_SPSR_UND; - case COMPAT_PSR_MODE_IRQ: return KVM_SPSR_IRQ; - case COMPAT_PSR_MODE_FIQ: return KVM_SPSR_FIQ; + case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC; + case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT; + case PSR_AA32_MODE_UND: return KVM_SPSR_UND; + case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ; + case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ; default: BUG(); } } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index a74311beda35..e37c78bbe1ca 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -42,8 +42,8 @@ static const struct kvm_regs default_regs_reset = { }; static const struct kvm_regs default_regs_reset32 = { - .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT | - COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT), + .regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | + PSR_AA32_I_BIT | PSR_AA32_F_BIT), }; static bool cpu_has_32bit_el1(void) @@ -77,8 +77,12 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ARM_PMU_V3: r = kvm_arm_support_pmu_v3(); break; + case KVM_CAP_ARM_INJECT_SERROR_ESR: + r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN); + break; case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_VCPU_ATTRIBUTES: + case KVM_CAP_VCPU_EVENTS: r = 1; break; default: diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index a4363735d3f8..22fbbdbece3c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -194,7 +194,16 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, if (!p->is_write) return read_from_write_only(vcpu, p, r); - kvm_set_way_flush(vcpu); + /* + * Only track S/W ops if we don't have FWB. It still indicates + * that the guest is a bit broken (S/W operations should only + * be done by firmware, knowing that there is only a single + * CPU left in the system, and certainly not from non-secure + * software). + */ + if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + kvm_set_way_flush(vcpu); + return true; } @@ -243,10 +252,43 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { + bool g1; + if (!p->is_write) return read_from_write_only(vcpu, p, r); - vgic_v3_dispatch_sgi(vcpu, p->regval); + /* + * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates + * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group, + * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively + * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure + * group. 
+ */ + if (p->is_aarch32) { + switch (p->Op1) { + default: /* Keep GCC quiet */ + case 0: /* ICC_SGI1R */ + g1 = true; + break; + case 1: /* ICC_ASGI1R */ + case 2: /* ICC_SGI0R */ + g1 = false; + break; + } + } else { + switch (p->Op2) { + default: /* Keep GCC quiet */ + case 5: /* ICC_SGI1R_EL1 */ + g1 = true; + break; + case 6: /* ICC_ASGI1R_EL1 */ + case 7: /* ICC_SGI0R_EL1 */ + g1 = false; + break; + } + } + + vgic_v3_dispatch_sgi(vcpu, p->regval, g1); return true; } @@ -1303,6 +1345,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only }, { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only }, { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi }, + { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi }, + { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi }, { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only }, { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only }, { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only }, @@ -1613,8 +1657,6 @@ static const struct sys_reg_desc cp14_64_regs[] = { * register). */ static const struct sys_reg_desc cp15_regs[] = { - { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, - { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, @@ -1737,8 +1779,10 @@ static const struct sys_reg_desc cp15_regs[] = { static const struct sys_reg_desc cp15_64_regs[] = { { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr }, - { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, + { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */ { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, + { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ + { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval }, }; diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 137710f4dac3..68755fd70dcf 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -lib-y := bitops.o clear_user.o delay.o copy_from_user.o \ +lib-y := clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o copy_page.o \ clear_page.o memchr.o memcpy.o memmove.o memset.o \ memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \ diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S deleted file mode 100644 index 43ac736baa5b..000000000000 --- a/arch/arm64/lib/bitops.S +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Based on arch/arm/lib/bitops.h - * - * Copyright (C) 2013 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <linux/linkage.h> -#include <asm/assembler.h> -#include <asm/lse.h> - -/* - * x0: bits 5:0 bit offset - * bits 31:6 word offset - * x1: address - */ - .macro bitop, name, llsc, lse -ENTRY( \name ) - and w3, w0, #63 // Get bit offset - eor w0, w0, w3 // Clear low bits - mov x2, #1 - add x1, x1, x0, lsr #3 // Get word offset -alt_lse " prfm pstl1strm, [x1]", "nop" - lsl x3, x2, x3 // Create mask - -alt_lse "1: ldxr x2, [x1]", "\lse x3, [x1]" -alt_lse " \llsc x2, x2, x3", "nop" -alt_lse " stxr w0, x2, [x1]", "nop" -alt_lse " cbnz w0, 1b", "nop" - - ret -ENDPROC(\name ) - .endm - - .macro testop, name, llsc, lse -ENTRY( \name ) - and w3, w0, #63 // Get bit offset - eor w0, w0, w3 // Clear low bits - mov x2, #1 - add x1, x1, x0, lsr #3 // Get word offset -alt_lse " prfm pstl1strm, [x1]", "nop" - lsl x4, x2, x3 // Create mask - -alt_lse "1: ldxr x2, [x1]", "\lse x4, x2, [x1]" - lsr x0, x2, x3 -alt_lse " \llsc x2, x2, x4", "nop" -alt_lse " stlxr w5, x2, [x1]", "nop" -alt_lse " cbnz w5, 1b", "nop" -alt_lse " dmb ish", "nop" - - and x0, x0, #1 - ret -ENDPROC(\name ) - .endm - -/* - * Atomic bit operations. - */ - bitop change_bit, eor, steor - bitop clear_bit, bic, stclr - bitop set_bit, orr, stset - - testop test_and_change_bit, eor, ldeoral - testop test_and_clear_bit, bic, ldclral - testop test_and_set_bit, orr, ldsetal diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 30334d81b021..0c22ede52f90 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -35,7 +35,7 @@ * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(flush_icache_range) +ENTRY(__flush_icache_range) /* FALLTHROUGH */ /* @@ -77,7 +77,7 @@ alternative_else_nop_endif 9: mov x0, #-EFAULT b 1b -ENDPROC(flush_icache_range) +ENDPROC(__flush_icache_range) ENDPROC(__flush_cache_user_range) /* diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 61e93f0b5482..072c51fb07d7 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -355,7 +355,7 @@ static int __init atomic_pool_init(void) if (dev_get_cma_area(NULL)) page = dma_alloc_from_contiguous(NULL, nr_pages, - pool_size_order, GFP_KERNEL); + pool_size_order, false); else page = alloc_pages(GFP_DMA32, pool_size_order); @@ -573,7 +573,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, struct page *page; page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, - get_order(size), gfp); + get_order(size), gfp & __GFP_NOWARN); if (!page) return NULL; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index b8eecc7b9531..50b30ff30de4 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -379,12 +379,12 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re #define VM_FAULT_BADMAP 0x010000 #define VM_FAULT_BADACCESS 0x020000 -static int __do_page_fault(struct mm_struct *mm, unsigned long addr, +static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int mm_flags, unsigned long vm_flags, struct task_struct *tsk) { struct vm_area_struct *vma; - int fault; + vm_fault_t fault; vma = find_vma(mm, addr); fault = VM_FAULT_BADMAP; @@ -427,7 +427,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, struct task_struct *tsk; struct mm_struct *mm; struct siginfo si; - int fault, major = 0; + vm_fault_t fault, major = 0; unsigned long vm_flags = VM_READ | VM_WRITE; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -727,12 +727,7 @@ static 
const struct fault_info fault_info[] = { int handle_guest_sea(phys_addr_t addr, unsigned int esr) { - int ret = -ENOENT; - - if (IS_ENABLED(CONFIG_ACPI_APEI_SEA)) - ret = ghes_notify_sea(); - - return ret; + return ghes_notify_sea(); } asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, @@ -879,7 +874,7 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) */ WARN_ON_ONCE(in_interrupt()); - config_sctlr_el1(SCTLR_EL1_SPAN, 0); + sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); asm(SET_PSTATE_PAN(1)); } #endif /* CONFIG_ARM64_PAN */ diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 1059884f9a6f..30695a868107 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -66,6 +66,7 @@ void __sync_icache_dcache(pte_t pte) sync_icache_aliases(page_address(page), PAGE_SIZE << compound_order(page)); } +EXPORT_SYMBOL_GPL(__sync_icache_dcache); /* * This function is called when a page has been modified by the kernel. Mark @@ -82,7 +83,7 @@ EXPORT_SYMBOL(flush_dcache_page); /* * Additional functions defined in assembly. */ -EXPORT_SYMBOL(flush_icache_range); +EXPORT_SYMBOL(__flush_icache_range); #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index ecc6818191df..192b3ba07075 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -108,7 +108,6 @@ static pte_t get_clear_flush(struct mm_struct *mm, unsigned long pgsize, unsigned long ncontig) { - struct vm_area_struct vma = { .vm_mm = mm }; pte_t orig_pte = huge_ptep_get(ptep); bool valid = pte_valid(orig_pte); unsigned long i, saddr = addr; @@ -125,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm, orig_pte = pte_mkdirty(orig_pte); } - if (valid) + if (valid) { + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); flush_tlb_range(&vma, saddr, addr); + } return orig_pte; } @@ -145,7 +146,7 @@ static void clear_flush(struct mm_struct *mm, unsigned long pgsize, unsigned long ncontig) { - struct vm_area_struct vma = { .vm_mm = mm }; + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); unsigned long i, saddr = addr; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 325cfb3b858a..787e27964ab9 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -287,7 +287,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) #ifdef CONFIG_HAVE_ARCH_PFN_VALID int pfn_valid(unsigned long pfn) { - return memblock_is_map_memory(pfn << PAGE_SHIFT); + phys_addr_t addr = pfn << PAGE_SHIFT; + + if ((addr >> PAGE_SHIFT) != pfn) + return 0; + return memblock_is_map_memory(addr); } EXPORT_SYMBOL(pfn_valid); #endif @@ -611,11 +615,13 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Make sure we chose the upper bound of sizeof(struct page) - * correctly. + * correctly when sizing the VMEMMAP array. 
*/ BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); +#endif if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 493ff75670ff..65f86271f02b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -45,6 +45,7 @@ #include <asm/memblock.h> #include <asm/mmu_context.h> #include <asm/ptdump.h> +#include <asm/tlbflush.h> #define NO_BLOCK_MAPPINGS BIT(0) #define NO_CONT_MAPPINGS BIT(1) @@ -977,12 +978,51 @@ int pmd_clear_huge(pmd_t *pmdp) return 1; } -int pud_free_pmd_page(pud_t *pud) +int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) { - return pud_none(*pud); + pte_t *table; + pmd_t pmd; + + pmd = READ_ONCE(*pmdp); + + /* No-op for empty entry and WARN_ON for valid entry */ + if (!pmd_present(pmd) || !pmd_table(pmd)) { + VM_WARN_ON(!pmd_table(pmd)); + return 1; + } + + table = pte_offset_kernel(pmdp, addr); + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable(addr); + pte_free_kernel(NULL, table); + return 1; } -int pmd_free_pte_page(pmd_t *pmd) +int pud_free_pmd_page(pud_t *pudp, unsigned long addr) { - return pmd_none(*pmd); + pmd_t *table; + pmd_t *pmdp; + pud_t pud; + unsigned long next, end; + + pud = READ_ONCE(*pudp); + + /* No-op for empty entry and WARN_ON for valid entry */ + if (!pud_present(pud) || !pud_table(pud)) { + VM_WARN_ON(!pud_table(pud)); + return 1; + } + + table = pmd_offset(pudp, addr); + pmdp = table; + next = addr; + end = addr + PUD_SIZE; + do { + pmd_free_pte_page(pmdp, next); + } while (pmdp++, next += PMD_SIZE, next != end); + + pud_clear(pudp); + __flush_tlb_kernel_pgtable(addr); + pmd_free(NULL, table); + return 1; } diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index dad128ba98bf..146c04ceaa51 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -70,19 +70,32 @@ EXPORT_SYMBOL(cpumask_of_node); #endif -static void map_cpu_to_node(unsigned int cpu, int nid) +static void numa_update_cpu(unsigned int cpu, bool remove) { - set_cpu_numa_node(cpu, nid); - if (nid >= 0) + int nid = cpu_to_node(cpu); + + if (nid == NUMA_NO_NODE) + return; + + if (remove) + cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); + else cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); } -void numa_clear_node(unsigned int cpu) +void numa_add_cpu(unsigned int cpu) { - int nid = cpu_to_node(cpu); + numa_update_cpu(cpu, false); +} - if (nid >= 0) - cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); +void numa_remove_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, true); +} + +void numa_clear_node(unsigned int cpu) +{ + numa_remove_cpu(cpu); set_cpu_numa_node(cpu, NUMA_NO_NODE); } @@ -116,7 +129,7 @@ static void __init setup_node_to_cpumask_map(void) */ void numa_store_cpu_info(unsigned int cpu) { - map_cpu_to_node(cpu, cpu_to_node_map[cpu]); + set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); } void __init early_map_cpu_to_node(unsigned int cpu, int nid) diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 02b18f8b2905..24d786fc3a4c 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -10,18 +10,7 @@ static int ptdump_show(struct seq_file *m, void *v) ptdump_walk_pgd(m, info); return 0; } - -static int ptdump_open(struct inode *inode, struct file *file) -{ - return single_open(file, ptdump_show, inode->i_private); -} - -static const struct file_operations ptdump_fops = { - .open = ptdump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(ptdump); int 
ptdump_debugfs_register(struct ptdump_info *info, const char *name) {