From d75ba503a972df09784f1a332ba356ef8b42a0a6 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Thu, 19 Sep 2013 18:06:39 +0200 Subject: ARM: prepare armv7.h to be included from assembly source armv7.h contains some useful constants, but also C prototypes. To include it also in assembly files, protect the non-assembly part appropriately. Signed-off-by: Andre Przywara --- arch/arm/include/asm/armv7.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/include/asm/armv7.h b/arch/arm/include/asm/armv7.h index 392d6a2db5..0f7cbbfc12 100644 --- a/arch/arm/include/asm/armv7.h +++ b/arch/arm/include/asm/armv7.h @@ -7,7 +7,6 @@ */ #ifndef ARMV7_H #define ARMV7_H -#include /* Cortex-A9 revisions */ #define MIDR_CORTEX_A9_R0P1 0x410FC091 @@ -41,6 +40,9 @@ #define ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA 3 #define ARMV7_CLIDR_CTYPE_UNIFIED 4 +#ifndef __ASSEMBLY__ +#include + /* * CP15 Barrier instructions * Please note that we have separate barrier instructions in ARMv7 @@ -58,4 +60,6 @@ void v7_outer_cache_inval_all(void); void v7_outer_cache_flush_range(u32 start, u32 end); void v7_outer_cache_inval_range(u32 start, u32 end); +#endif /* ! __ASSEMBLY__ */ + #endif -- cgit v1.2.1 From 45b940d6f9a9d4989452ea67480e299bfa51ee19 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Thu, 19 Sep 2013 18:06:40 +0200 Subject: ARM: add secure monitor handler to switch to non-secure state A prerequisite for using virtualization is to be in HYP mode, which requires the CPU to be in non-secure state first. Add a new file in arch/arm/cpu/armv7 to hold a monitor handler routine which switches the CPU to non-secure state by setting the NS and associated bits. According to the ARM architecture reference manual this should not be done in SVC mode, so we have to setup a SMC handler for this. We create a new vector table to avoid interference with other boards. The MVBAR register will be programmed later just before the smc call. Signed-off-by: Andre Przywara --- arch/arm/cpu/armv7/Makefile | 4 +++ arch/arm/cpu/armv7/nonsec_virt.S | 53 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 arch/arm/cpu/armv7/nonsec_virt.S (limited to 'arch') diff --git a/arch/arm/cpu/armv7/Makefile b/arch/arm/cpu/armv7/Makefile index b723e22a5c..3466c7ac11 100644 --- a/arch/arm/cpu/armv7/Makefile +++ b/arch/arm/cpu/armv7/Makefile @@ -20,6 +20,10 @@ ifneq ($(CONFIG_AM43XX)$(CONFIG_AM33XX)$(CONFIG_OMAP44XX)$(CONFIG_OMAP54XX)$(CON SOBJS += lowlevel_init.o endif +ifneq ($(CONFIG_ARMV7_NONSEC),) +SOBJS += nonsec_virt.o +endif + SRCS := $(START:.o=.S) $(COBJS:.o=.c) OBJS := $(addprefix $(obj),$(COBJS) $(SOBJS)) START := $(addprefix $(obj),$(START)) diff --git a/arch/arm/cpu/armv7/nonsec_virt.S b/arch/arm/cpu/armv7/nonsec_virt.S new file mode 100644 index 0000000000..d11eb2dc63 --- /dev/null +++ b/arch/arm/cpu/armv7/nonsec_virt.S @@ -0,0 +1,53 @@ +/* + * code for switching cores into non-secure state + * + * Copyright (c) 2013 Andre Przywara + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include
+
+/* the vector table for secure state */
+_monitor_vectors:
+	.word 0	/* reset */
+	.word 0	/* undef */
+	adr pc, _secure_monitor
+	.word 0
+	.word 0
+	.word 0
+	.word 0
+	.word 0
+
+/*
+ * secure monitor handler
+ * U-Boot calls this "software interrupt" in start.S
+ * This is executed on an "smc" instruction; we use "smc #0" to switch
+ * to non-secure state.
+ * We use only r0 and r1 here, due to constraints in the caller.
+ */
+	.align	5
+_secure_monitor:
+	mrc	p15, 0, r1, c1, c1, 0	@ read SCR
+	bic	r1, r1, #0x4e		@ clear IRQ, FIQ, EA, nET bits
+	orr	r1, r1, #0x31		@ enable NS, AW, FW bits
+
+	mcr	p15, 0, r1, c1, c1, 0	@ write SCR (with NS bit set)
+
+	movs	pc, lr			@ return to non-secure SVC
-- cgit v1.2.1

From 16212b594f385bd594d5d316bf11b13c1186e3d7 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Thu, 19 Sep 2013 18:06:41 +0200
Subject: ARM: add assembly routine to switch to non-secure state

While actually switching to non-secure state is one thing, another part of
this process is to make sure that we still have full access to the interrupt
controller (GIC). The GIC is fully aware of secure vs. non-secure state: some
registers are banked, others may be configured to be accessible from secure
state only.

To be as generic as possible, we get the GIC memory-mapped address based on
the PERIPHBASE value in the CBAR register. Since this register is not
architecturally defined, we first check the MIDR to make sure we are running
on a Cortex-A15 or Cortex-A7. For CPUs without the CBAR, or boards with wrong
information in it, we allow providing the base address as a configuration
variable.

Now that we know the GIC address, we:
a) allow private interrupts to be delivered to the core
   (GICD_IGROUPR0 = 0xFFFFFFFF)
b) enable the CPU interface (GICC_CTLR[0] = 1)
c) set the priority filter to allow non-secure interrupts (GICC_PMR = 0xFF)

We also allow access to all coprocessor interfaces from non-secure state by
writing the appropriate bits in the NSACR register.

The generic timer base frequency register is only accessible from secure
state, so we have to program it now. This should actually have been done by
the primary firmware before, but some boards seem to omit it, so if needed we
do it here with a board-specific value. The Versatile Express board does not
need this, so we remove the frequency from the configuration file here.

After having switched to non-secure state, we also enable the non-secure GIC
CPU interface, since this register is banked.

Since we need to call this routine also directly from the smp_pen later
(where we don't have any stack), we can only use the caller-saved registers
r0-r3 and r12 to not mess with the compiler.
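For reference, a board would consume the new configuration options roughly
like this (a minimal sketch: only the option names come from this series, the
board header name and both values are made up for illustration):

	/* include/configs/myboard.h -- hypothetical example */
	#define CONFIG_ARMV7_NONSEC				/* enable the non-secure switch code */
	#define CONFIG_ARM_GIC_BASE_ADDRESS	0x2c000000	/* PERIPHBASE, if the CBAR is unusable */
	#define CONFIG_SYS_CLK_FREQ		24000000	/* arch timer frequency, if firmware left CNTFRQ unset */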
Signed-off-by: Andre Przywara --- arch/arm/cpu/armv7/nonsec_virt.S | 87 ++++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/armv7.h | 21 ++++++++++ arch/arm/include/asm/gic.h | 17 ++++++++ 3 files changed, 125 insertions(+) create mode 100644 arch/arm/include/asm/gic.h (limited to 'arch') diff --git a/arch/arm/cpu/armv7/nonsec_virt.S b/arch/arm/cpu/armv7/nonsec_virt.S index d11eb2dc63..3dd60b7137 100644 --- a/arch/arm/cpu/armv7/nonsec_virt.S +++ b/arch/arm/cpu/armv7/nonsec_virt.S @@ -23,6 +23,11 @@ */ #include +#include +#include +#include + +.arch_extension sec /* the vector table for secure state */ _monitor_vectors: @@ -51,3 +56,85 @@ _secure_monitor: mcr p15, 0, r1, c1, c1, 0 @ write SCR (with NS bit set) movs pc, lr @ return to non-secure SVC + +/* + * Switch a core to non-secure state. + * + * 1. initialize the GIC per-core interface + * 2. allow coprocessor access in non-secure modes + * 3. switch the cpu mode (by calling "smc #0") + * + * Called from smp_pen by secondary cores and directly by the BSP. + * Do not assume that the stack is available and only use registers + * r0-r3 and r12. + * + * PERIPHBASE is used to get the GIC address. This could be 40 bits long, + * though, but we check this in C before calling this function. + */ +ENTRY(_nonsec_init) +#ifdef CONFIG_ARM_GIC_BASE_ADDRESS + ldr r2, =CONFIG_ARM_GIC_BASE_ADDRESS +#else + mrc p15, 4, r2, c15, c0, 0 @ read CBAR + bfc r2, #0, #15 @ clear reserved bits +#endif + add r3, r2, #GIC_DIST_OFFSET @ GIC dist i/f offset + mvn r1, #0 @ all bits to 1 + str r1, [r3, #GICD_IGROUPRn] @ allow private interrupts + + mrc p15, 0, r0, c0, c0, 0 @ read MIDR + ldr r1, =MIDR_PRIMARY_PART_MASK + and r0, r0, r1 @ mask out variant and revision + + ldr r1, =MIDR_CORTEX_A7_R0P0 & MIDR_PRIMARY_PART_MASK + cmp r0, r1 @ check for Cortex-A7 + + ldr r1, =MIDR_CORTEX_A15_R0P0 & MIDR_PRIMARY_PART_MASK + cmpne r0, r1 @ check for Cortex-A15 + + movne r1, #GIC_CPU_OFFSET_A9 @ GIC CPU offset for A9 + moveq r1, #GIC_CPU_OFFSET_A15 @ GIC CPU offset for A15/A7 + add r3, r2, r1 @ r3 = GIC CPU i/f addr + + mov r1, #1 @ set GICC_CTLR[enable] + str r1, [r3, #GICC_CTLR] @ and clear all other bits + mov r1, #0xff + str r1, [r3, #GICC_PMR] @ set priority mask register + + movw r1, #0x3fff + movt r1, #0x0006 + mcr p15, 0, r1, c1, c1, 2 @ NSACR = all copros to non-sec + +/* The CNTFRQ register of the generic timer needs to be + * programmed in secure state. Some primary bootloaders / firmware + * omit this, so if the frequency is provided in the configuration, + * we do this here instead. + * But first check if we have the generic timer. 
+ */ +#ifdef CONFIG_SYS_CLK_FREQ + mrc p15, 0, r0, c0, c1, 1 @ read ID_PFR1 + and r0, r0, #CPUID_ARM_GENTIMER_MASK @ mask arch timer bits + cmp r0, #(1 << CPUID_ARM_GENTIMER_SHIFT) + ldreq r1, =CONFIG_SYS_CLK_FREQ + mcreq p15, 0, r1, c14, c0, 0 @ write CNTFRQ +#endif + + adr r1, _monitor_vectors + mcr p15, 0, r1, c12, c0, 1 @ set MVBAR to secure vectors + + mrc p15, 0, ip, c12, c0, 0 @ save secure copy of VBAR + + isb + smc #0 @ call into MONITOR mode + + mcr p15, 0, ip, c12, c0, 0 @ write non-secure copy of VBAR + + mov r1, #1 + str r1, [r3, #GICC_CTLR] @ enable non-secure CPU i/f + add r2, r2, #GIC_DIST_OFFSET + str r1, [r2, #GICD_CTLR] @ allow private interrupts + + mov r0, r3 @ return GICC address + + bx lr +ENDPROC(_nonsec_init) diff --git a/arch/arm/include/asm/armv7.h b/arch/arm/include/asm/armv7.h index 0f7cbbfc12..3dcfc8fb29 100644 --- a/arch/arm/include/asm/armv7.h +++ b/arch/arm/include/asm/armv7.h @@ -18,6 +18,22 @@ #define MIDR_CORTEX_A15_R0P0 0x410FC0F0 #define MIDR_CORTEX_A15_R2P2 0x412FC0F2 +/* Cortex-A7 revisions */ +#define MIDR_CORTEX_A7_R0P0 0x410FC070 + +#define MIDR_PRIMARY_PART_MASK 0xFF0FFFF0 + +/* ID_PFR1 feature fields */ +#define CPUID_ARM_SEC_SHIFT 4 +#define CPUID_ARM_SEC_MASK (0xF << CPUID_ARM_SEC_SHIFT) +#define CPUID_ARM_VIRT_SHIFT 12 +#define CPUID_ARM_VIRT_MASK (0xF << CPUID_ARM_VIRT_SHIFT) +#define CPUID_ARM_GENTIMER_SHIFT 16 +#define CPUID_ARM_GENTIMER_MASK (0xF << CPUID_ARM_GENTIMER_SHIFT) + +/* valid bits in CBAR register / PERIPHBASE value */ +#define CBAR_MASK 0xFFFF8000 + /* CCSIDR */ #define CCSIDR_LINE_SIZE_OFFSET 0 #define CCSIDR_LINE_SIZE_MASK 0x7 @@ -60,6 +76,11 @@ void v7_outer_cache_inval_all(void); void v7_outer_cache_flush_range(u32 start, u32 end); void v7_outer_cache_inval_range(u32 start, u32 end); +#ifdef CONFIG_ARMV7_NONSEC +/* defined in assembly file */ +unsigned int _nonsec_init(void); +#endif /* CONFIG_ARMV7_NONSEC */ + #endif /* ! __ASSEMBLY__ */ #endif diff --git a/arch/arm/include/asm/gic.h b/arch/arm/include/asm/gic.h new file mode 100644 index 0000000000..c2b1e28624 --- /dev/null +++ b/arch/arm/include/asm/gic.h @@ -0,0 +1,17 @@ +#ifndef __GIC_V2_H__ +#define __GIC_V2_H__ + +/* register offsets for the ARM generic interrupt controller (GIC) */ + +#define GIC_DIST_OFFSET 0x1000 +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IGROUPRn 0x0080 +#define GICD_SGIR 0x0F00 + +#define GIC_CPU_OFFSET_A9 0x0100 +#define GIC_CPU_OFFSET_A15 0x2000 +#define GICC_CTLR 0x0000 +#define GICC_PMR 0x0004 + +#endif -- cgit v1.2.1 From 1ef923851ab8ffcc4265fd991815b88d9c1f12d7 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Thu, 19 Sep 2013 18:06:42 +0200 Subject: ARM: add C function to switch to non-secure state The core specific part of the work is done in the assembly routine in nonsec_virt.S, introduced with the previous patch, but for the full glory we need to setup the GIC distributor interface once for the whole system, which is done in C here. The routine is placed in arch/arm/cpu/armv7 to allow easy access from other ARMv7 boards. We check the availability of the security extensions first. Since we need a safe way to access the GIC, we use the PERIPHBASE registers on Cortex-A15 and A7 CPUs and do some sanity checks. Boards not implementing the CBAR can override this value via a configuration file variable. 
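As an illustration of the ID_PFR1 check described above, a C helper along
these lines could test for the security extensions using the new armv7.h
masks (a sketch only: the helper name is made up, read_id_pfr1() is the
static helper added to virt-v7.c below):

	/* hypothetical helper: returns non-zero if the security extensions are implemented */
	static int has_security_extensions(void)
	{
		unsigned int pfr1 = read_id_pfr1();	/* ID_PFR1, read via MRC p15, 0, <Rt>, c0, c1, 1 */

		return (pfr1 & CPUID_ARM_SEC_MASK) >> CPUID_ARM_SEC_SHIFT;
	}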
Then we actually do the GIC enablement:
a) enable the GIC distributor, both for non-secure and secure state
   (GICD_CTLR[1:0] = 11b)
b) allow all interrupts to be handled from non-secure state
   (GICD_IGROUPRn = 0xFFFFFFFF)

The core-specific GIC setup is then done in the assembly routine.

Signed-off-by: Andre Przywara
---
 arch/arm/cpu/armv7/Makefile  |   1 +
 arch/arm/cpu/armv7/virt-v7.c | 122 +++++++++++++++++++++++++++++++++++++++++++
 arch/arm/include/asm/armv7.h |   3 ++
 3 files changed, 126 insertions(+)
 create mode 100644 arch/arm/cpu/armv7/virt-v7.c
(limited to 'arch')

diff --git a/arch/arm/cpu/armv7/Makefile b/arch/arm/cpu/armv7/Makefile
index 3466c7ac11..024c28b6e3 100644
--- a/arch/arm/cpu/armv7/Makefile
+++ b/arch/arm/cpu/armv7/Makefile
@@ -22,6 +22,7 @@ endif

 ifneq ($(CONFIG_ARMV7_NONSEC),)
 SOBJS += nonsec_virt.o
+COBJS += virt-v7.o
 endif

 SRCS := $(START:.o=.S) $(COBJS:.o=.c)

diff --git a/arch/arm/cpu/armv7/virt-v7.c b/arch/arm/cpu/armv7/virt-v7.c
new file mode 100644
index 0000000000..068ac85225
--- /dev/null
+++ b/arch/arm/cpu/armv7/virt-v7.c
@@ -0,0 +1,122 @@
+/*
+ * (C) Copyright 2013
+ * Andre Przywara, Linaro
+ *
+ * Routines to transition ARMv7 processors from secure into non-secure state
+ * needed to enable ARMv7 virtualization for current hypervisors
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include
+#include
+#include
+#include
+
+unsigned long gic_dist_addr;
+
+static unsigned int read_id_pfr1(void)
+{
+	unsigned int reg;
+
+	asm("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
+	return reg;
+}
+
+static unsigned long get_gicd_base_address(void)
+{
+#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
+	return CONFIG_ARM_GIC_BASE_ADDRESS + GIC_DIST_OFFSET;
+#else
+	unsigned midr;
+	unsigned periphbase;
+
+	/* check whether we are a Cortex-A15 or A7.
+	 * The actual HYP switch should work with all CPUs supporting
+	 * the virtualization extension, but we need the GIC address,
+	 * which we know for sure only for those two CPUs.
+	 */
+	asm("mrc p15, 0, %0, c0, c0, 0\n" : "=r"(midr));
+	switch (midr & MIDR_PRIMARY_PART_MASK) {
+	case MIDR_CORTEX_A15_R0P0:
+	case MIDR_CORTEX_A7_R0P0:
+		break;
+	default:
+		printf("nonsec: could not determine GIC address.\n");
+		return -1;
+	}
+
+	/* get the GIC base address from the CBAR register */
+	asm("mrc p15, 4, %0, c15, c0, 0\n" : "=r" (periphbase));
+
+	/* the PERIPHBASE can be mapped above 4 GB (lower 8 bits used to
+	 * encode this). Bail out here since we cannot access this without
+	 * enabling paging.
+ */ + if ((periphbase & 0xff) != 0) { + printf("nonsec: PERIPHBASE is above 4 GB, no access.\n"); + return -1; + } + + return (periphbase & CBAR_MASK) + GIC_DIST_OFFSET; +#endif +} + +int armv7_switch_nonsec(void) +{ + unsigned int reg; + unsigned itlinesnr, i; + + /* check whether the CPU supports the security extensions */ + reg = read_id_pfr1(); + if ((reg & 0xF0) == 0) { + printf("nonsec: Security extensions not implemented.\n"); + return -1; + } + + /* the SCR register will be set directly in the monitor mode handler, + * according to the spec one should not tinker with it in secure state + * in SVC mode. Do not try to read it once in non-secure state, + * any access to it will trap. + */ + + gic_dist_addr = get_gicd_base_address(); + if (gic_dist_addr == -1) + return -1; + + /* enable the GIC distributor */ + writel(readl(gic_dist_addr + GICD_CTLR) | 0x03, + gic_dist_addr + GICD_CTLR); + + /* TYPER[4:0] contains an encoded number of available interrupts */ + itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f; + + /* set all bits in the GIC group registers to one to allow access + * from non-secure state. The first 32 interrupts are private per + * CPU and will be set later when enabling the GIC for each core + */ + for (i = 1; i <= itlinesnr; i++) + writel((unsigned)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i); + + /* call the non-sec switching code on this CPU */ + _nonsec_init(); + + return 0; +} diff --git a/arch/arm/include/asm/armv7.h b/arch/arm/include/asm/armv7.h index 3dcfc8fb29..b352d431b1 100644 --- a/arch/arm/include/asm/armv7.h +++ b/arch/arm/include/asm/armv7.h @@ -77,6 +77,9 @@ void v7_outer_cache_flush_range(u32 start, u32 end); void v7_outer_cache_inval_range(u32 start, u32 end); #ifdef CONFIG_ARMV7_NONSEC + +int armv7_switch_nonsec(void); + /* defined in assembly file */ unsigned int _nonsec_init(void); #endif /* CONFIG_ARMV7_NONSEC */ -- cgit v1.2.1 From bb975455650b1f36681de31a93ffe54952ed3a6b Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Thu, 19 Sep 2013 18:06:43 +0200 Subject: ARM: trigger non-secure state switch during bootm execution To actually trigger the non-secure switch we just implemented, call the switching routine from within the bootm command implementation. This way we automatically enable this feature without further user intervention. 
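Should a platform want to trigger the switch outside of bootm, the call is
the same one used here (sketch only: the calling function and the error
message are illustrative and not part of the patch):

	#include <common.h>
	#include <asm/armv7.h>

	/* illustrative call site: switch this CPU to non-secure state, report failure */
	static void board_try_nonsec(void)
	{
		if (armv7_switch_nonsec() != 0)
			printf("could not switch to non-secure state\n");
	}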
Signed-off-by: Andre Przywara
---
 arch/arm/lib/bootm.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)
(limited to 'arch')

diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c
index eefb456efb..b3a961a825 100644
--- a/arch/arm/lib/bootm.c
+++ b/arch/arm/lib/bootm.c
@@ -22,6 +22,10 @@
 #include
 #include
 
+#ifdef CONFIG_ARMV7_NONSEC
+#include
+#endif
+
 DECLARE_GLOBAL_DATA_PTR;
 
 static struct tag *params;
@@ -181,6 +185,14 @@ static void setup_end_tag(bd_t *bd)
 
 __weak void setup_board_tags(struct tag **in_params) {}
 
+static void do_nonsec_virt_switch(void)
+{
+#if defined(CONFIG_ARMV7_NONSEC) || defined(CONFIG_ARMV7_VIRT)
+	if (armv7_switch_nonsec() == 0)
+		debug("entered non-secure state\n");
+#endif
+}
+
 /* Subcommand: PREP */
 static void boot_prep_linux(bootm_headers_t *images)
 {
@@ -217,6 +229,7 @@ static void boot_prep_linux(bootm_headers_t *images)
 		printf("FDT and ATAGS support not compiled in - hanging\n");
 		hang();
 	}
+	do_nonsec_virt_switch();
 }
 
 /* Subcommand: GO */
-- cgit v1.2.1

From ba6a1698116da272f14c53a3ae41467cb7fc4372 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Thu, 19 Sep 2013 18:06:44 +0200
Subject: ARM: add SMP support for non-secure switch

Currently the non-secure switch is only done for the boot processor. To
enable full SMP support, we have to switch all secondary cores into
non-secure state as well.

So we add an entry point for secondary CPUs coming out of low-power state
and make sure we put them into WFI again after having switched to non-secure
state. For this we acknowledge and EOI the wake-up IPI, then go into WFI.
Once woken up again later, we sanity-check that the start address has
actually been changed (since another attempt to switch to non-secure state
would block the core) and jump to the new address.

The actual CPU kick is done by sending an inter-processor interrupt via the
GIC to all CPU interfaces except the requesting processor. The secondary
cores will then set up their respective GIC CPU interface.

While this approach is pretty universal across several ARMv7 boards, we make
this function weak in case someone needs to tweak it for a specific board.

The way of setting the secondaries' start address is board-specific, but
usually differs only in the actual SMP pen address, so we also provide a
weak default implementation and just rely on the proper address being set
in the config file.

Signed-off-by: Andre Przywara
---
 arch/arm/cpu/armv7/nonsec_virt.S | 35 +++++++++++++++++++++++++++++++++++
 arch/arm/cpu/armv7/virt-v7.c     | 16 +++++++++++++++-
 arch/arm/include/asm/armv7.h     |  1 +
 arch/arm/include/asm/gic.h       |  2 ++
 4 files changed, 53 insertions(+), 1 deletion(-)
(limited to 'arch')

diff --git a/arch/arm/cpu/armv7/nonsec_virt.S b/arch/arm/cpu/armv7/nonsec_virt.S
index 3dd60b7137..cbee8f70a2 100644
--- a/arch/arm/cpu/armv7/nonsec_virt.S
+++ b/arch/arm/cpu/armv7/nonsec_virt.S
@@ -57,6 +57,28 @@ _secure_monitor:
 
 	movs	pc, lr			@ return to non-secure SVC
 
+/*
+ * Secondary CPUs start here and call the code for the core-specific parts
+ * of the non-secure and HYP mode transition. The GIC distributor-specific
+ * code has already been executed by a C function before.
+ * Then they go back to wfi and wait to be woken up by the kernel again.
+ */ +ENTRY(_smp_pen) + mrs r0, cpsr + orr r0, r0, #0xc0 + msr cpsr, r0 @ disable interrupts + ldr r1, =_start + mcr p15, 0, r1, c12, c0, 0 @ set VBAR + + bl _nonsec_init + + ldr r1, [r0, #GICC_IAR] @ acknowledge IPI + str r1, [r0, #GICC_EOIR] @ signal end of interrupt + + adr r0, _smp_pen @ do not use this address again + b smp_waitloop @ wait for IPIs, board specific +ENDPROC(_smp_pen) + /* * Switch a core to non-secure state. * @@ -138,3 +160,16 @@ ENTRY(_nonsec_init) bx lr ENDPROC(_nonsec_init) + +#ifdef CONFIG_SMP_PEN_ADDR +/* void __weak smp_waitloop(unsigned previous_address); */ +ENTRY(smp_waitloop) + wfi + ldr r1, =CONFIG_SMP_PEN_ADDR @ load start address + ldr r1, [r1] + cmp r0, r1 @ make sure we dont execute this code + beq smp_waitloop @ again (due to a spurious wakeup) + mov pc, r1 +ENDPROC(smp_waitloop) +.weak smp_waitloop +#endif diff --git a/arch/arm/cpu/armv7/virt-v7.c b/arch/arm/cpu/armv7/virt-v7.c index 068ac85225..a0b8742464 100644 --- a/arch/arm/cpu/armv7/virt-v7.c +++ b/arch/arm/cpu/armv7/virt-v7.c @@ -79,6 +79,17 @@ static unsigned long get_gicd_base_address(void) #endif } +static void kick_secondary_cpus_gic(unsigned long gicdaddr) +{ + /* kick all CPUs (except this one) by writing to GICD_SGIR */ + writel(1U << 24, gicdaddr + GICD_SGIR); +} + +void __weak smp_kick_all_cpus(void) +{ + kick_secondary_cpus_gic(gic_dist_addr); +} + int armv7_switch_nonsec(void) { unsigned int reg; @@ -115,7 +126,10 @@ int armv7_switch_nonsec(void) for (i = 1; i <= itlinesnr; i++) writel((unsigned)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i); - /* call the non-sec switching code on this CPU */ + smp_set_core_boot_addr((unsigned long)_smp_pen, -1); + smp_kick_all_cpus(); + + /* call the non-sec switching code on this CPU also */ _nonsec_init(); return 0; diff --git a/arch/arm/include/asm/armv7.h b/arch/arm/include/asm/armv7.h index b352d431b1..2efd4bcf42 100644 --- a/arch/arm/include/asm/armv7.h +++ b/arch/arm/include/asm/armv7.h @@ -82,6 +82,7 @@ int armv7_switch_nonsec(void); /* defined in assembly file */ unsigned int _nonsec_init(void); +void _smp_pen(void); #endif /* CONFIG_ARMV7_NONSEC */ #endif /* ! __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/gic.h b/arch/arm/include/asm/gic.h index c2b1e28624..a0891cc09c 100644 --- a/arch/arm/include/asm/gic.h +++ b/arch/arm/include/asm/gic.h @@ -13,5 +13,7 @@ #define GIC_CPU_OFFSET_A15 0x2000 #define GICC_CTLR 0x0000 #define GICC_PMR 0x0004 +#define GICC_IAR 0x000C +#define GICC_EOIR 0x0010 #endif -- cgit v1.2.1 From d4296887544ddf95808bfb62f312008f519efb7b Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Thu, 19 Sep 2013 18:06:45 +0200 Subject: ARM: extend non-secure switch to also go into HYP mode For the KVM and XEN hypervisors to be usable, we need to enter the kernel in HYP mode. Now that we already are in non-secure state, HYP mode switching is within short reach. While doing the non-secure switch, we have to enable the HVC instruction and setup the HYP mode HVBAR (while still secure). The actual switch is done by dropping back from a HYP mode handler without actually leaving HYP mode, so we introduce a new handler routine in our new secure exception vector table. In the assembly switching routine we save and restore the banked LR and SP registers around the hypercall to do the actual HYP mode switch. The C routine first checks whether we are in HYP mode already and also whether the virtualization extensions are available. It also checks whether the HYP mode switch was finally successful. 
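The HYP mode check mentioned above boils down to reading the CPSR and
comparing its mode field against the HYP encoding, roughly like this (a
sketch mirroring what armv7_switch_hyp() does; the helper name is made up):

	/* hypothetical helper: true if this core currently executes in HYP mode */
	static int in_hyp_mode(void)
	{
		unsigned int cpsr;

		asm volatile ("mrs %0, cpsr" : "=r" (cpsr));
		return (cpsr & 0x1f) == 0x1a;	/* CPSR.M == 0b11010 (HYP) */
	}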
The bootm command part only calls the new function after the non-secure switch. Signed-off-by: Andre Przywara --- arch/arm/cpu/armv7/Makefile | 2 +- arch/arm/cpu/armv7/nonsec_virt.S | 43 +++++++++++++++++++++++++++++++++++----- arch/arm/cpu/armv7/virt-v7.c | 37 ++++++++++++++++++++++++++++++++++ arch/arm/include/asm/armv7.h | 6 ++++-- arch/arm/lib/bootm.c | 7 ++++++- 5 files changed, 86 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm/cpu/armv7/Makefile b/arch/arm/cpu/armv7/Makefile index 024c28b6e3..ee4b02183a 100644 --- a/arch/arm/cpu/armv7/Makefile +++ b/arch/arm/cpu/armv7/Makefile @@ -20,7 +20,7 @@ ifneq ($(CONFIG_AM43XX)$(CONFIG_AM33XX)$(CONFIG_OMAP44XX)$(CONFIG_OMAP54XX)$(CON SOBJS += lowlevel_init.o endif -ifneq ($(CONFIG_ARMV7_NONSEC),) +ifneq ($(CONFIG_ARMV7_NONSEC)$(CONFIG_ARMV7_VIRT),) SOBJS += nonsec_virt.o COBJS += virt-v7.o endif diff --git a/arch/arm/cpu/armv7/nonsec_virt.S b/arch/arm/cpu/armv7/nonsec_virt.S index cbee8f70a2..358348ffa5 100644 --- a/arch/arm/cpu/armv7/nonsec_virt.S +++ b/arch/arm/cpu/armv7/nonsec_virt.S @@ -1,5 +1,5 @@ /* - * code for switching cores into non-secure state + * code for switching cores into non-secure state and into HYP mode * * Copyright (c) 2013 Andre Przywara * @@ -28,15 +28,16 @@ #include .arch_extension sec +.arch_extension virt -/* the vector table for secure state */ +/* the vector table for secure state and HYP mode */ _monitor_vectors: .word 0 /* reset */ .word 0 /* undef */ adr pc, _secure_monitor .word 0 .word 0 - .word 0 + adr pc, _hyp_trap .word 0 .word 0 @@ -53,10 +54,27 @@ _secure_monitor: bic r1, r1, #0x4e @ clear IRQ, FIQ, EA, nET bits orr r1, r1, #0x31 @ enable NS, AW, FW bits +#ifdef CONFIG_ARMV7_VIRT + mrc p15, 0, r0, c0, c1, 1 @ read ID_PFR1 + and r0, r0, #CPUID_ARM_VIRT_MASK @ mask virtualization bits + cmp r0, #(1 << CPUID_ARM_VIRT_SHIFT) + orreq r1, r1, #0x100 @ allow HVC instruction +#endif + mcr p15, 0, r1, c1, c1, 0 @ write SCR (with NS bit set) +#ifdef CONFIG_ARMV7_VIRT + mrceq p15, 0, r0, c12, c0, 1 @ get MVBAR value + mcreq p15, 4, r0, c12, c0, 0 @ write HVBAR +#endif + movs pc, lr @ return to non-secure SVC +_hyp_trap: + mrs lr, elr_hyp @ for older asm: .byte 0x00, 0xe3, 0x0e, 0xe1 + mov pc, lr @ do no switch modes, but + @ return to caller + /* * Secondary CPUs start here and call the code for the core specific parts * of the non-secure and HYP mode transition. 
The GIC distributor specific @@ -71,9 +89,13 @@ ENTRY(_smp_pen) mcr p15, 0, r1, c12, c0, 0 @ set VBAR bl _nonsec_init + mov r12, r0 @ save GICC address +#ifdef CONFIG_ARMV7_VIRT + bl _switch_to_hyp +#endif - ldr r1, [r0, #GICC_IAR] @ acknowledge IPI - str r1, [r0, #GICC_EOIR] @ signal end of interrupt + ldr r1, [r12, #GICC_IAR] @ acknowledge IPI + str r1, [r12, #GICC_EOIR] @ signal end of interrupt adr r0, _smp_pen @ do not use this address again b smp_waitloop @ wait for IPIs, board specific @@ -173,3 +195,14 @@ ENTRY(smp_waitloop) ENDPROC(smp_waitloop) .weak smp_waitloop #endif + +ENTRY(_switch_to_hyp) + mov r0, lr + mov r1, sp @ save SVC copy of LR and SP + isb + hvc #0 @ for older asm: .byte 0x70, 0x00, 0x40, 0xe1 + mov sp, r1 + mov lr, r0 @ restore SVC copy of LR and SP + + bx lr +ENDPROC(_switch_to_hyp) diff --git a/arch/arm/cpu/armv7/virt-v7.c b/arch/arm/cpu/armv7/virt-v7.c index a0b8742464..6de7fe7813 100644 --- a/arch/arm/cpu/armv7/virt-v7.c +++ b/arch/arm/cpu/armv7/virt-v7.c @@ -3,6 +3,7 @@ * Andre Przywara, Linaro * * Routines to transition ARMv7 processors from secure into non-secure state + * and from non-secure SVC into HYP mode * needed to enable ARMv7 virtualization for current hypervisors * * See file CREDITS for list of people who contributed to this @@ -31,6 +32,14 @@ unsigned long gic_dist_addr; +static unsigned int read_cpsr(void) +{ + unsigned int reg; + + asm volatile ("mrs %0, cpsr\n" : "=r" (reg)); + return reg; +} + static unsigned int read_id_pfr1(void) { unsigned int reg; @@ -90,6 +99,34 @@ void __weak smp_kick_all_cpus(void) kick_secondary_cpus_gic(gic_dist_addr); } +int armv7_switch_hyp(void) +{ + unsigned int reg; + + /* check whether we are in HYP mode already */ + if ((read_cpsr() & 0x1f) == 0x1a) { + debug("CPU already in HYP mode\n"); + return 0; + } + + /* check whether the CPU supports the virtualization extensions */ + reg = read_id_pfr1(); + if ((reg & CPUID_ARM_VIRT_MASK) != 1 << CPUID_ARM_VIRT_SHIFT) { + printf("HYP mode: Virtualization extensions not implemented.\n"); + return -1; + } + + /* call the HYP switching code on this CPU also */ + _switch_to_hyp(); + + if ((read_cpsr() & 0x1F) != 0x1a) { + printf("HYP mode: switch not successful.\n"); + return -1; + } + + return 0; +} + int armv7_switch_nonsec(void) { unsigned int reg; diff --git a/arch/arm/include/asm/armv7.h b/arch/arm/include/asm/armv7.h index 2efd4bcf42..395444ee4f 100644 --- a/arch/arm/include/asm/armv7.h +++ b/arch/arm/include/asm/armv7.h @@ -76,14 +76,16 @@ void v7_outer_cache_inval_all(void); void v7_outer_cache_flush_range(u32 start, u32 end); void v7_outer_cache_inval_range(u32 start, u32 end); -#ifdef CONFIG_ARMV7_NONSEC +#if defined(CONFIG_ARMV7_NONSEC) || defined(CONFIG_ARMV7_VIRT) int armv7_switch_nonsec(void); +int armv7_switch_hyp(void); /* defined in assembly file */ unsigned int _nonsec_init(void); void _smp_pen(void); -#endif /* CONFIG_ARMV7_NONSEC */ +void _switch_to_hyp(void); +#endif /* CONFIG_ARMV7_NONSEC || CONFIG_ARMV7_VIRT */ #endif /* ! 
__ASSEMBLY__ */ diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index b3a961a825..f476a89702 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -22,7 +22,7 @@ #include #include -#ifdef CONFIG_ARMV7_NONSEC +#if defined(CONFIG_ARMV7_NONSEC) || defined(CONFIG_ARMV7_VIRT) #include #endif @@ -189,8 +189,13 @@ static void do_nonsec_virt_switch(void) { #if defined(CONFIG_ARMV7_NONSEC) || defined(CONFIG_ARMV7_VIRT) if (armv7_switch_nonsec() == 0) +#ifdef CONFIG_ARMV7_VIRT + if (armv7_switch_hyp() == 0) + debug("entered HYP mode\n"); +#else debug("entered non-secure state\n"); #endif +#endif } /* Subcommand: PREP */ -- cgit v1.2.1
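Taken together, a board that wants the full non-secure plus HYP switch
enables the options from this series and, where the GIC-based defaults do not
fit, overrides the weak hooks (a sketch: only the option and function names
come from the series, the pen address and the release register are made up):

	/* include/configs/myboard.h -- hypothetical example */
	#define CONFIG_ARMV7_NONSEC			/* switch to non-secure state before booting */
	#define CONFIG_ARMV7_VIRT			/* additionally enter HYP mode */
	#define CONFIG_SMP_PEN_ADDR	0x80000028	/* board-specific SMP pen location (made up) */

	/* board file: optional override of the weak, GIC-based default */
	#include <asm/io.h>

	void smp_kick_all_cpus(void)
	{
		writel(1, MYBOARD_CPU_RELEASE_REG);	/* hypothetical wake-up register, made up */
	}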