author		Russell King <rmk+kernel@arm.linux.org.uk>	2013-06-17 16:52:14 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-06-17 16:52:34 +0100
commit		04e71d72abec3e1a2b2ac10f96ced1a471ecb3aa (patch)
tree		cca2a5ba9eb2ccbfb325660341f1741321df7690 /arch/arm/kernel
parent		fdeb94b5dc5bf9db7b3e36f3f38089a554f6a108 (diff)
parent		de8297765def67ec40b69522f3e405a61e0217b3 (diff)
Merge branch 'ja-nommu-for-rmk-v2' of git://linux-arm.org/linux-ja into devel-stable
This includes the following series sent earlier to the list:
- nommu-fixes
- R7 Support
- MPU support
I've left out the ARCH_MULTIPLATFORM/!MMU stuff that Arnd and I were
discussing today until we've reached a conclusion/that's had some more
review.
This is rebased (and re-tested) on your devel-stable branch because
otherwise there were going to be conflicts with Uwe's V7M work now that
you've merged that. I've included the fix for limiting MPU to CPU_V7.
Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/Makefile     |   5
 arch/arm/kernel/head-nommu.S | 160
 arch/arm/kernel/signal.c     |   9
 arch/arm/kernel/smp.c        |  10
 arch/arm/kernel/suspend.c    |  64
 5 files changed, 211 insertions(+), 37 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index f4285b5ffb05..fccfbdb03df1 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -38,7 +38,10 @@ obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
-obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
+obj-$(CONFIG_SMP)		+= smp.o
+ifdef CONFIG_MMU
+obj-$(CONFIG_SMP)		+= smp_tlb.o
+endif
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_ARM_ARCH_TIMER)	+= arch_timer.o
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 8812ce88f7a1..75f14cc3e073 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -17,9 +17,12 @@
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
+#include <asm/memory.h>
 #include <asm/cp15.h>
 #include <asm/thread_info.h>
 #include <asm/v7m.h>
+#include <asm/mpu.h>
+#include <asm/page.h>
 
 /*
  * Kernel startup entry point.
@@ -63,12 +66,74 @@ ENTRY(stext)
 	movs	r10, r5				@ invalid processor (r5=0)?
 	beq	__error_p			@ yes, error 'p'
 
-	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
+#ifdef CONFIG_ARM_MPU
+	/* Calculate the size of a region covering just the kernel */
+	ldr	r5, =PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r6, =(_end)			@ Cover whole kernel
+	sub	r6, r6, r5			@ Minimum size of region to map
+	clz	r6, r6				@ Region size must be 2^N...
+	rsb	r6, r6, #31			@ ...so round up region size
+	lsl	r6, r6, #MPU_RSR_SZ		@ Put size in right field
+	orr	r6, r6, #(1 << MPU_RSR_EN)	@ Set region enabled bit
+	bl	__setup_mpu
+#endif
+	ldr	r13, =__mmap_switched		@ address to jump to after
+						@ initialising sctlr
+	adr	lr, BSYM(1f)			@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
+1:	b	__after_proc_init
 ENDPROC(stext)
 
+#ifdef CONFIG_SMP
+	__CPUINIT
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifndef CONFIG_CPU_CP15
+	ldr	r9, =CONFIG_PROCESSOR_ID
+#else
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+#endif
+	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
+	movs	r10, r5				@ invalid processor?
+	beq	__error_p			@ yes, error 'p'
+
+	adr	r4, __secondary_data
+	ldmia	r4, {r7, r12}
+
+#ifdef CONFIG_ARM_MPU
+	/* Use MPU region info supplied by __cpu_up */
+	ldr	r6, [r7]			@ get secondary_data.mpu_szr
+	bl	__setup_mpu			@ Initialize the MPU
+#endif
+
+	adr	lr, BSYM(__after_proc_init)	@ return address
+	mov	r13, r12			@ __secondary_switched address
+ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+THUMB(	mov	pc, r12				)
+ENDPROC(secondary_startup)
+
+ENTRY(__secondary_switched)
+	ldr	sp, [r7, #8]			@ set up the stack pointer
+	mov	fp, #0
+	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* CONFIG_SMP */
+
 /*
  * Set the Control Register and Read the process ID.
  */
@@ -99,10 +164,97 @@ __after_proc_init:
 #endif
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 #endif /* CONFIG_CPU_CP15 */
-
-	b	__mmap_switched			@ clear the BSS and jump
-						@ to start_kernel
+	mov	pc, r13
 ENDPROC(__after_proc_init)
 	.ltorg
 
+#ifdef CONFIG_ARM_MPU
+
+
+/* Set which MPU region should be programmed */
+.macro set_region_nr tmp, rgnr
+	mov	\tmp, \rgnr			@ Use static region numbers
+	mcr	p15, 0, \tmp, c6, c2, 0		@ Write RGNR
+.endm
+
+/* Setup a single MPU region, either D or I side (D-side for unified) */
+.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE
+	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
+	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
+	mcr	p15, 0, \sr, c6, c1, (2 + \side)	@ I/DRSR
+.endm
+
+/*
+ * Setup the MPU and initial MPU Regions. We create the following regions:
+ * Region 0: Use this for probing the MPU details, so leave disabled.
+ * Region 1: Background region - covers the whole of RAM as strongly ordered
+ * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
+ * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
+ *
+ * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
+*/
+
+ENTRY(__setup_mpu)
+
+	/* Probe for v7 PMSA compliance */
+	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
+	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
+	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
+	bne	__error_p			@ Fail: ARM_MPU on NOT v7 PMSA
+
+	/* Determine whether the D/I-side memory map is unified. We set the
+	 * flags here and continue to use them for the rest of this function */
+	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
+	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
+	beq	__error_p			@ Fail: ARM_MPU and no MPU
+	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
+
+	/* Setup second region first to free up r6 */
+	set_region_nr r0, #MPU_RAM_REGION
+	isb
+	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
+	ldr	r0, =PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
+	ldr	r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ PHYS_OFFSET, shared, enabled
+	beq	1f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ PHYS_OFFSET, shared, enabled
+1:	isb
+
+	/* First/background region */
+	set_region_nr r0, #MPU_BG_REGION
+	isb
+	/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
+	mov	r0, #0				@ BG region starts at 0x0
+	ldr	r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
+	mov	r6, #MPU_RSR_ALL_MEM		@ 4GB region, enabled
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ 0x0, BG region, enabled
+	beq	2f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ 0x0, BG region, enabled
+2:	isb
+
+	/* Vectors region */
+	set_region_nr r0, #MPU_VECTORS_REGION
+	isb
+	/* Shared, inaccessible to PL0, rw PL1 */
+	mov	r0, #CONFIG_VECTORS_BASE	@ Cover from VECTORS_BASE
+	ldr	r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
+	/* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */
+	mov	r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ VECTORS_BASE, PL0 NA, enabled
+	beq	3f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ VECTORS_BASE, PL0 NA, enabled
+3:	isb
+
+	/* Enable the MPU */
+	mrc	p15, 0, r0, c1, c0, 0		@ Read SCTLR
+	bic	r0, r0, #CR_BR			@ Disable the 'default mem-map'
+	orr	r0, r0, #CR_M			@ Set SCTRL.M (MPU on)
+	mcr	p15, 0, r0, c1, c0, 0		@ Enable MPU
+	isb
+	mov	pc,lr
+ENDPROC(__setup_mpu)
+#endif
 #include "head-common.S"
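The least obvious part of the head-nommu.S hunk above is the DRSR value built in stext: the PMSAv7 DRSR SIZE field encodes a region of 2^(N+1) bytes, so writing N = 31 - clz(kernel_size) selects a region at least as large as the kernel, and the same encoding is why the vectors region above uses (PAGE_SHIFT - 1) for a 4 KiB page. Below is a minimal C sketch of that computation; RSR_SZ_SHIFT and RSR_EN are assumed stand-ins mirroring the patch's MPU_RSR_SZ and MPU_RSR_EN names (the real values live in asm/mpu.h).

/*
 * Sketch only, not kernel code: mirrors the clz/rsb/lsl/orr sequence
 * used in stext above to build the DRSR for the kernel RAM region.
 */
#include <stdint.h>
#include <stdio.h>

#define RSR_SZ_SHIFT	1		/* assumed position of the SIZE field */
#define RSR_EN		(1u << 0)	/* assumed region-enable bit */

static uint32_t kernel_region_drsr(uint32_t phys_offset, uint32_t kernel_end)
{
	uint32_t span = kernel_end - phys_offset;	/* bytes to cover */
	uint32_t n = 31 - __builtin_clz(span);		/* same as clz + rsb #31 */

	/* DRSR.SIZE == N selects a 2^(N+1)-byte region, so the result
	 * covers at least 'span' bytes, rounded up to a power of two. */
	return (n << RSR_SZ_SHIFT) | RSR_EN;
}

int main(void)
{
	/* e.g. a 5 MiB kernel at 0x80000000 ends up in an 8 MiB region */
	printf("DRSR = 0x%x\n", kernel_region_drsr(0x80000000u, 0x80500000u));
	return 0;
}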
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 296786bdbb73..1c16c35c271a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -392,14 +392,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
 		idx += 3;
 
+	/*
+	 * Put the sigreturn code on the stack no matter which return
+	 * mechanism we use in order to remain ABI compliant
+	 */
 	if (__put_user(sigreturn_codes[idx],   rc) ||
 	    __put_user(sigreturn_codes[idx+1], rc+1))
 		return 1;
 
-	if (cpsr & MODE32_BIT) {
+	if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
 		/*
 		 * 32-bit code can use the new high-page
-		 * signal return code support.
+		 * signal return code support except when the MPU has
+		 * protected the vectors page from PL0
 		 */
 		retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
 	} else {
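The signal.c change above always writes the sigreturn trampoline to the signal frame and only points userspace at the kernel-provided high page when PL0 is actually allowed to execute it. A self-contained sketch of that selection follows; MODE32_BIT_STANDIN and mpu_protects_vectors are stand-ins for the kernel's MODE32_BIT and IS_ENABLED(CONFIG_ARM_MPU), and the real code also picks the ARM/Thumb trampoline variant via 'idx' and 'thumb'.

/* Sketch of the retcode choice made in setup_return() above. */
#include <stdbool.h>

#define MODE32_BIT_STANDIN	0x10UL	/* stand-in PSR bit, not the kernel define */

static unsigned long choose_retcode(unsigned long cpsr, bool mpu_protects_vectors,
				    unsigned long high_page_retcode,
				    unsigned long stack_retcode)
{
	/* the high (vectors) page copy is only usable when PL0 may execute it */
	if ((cpsr & MODE32_BIT_STANDIN) && !mpu_protects_vectors)
		return high_page_retcode;

	/* otherwise fall back to the copy already placed on the signal frame */
	return stack_retcode;
}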
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 550d63cef68e..e17d9346baee 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -45,6 +45,7 @@
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
+#include <asm/mpu.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -87,8 +88,14 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 	 * its stack and the page tables.
 	 */
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
+#ifdef CONFIG_ARM_MPU
+	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
+#endif
+
+#ifdef CONFIG_MMU
 	secondary_data.pgdir = virt_to_phys(idmap_pgd);
 	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
+#endif
 	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
 	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
 
@@ -112,9 +119,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
-	secondary_data.stack = NULL;
-	secondary_data.pgdir = 0;
 
+	memset(&secondary_data, 0, sizeof(secondary_data));
 	return ret;
 }
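The smp.c hunk above hands the RAM region's DRSR value to the incoming CPU through secondary_data and then wipes the whole structure with a single memset() once the CPU is up. The assembly earlier in this patch reads the MPU value from the first word ([r7]) and the stack pointer from [r7, #8], which implies a layout along these lines; this is only a sketch, and the authoritative definition is struct secondary_data in arch/arm/include/asm/smp.h.

/*
 * Sketch of the boot handoff block implied by the offsets used in
 * head-nommu.S ([r7] for the MPU/pgdir word, [r7, #8] for the stack);
 * not the kernel's definition.
 */
struct secondary_data_sketch {
	union {
		unsigned long mpu_rgn_szr;	/* !MMU: DRSR for the kernel RAM region */
		unsigned long pgdir;		/* MMU: physical address of the idmap tables */
	};					/* word 0, read via [r7] */
	unsigned long swapper_pg_dir;		/* MMU: physical swapper_pg_dir */
	void *stack;				/* word 2, read via [r7, #8] */
};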
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index c59c97ea8268..38a50676213b 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -10,6 +10,42 @@
 extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
 extern void cpu_resume_mmu(void);
 
+#ifdef CONFIG_MMU
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret;
+
+	if (!idmap_pgd)
+		return -EINVAL;
+
+	/*
+	 * Provide a temporary page table with an identity mapping for
+	 * the MMU-enable code, required for resuming.  On successful
+	 * resume (indicated by a zero return code), we need to switch
+	 * back to the correct page tables.
+	 */
+	ret = __cpu_suspend(arg, fn);
+	if (ret == 0) {
+		cpu_switch_mm(mm->pgd, mm);
+		local_flush_bp_all();
+		local_flush_tlb_all();
+	}
+
+	return ret;
+}
+#else
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	return __cpu_suspend(arg, fn);
+}
+#define	idmap_pgd	NULL
+#endif
+
 /*
  * This is called by __cpu_suspend() to save the state, and do whatever
  * flushing is required to ensure that when the CPU goes to sleep we have
@@ -46,31 +82,3 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
 	outer_clean_range(virt_to_phys(save_ptr),
 			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
 }
-
-/*
- * Hide the first two arguments to __cpu_suspend - these are an implementation
- * detail which platform code shouldn't have to know about.
- */
-int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
-{
-	struct mm_struct *mm = current->active_mm;
-	int ret;
-
-	if (!idmap_pgd)
-		return -EINVAL;
-
-	/*
-	 * Provide a temporary page table with an identity mapping for
-	 * the MMU-enable code, required for resuming.  On successful
-	 * resume (indicated by a zero return code), we need to switch
-	 * back to the correct page tables.
-	 */
-	ret = __cpu_suspend(arg, fn);
-	if (ret == 0) {
-		cpu_switch_mm(mm->pgd, mm);
-		local_flush_bp_all();
-		local_flush_tlb_all();
-	}
-
-	return ret;
-}
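The suspend.c rework keeps cpu_suspend()'s interface identical whether or not the kernel has an MMU; only the MMU variant needs idmap_pgd and the switch back to the caller's page tables after resume. A hypothetical platform caller follows, just to show the contract both variants preserve; my_finisher and my_pm_enter are made-up names, and the asm/suspend.h declaration of cpu_suspend() is assumed.

#include <asm/suspend.h>	/* assumed to declare cpu_suspend() */

/*
 * Hypothetical platform code: the finisher is expected to put the CPU
 * into its low-power state and not return on success; if it does
 * return, the suspend attempt is treated as failed.
 */
static int my_finisher(unsigned long arg)
{
	/* program the power controller, then enter WFI/power-down here */
	return -1;			/* only reached if power-down did not happen */
}

static int my_pm_enter(void)
{
	int ret = cpu_suspend(0, my_finisher);

	/*
	 * ret == 0 means we resumed through the saved state (and, on MMU
	 * kernels, the correct page tables have been switched back in).
	 */
	return ret;
}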