author     David Woodhouse <dwmw2@shinybook.infradead.org>  2005-06-20 15:51:24 +0100
committer  David Woodhouse <dwmw2@shinybook.infradead.org>  2005-06-20 15:51:24 +0100
commit     df5179854bca84ac5be500849b12dd33ce03f03f (patch)
tree       78cf16415489e70f34c58f2c7f5c2e63696e9761 /arch
parent     0f45aa18e65cf3d768082d7d86054a0d2a20bb18 (diff)
parent     8b22c249e7de453961e4d253b19fc2a0bdd65d53 (diff)
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/entry-armv.S       | 254
-rw-r--r--  arch/arm/kernel/entry-header.S     |   7
-rw-r--r--  arch/arm/kernel/head.S             |  44
-rw-r--r--  arch/arm/kernel/setup.c            |  52
-rw-r--r--  arch/arm/kernel/smp.c              | 107
-rw-r--r--  arch/arm/mach-integrator/Makefile  |   1
-rw-r--r--  arch/arm/mach-integrator/core.c    |  20
-rw-r--r--  arch/arm/mach-integrator/headsmp.S |  37
-rw-r--r--  arch/arm/mach-integrator/leds.c    |   4
-rw-r--r--  arch/arm/mach-integrator/platsmp.c | 192
-rw-r--r--  arch/arm/mach-pxa/pm.c             |   2
-rw-r--r--  arch/arm/mach-sa1100/pm.c          |   2
12 files changed, 597 insertions(+), 125 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e14278d59882..39a6c1b0b9a3 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -24,48 +24,91 @@
 #include "entry-header.S"
 
 /*
+ * Interrupt handling.  Preserves r7, r8, r9
+ */
+	.macro	irq_handler
+1:	get_irqnr_and_base r0, r6, r5, lr
+	movne	r1, sp
+	@
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	@
+	adrne	lr, 1b
+	bne	asm_do_IRQ
+
+#ifdef CONFIG_SMP
+	/*
+	 * XXX
+	 *
+	 * this macro assumes that irqstat (r6) and base (r5) are
+	 * preserved from get_irqnr_and_base above
+	 */
+	test_for_ipi r0, r6, r5, lr
+	movne	r0, sp
+	adrne	lr, 1b
+	bne	do_IPI
+#endif
+
+	.endm
+
+/*
  * Invalid mode handlers
  */
-	.macro	inv_entry, sym, reason
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
-	ldr	r4, .LC\sym
+	.macro	inv_entry, reason
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - lr}
 	mov	r1, #\reason
 	.endm
 
 __pabt_invalid:
-	inv_entry abt, BAD_PREFETCH
-	b	1f
+	inv_entry BAD_PREFETCH
+	b	common_invalid
 
 __dabt_invalid:
-	inv_entry abt, BAD_DATA
-	b	1f
+	inv_entry BAD_DATA
+	b	common_invalid
 
 __irq_invalid:
-	inv_entry irq, BAD_IRQ
-	b	1f
+	inv_entry BAD_IRQ
+	b	common_invalid
 
 __und_invalid:
-	inv_entry und, BAD_UNDEFINSTR
+	inv_entry BAD_UNDEFINSTR
+
+	@
+	@ XXX fall through to common_invalid
+	@
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+	zero_fp
+
+	ldmia	r0, {r4 - r6}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r7, #-1			@  ""   ""    ""        ""
+	str	r4, [sp]		@ save preserved r0
+	stmia	r0, {r5 - r7}		@ lr_<exception>,
+					@ cpsr_<exception>, "old_r0"
 
-1:	zero_fp
-	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
-	add	r4, sp, #S_PC
-	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
 	mov	r0, sp
-	and	r2, r6, #31			@ int mode
+	and	r2, r6, #0x1f
 	b	bad_mode
 
 /*
  * SVC mode handlers
  */
-	.macro	svc_entry, sym
+	.macro	svc_entry
 	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r2, .LC\sym
-	add	r0, sp, #S_FRAME_SIZE
-	ldmia	r2, {r2 - r4}			@ get pc, cpsr
-	add	r5, sp, #S_SP
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r5, sp, #S_SP		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""      ""       ""
+	add	r0, sp, #S_FRAME_SIZE	@  ""  ""      ""       ""
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
+
 	mov	r1, lr
 	@
@@ -82,7 +125,7 @@ __und_invalid:
 
 	.align	5
 __dabt_svc:
-	svc_entry abt
+	svc_entry
 
 	@
 	@ get ready to re-enable interrupts if appropriate
@@ -129,28 +172,24 @@ __dabt_svc:
 
 	.align	5
 __irq_svc:
-	svc_entry irq
+	svc_entry
+
 #ifdef CONFIG_PREEMPT
-	get_thread_info r8
-	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
-	add	r7, r9, #1			@ increment it
-	str	r7, [r8, #TI_PREEMPT]
+	get_thread_info tsk
+	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
+	add	r7, r8, #1			@ increment it
+	str	r7, [tsk, #TI_PREEMPT]
 #endif
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	adrne	lr, 1b
-	bne	asm_do_IRQ
+
+	irq_handler
 #ifdef CONFIG_PREEMPT
-	ldr	r0, [r8, #TI_FLAGS]		@ get flags
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 preempt_return:
-	ldr	r0, [r8, #TI_PREEMPT]		@ read preempt value
+	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
+	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
 	teq	r0, r7
-	str	r9, [r8, #TI_PREEMPT]		@ restore preempt count
 	strne	r0, [r0, -r0]			@ bug()
 #endif
 	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
@@ -161,7 +200,7 @@ preempt_return:
 
 #ifdef CONFIG_PREEMPT
 svc_preempt:
-	teq	r9, #0				@ was preempt count = 0
+	teq	r8, #0				@ was preempt count = 0
 	ldreq	r6, .LCirq_stat
 	movne	pc, lr				@ no
 	ldr	r0, [r6, #4]			@ local_irq_count
@@ -169,9 +208,9 @@ svc_preempt:
 	adds	r0, r0, r1
 	movne	pc, lr
 	mov	r7, #0				@ preempt_schedule_irq
-	str	r7, [r8, #TI_PREEMPT]		@ expects preempt_count == 0
+	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
-	ldr	r0, [r8, #TI_FLAGS]		@ get new tasks TI_FLAGS
+	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
 	beq	preempt_return			@ go again
 	b	1b
@@ -179,7 +218,7 @@ svc_preempt:
 
 	.align	5
 __und_svc:
-	svc_entry und
+	svc_entry
 
 	@
 	@ call emulation code, which returns using r9 if it has emulated
@@ -209,7 +248,7 @@ __und_svc:
 
 	.align	5
 __pabt_svc:
-	svc_entry abt
+	svc_entry
 
 	@
 	@ re-enable interrupts if appropriate
@@ -242,12 +281,8 @@ __pabt_svc:
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 
 	.align	5
-.LCirq:
-	.word	__temp_irq
-.LCund:
-	.word	__temp_und
-.LCabt:
-	.word	__temp_abt
+.LCcralign:
+	.word	cr_alignment
 #ifdef MULTI_ABORT
 .LCprocfns:
 	.word	processor
@@ -262,12 +297,16 @@ __pabt_svc:
 /*
  * User mode handlers
  */
-	.macro	usr_entry, sym
-	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
-	stmia	sp, {r0 - r12}			@ save r0 - r12
-	ldr	r7, .LC\sym
-	add	r5, sp, #S_PC
-	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
+	.macro	usr_entry
+	sub	sp, sp, #S_FRAME_SIZE
+	stmib	sp, {r1 - r12}
+
+	ldmia	r0, {r1 - r3}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r4, #-1			@  ""  ""     ""        ""
+
+	str	r1, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
 
 #if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 	@ make sure our user space atomic helper is aborted
@@ -284,13 +323,13 @@ __pabt_svc:
 	@
 	@ Also, separately save sp_usr and lr_usr
 	@
-	stmia	r5, {r2 - r4}
-	stmdb	r5, {sp, lr}^
+	stmia	r0, {r2 - r4}
+	stmdb	r0, {sp, lr}^
 
 	@
 	@ Enable the alignment trap while in kernel mode
 	@
-	alignment_trap r7, r0, __temp_\sym
+	alignment_trap r0
 
 	@
 	@ Clear FP to mark the first stack frame
@@ -300,7 +339,7 @@ __pabt_svc:
 
 	.align	5
 __dabt_usr:
-	usr_entry abt
+	usr_entry
 
 	@
 	@ Call the processor-specific abort handler:
@@ -329,30 +368,23 @@ __dabt_usr:
 
 	.align	5
 __irq_usr:
-	usr_entry irq
+	usr_entry
 
+	get_thread_info tsk
 #ifdef CONFIG_PREEMPT
-	get_thread_info r8
-	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
-	add	r7, r9, #1			@ increment it
-	str	r7, [r8, #TI_PREEMPT]
+	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
+	add	r7, r8, #1			@ increment it
+	str	r7, [tsk, #TI_PREEMPT]
 #endif
-1:	get_irqnr_and_base r0, r6, r5, lr
-	movne	r1, sp
-	adrne	lr, 1b
-	@
-	@ routine called with r0 = irq number, r1 = struct pt_regs *
-	@
-	bne	asm_do_IRQ
+
+	irq_handler
#ifdef CONFIG_PREEMPT
-	ldr	r0, [r8, #TI_PREEMPT]
+	ldr	r0, [tsk, #TI_PREEMPT]
+	str	r8, [tsk, #TI_PREEMPT]
 	teq	r0, r7
-	str	r9, [r8, #TI_PREEMPT]
 	strne	r0, [r0, -r0]
-	mov	tsk, r8
-#else
-	get_thread_info tsk
 #endif
+
 	mov	why, #0
 	b	ret_to_user
 
@@ -360,7 +392,7 @@ __irq_usr:
 
 	.align	5
 __und_usr:
-	usr_entry und
+	usr_entry
 
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
 	bne	fpundefinstr			@ ignore FP
@@ -476,7 +508,7 @@ fpundefinstr:
 
 	.align	5
 __pabt_usr:
-	usr_entry abt
+	usr_entry
 
 	enable_irq				@ Enable interrupts
 	mov	r0, r2				@ address (pc)
@@ -741,29 +773,41 @@ __kuser_helper_end:
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
  */
-	.macro	vector_stub, name, sym, correction=0
+	.macro	vector_stub, name, correction=0
 	.align	5
 
 vector_\name:
-	ldr	r13, .LCs\sym
 	.if \correction
 	sub	lr, lr, #\correction
 	.endif
-	str	lr, [r13]			@ save lr_IRQ
+
+	@
+	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+	@ (parent CPSR)
+	@
+	stmia	sp, {r0, lr}		@ save r0, lr
 	mrs	lr, spsr
-	str	lr, [r13, #4]			@ save spsr_IRQ
+	str	lr, [sp, #8]		@ save spsr
+
 	@
-	@ now branch to the relevant MODE handling routine
+	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
-	mrs	r13, cpsr
-	bic	r13, r13, #MODE_MASK
-	orr	r13, r13, #SVC_MODE
-	msr	spsr_cxsf, r13			@ switch to SVC_32 mode
+	mrs	r0, cpsr
+	bic	r0, r0, #MODE_MASK
+	orr	r0, r0, #SVC_MODE
+	msr	spsr_cxsf, r0
 
-	and	lr, lr, #15
+	@
+	@ the branch table must immediately follow this code
+	@
+	mov	r0, sp
+	and	lr, lr, #0x0f
 	ldr	lr, [pc, lr, lsl #2]
-	movs	pc, lr				@ Changes mode and branches
+	movs	pc, lr			@ branch to handler in SVC mode
 	.endm
 
 	.globl	__stubs_start
@@ -771,7 +815,7 @@ __stubs_start:
 /*
  * Interrupt dispatcher
  */
-	vector_stub	irq, irq, 4
+	vector_stub	irq, 4
 
 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -794,7 +838,7 @@ __stubs_start:
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	dabt, abt, 8
+	vector_stub	dabt, 8
 
 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -817,7 +861,7 @@ __stubs_start:
  * Prefetch abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	pabt, abt, 4
+	vector_stub	pabt, 4
 
 	.long	__pabt_usr			@  0  (USR_26 / USR_32)
 	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -840,7 +884,7 @@ __stubs_start:
  * Undef instr entry dispatcher
  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-	vector_stub	und, und
+	vector_stub	und
 
 	.long	__und_usr			@  0  (USR_26 / USR_32)
 	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -894,13 +938,6 @@ vector_addrexcptn:
 .LCvswi:
 	.word	vector_swi
 
-.LCsirq:
-	.word	__temp_irq
-.LCsund:
-	.word	__temp_und
-.LCsabt:
-	.word	__temp_abt
-
 	.globl	__stubs_end
 __stubs_end:
@@ -922,23 +959,6 @@ __vectors_end:
 
 	.data
 
-/*
- * Do not reorder these, and do not insert extra data between...
- */
-
-__temp_irq:
-	.word	0				@ saved lr_irq
-	.word	0				@ saved spsr_irq
-	.word	-1				@ old_r0
-__temp_und:
-	.word	0				@ Saved lr_und
-	.word	0				@ Saved spsr_und
-	.word	-1				@ old_r0
-__temp_abt:
-	.word	0				@ Saved lr_abt
-	.word	0				@ Saved spsr_abt
-	.word	-1				@ old_r0
-
 	.globl	cr_alignment
 	.globl	cr_no_alignment
 cr_alignment:
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a3d40a0e2b04..afef21273963 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -59,11 +59,10 @@
 	mov	\rd, \rd, lsl #13
 	.endm
 
-	.macro	alignment_trap, rbase, rtemp, sym
+	.macro	alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
-#define OFF_CR_ALIGNMENT(x)	cr_alignment - x
-
-	ldr	\rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
+	ldr	\rtemp, .LCcralign
+	ldr	\rtemp, [\rtemp]
 	mcr	p15, 0, \rtemp, c1, c0
 #endif
 	.endm
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4733877296d4..bd4823c74645 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -2,6 +2,8 @@
  *  linux/arch/arm/kernel/head.S
  *
  *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -165,6 +167,48 @@ __mmap_switched:
 	stmia	r6, {r0, r4}			@ Save control register values
 	b	start_kernel
 
+#if defined(CONFIG_SMP)
+	.type   secondary_startup, #function
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
+	bl	__lookup_processor_type
+	movs	r10, r5				@ invalid processor?
+	moveq	r0, #'p'			@ yes, error 'p'
+	beq	__error
+
+	/*
+	 * Use the page tables supplied from __cpu_up.
+	 */
+	adr	r4, __secondary_data
+	ldmia	r4, {r5, r6, r13}		@ address to jump to after
+	sub	r4, r4, r5			@ mmu has been enabled
+	ldr	r4, [r6, r4]			@ get secondary_data.pgdir
+	adr	lr, __enable_mmu		@ return address
+	add	pc, r10, #12			@ initialise processor
+						@ (return control reg)
+
+	/*
+	 * r6  = &secondary_data
+	 */
+ENTRY(__secondary_switched)
+	ldr	sp, [r6, #4]			@ get secondary_data.stack
+	mov	fp, #0
+	b	secondary_start_kernel
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	.
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* defined(CONFIG_SMP) */
+
 /*
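[Note: the secondary_startup path above runs with the MMU off, so the link-time (virtual) addresses embedded in the kernel image cannot be dereferenced directly. The __secondary_data block pairs an adr (run-time, physical address) with a ".long ." (link-time address of the same spot); their difference converts any linked address into its physical location. A minimal C sketch of that fix-up arithmetic, illustrative only - the function name is hypothetical:

    /*
     * delta = where the anchor really sits at run time (physical)
     *       - where the linker placed it (virtual).
     * Adding delta to a link-time address yields the physical
     * address of that object while the MMU is still disabled.
     */
    static unsigned long fixup_phys(unsigned long runtime_anchor,
                                    unsigned long linktime_anchor,
                                    unsigned long linktime_addr)
    {
            return linktime_addr + (runtime_anchor - linktime_anchor);
    }

This mirrors "sub r4, r4, r5" followed by "ldr r4, [r6, r4]" in secondary_startup, which loads secondary_data.pgdir through the computed physical address.]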
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c2a7da3ac0f1..9fed5fa194d9 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -92,6 +92,14 @@ struct cpu_user_fns cpu_user;
 struct cpu_cache_fns cpu_cache;
 #endif
 
+struct stack {
+	u32 irq[3];
+	u32 abt[3];
+	u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
 
@@ -307,8 +315,6 @@ static void __init setup_processor(void)
 	       cpu_name, processor_id, (int)processor_id & 15,
 	       proc_arch[cpu_architecture()]);
 
-	dump_cpu_info(smp_processor_id());
-
 	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
 	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
@@ -316,6 +322,46 @@ static void __init setup_processor(void)
 	cpu_proc_init();
 }
 
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init dumps the cache information, initialises SMP specific
+ * information, and sets up the per-CPU stacks.
+ */
+void cpu_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct stack *stk = &stacks[cpu];
+
+	if (cpu >= NR_CPUS) {
+		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+		BUG();
+	}
+
+	dump_cpu_info(cpu);
+
+	/*
+	 * setup stacks for re-entrant exception handlers
+	 */
+	__asm__ (
+	"msr	cpsr_c, %1\n\t"
+	"add	sp, %0, %2\n\t"
+	"msr	cpsr_c, %3\n\t"
+	"add	sp, %0, %4\n\t"
+	"msr	cpsr_c, %5\n\t"
+	"add	sp, %0, %6\n\t"
+	"msr	cpsr_c, %7"
+	    :
+	    : "r" (stk),
+	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      "I" (offsetof(struct stack, irq[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      "I" (offsetof(struct stack, abt[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      "I" (offsetof(struct stack, und[0])),
+	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));
+}
+
 static struct machine_desc * __init setup_machine(unsigned int nr)
 {
 	struct machine_desc *list;
@@ -715,6 +761,8 @@ void __init setup_arch(char **cmdline_p)
 	paging_init(&meminfo, mdesc);
 	request_standard_resources(&meminfo, mdesc);
 
+	cpu_init();
+
 	/*
 	 * Set up various architecture-specific pointers
 	 */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ecc8c3332408..45ed036336e0 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -24,6 +24,9 @@
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
@@ -37,6 +40,13 @@ cpumask_t cpu_present_mask;
 cpumask_t cpu_online_map;
 
 /*
+ * as from 2.5, kernels no longer have an init_tasks structure
+ * so we need some other way of telling a new secondary core
+ * where to place its SVC stack
+ */
+struct secondary_data secondary_data;
+
+/*
  * structures for inter-processor calls
  * - A collection of single bit ipi messages.
  */
@@ -71,6 +81,8 @@ static DEFINE_SPINLOCK(smp_call_function_lock);
 int __init __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
+	pgd_t *pgd;
+	pmd_t *pmd;
 	int ret;
 
 	/*
@@ -84,9 +96,54 @@ int __init __cpu_up(unsigned int cpu)
 	}
 
 	/*
+	 * Allocate initial page tables to allow the new CPU to
+	 * enable the MMU safely.  This essentially means a set
+	 * of our "standard" page tables, with the addition of
+	 * a 1:1 mapping for the physical address of the kernel.
+	 */
+	pgd = pgd_alloc(&init_mm);
+	pmd = pmd_offset(pgd, PHYS_OFFSET);
+	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
+		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
+
+	/*
+	 * We need to tell the secondary core where to find
+	 * its stack and the page tables.
+	 */
+	secondary_data.stack = (void *)idle->thread_info + THREAD_SIZE - 8;
+	secondary_data.pgdir = virt_to_phys(pgd);
+	wmb();
+
+	/*
 	 * Now bring the CPU into our world.
 	 */
 	ret = boot_secondary(cpu, idle);
+	if (ret == 0) {
+		unsigned long timeout;
+
+		/*
+		 * CPU was successfully started, wait for it
+		 * to come online or time out.
+		 */
+		timeout = jiffies + HZ;
+		while (time_before(jiffies, timeout)) {
+			if (cpu_online(cpu))
+				break;
+
+			udelay(10);
+			barrier();
+		}
+
+		if (!cpu_online(cpu))
+			ret = -EIO;
+	}
+
+	secondary_data.stack = 0;
+	secondary_data.pgdir = 0;
+
+	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
+	pgd_free(pgd);
+
 	if (ret) {
 		printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
 		/*
@@ -98,6 +155,56 @@ int __init __cpu_up(unsigned int cpu)
 }
 
 /*
+ * This is the secondary CPU boot entry.  We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage void __init secondary_start_kernel(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu = smp_processor_id();
+
+	printk("CPU%u: Booted secondary processor\n", cpu);
+
+	/*
+	 * All kernel threads share the same mm context; grab a
+	 * reference and switch to it.
+	 */
+	atomic_inc(&mm->mm_users);
+	atomic_inc(&mm->mm_count);
+	current->active_mm = mm;
+	cpu_set(cpu, mm->cpu_vm_mask);
+	cpu_switch_mm(mm->pgd, mm);
+	enter_lazy_tlb(mm, current);
+
+	cpu_init();
+
+	/*
+	 * Give the platform a chance to do its own initialisation.
+	 */
+	platform_secondary_init(cpu);
+
+	/*
+	 * Enable local interrupts.
+	 */
+	local_irq_enable();
+	local_fiq_enable();
+
+	calibrate_delay();
+
+	smp_store_cpu_info(cpu);
+
+	/*
+	 * OK, now it's safe to let the boot CPU continue
+	 */
+	cpu_set(cpu, cpu_online_map);
+
+	/*
+	 * OK, it's off to the idle thread for us
+	 */
+	cpu_idle();
+}
+
+/*
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile
index 158daaf9e3b0..ebb255bdce8a 100644
--- a/arch/arm/mach-integrator/Makefile
+++ b/arch/arm/mach-integrator/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS) += leds.o
 obj-$(CONFIG_PCI) += pci_v3.o pci.o
 obj-$(CONFIG_CPU_FREQ_INTEGRATOR) += cpu.o
 obj-$(CONFIG_INTEGRATOR_IMPD1) += impd1.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index bd17b5154311..d302f0405fd2 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -14,6 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 
 #include <asm/hardware.h>
 #include <asm/irq.h>
@@ -221,7 +222,24 @@ integrator_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	 */
 	timer1->TimerClear = 1;
 
-	timer_tick(regs);
+	/*
+	 * the clock tick routines are only processed on the
+	 * primary CPU
+	 */
+	if (hard_smp_processor_id() == 0) {
+		nmi_tick();
+		timer_tick(regs);
+#ifdef CONFIG_SMP
+		smp_send_timer();
+#endif
+	}
+
+#ifdef CONFIG_SMP
+	/*
+	 * this is the ARM equivalent of the APIC timer interrupt
+	 */
+	update_process_times(user_mode(regs));
+#endif /* CONFIG_SMP */
 
 	write_sequnlock(&xtime_lock);
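[Note: the "holding pen" used by the Integrator code is split across the two new files below - headsmp.S spins each secondary core until it is released, and boot_secondary() in platsmp.c performs the release. Roughly, in C (a sketch only; the real secondary-side loop runs in assembly with the MMU off, reading pen_release through the physical address stored in phys_pen_release):

    /* Secondary side: wait in the pen until the boot CPU writes our id. */
    while (*(volatile int *)phys_pen_release != hw_cpu_id)
            ;                                /* held in the pen */
    secondary_startup();                     /* does not return */

    /* Boot side (see boot_secondary() below): release one core, then
     * wait for it to acknowledge by writing -1 back. */
    pen_release = cpu;
    smp_cross_call(cpumask_of_cpu(cpu));     /* wake the core from WFI */
    while (pen_release != -1 && time_before(jiffies, timeout))
            udelay(10);
]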
diff --git a/arch/arm/mach-integrator/headsmp.S b/arch/arm/mach-integrator/headsmp.S
new file mode 100644
index 000000000000..ceaa88e30d70
--- /dev/null
+++ b/arch/arm/mach-integrator/headsmp.S
@@ -0,0 +1,37 @@
+/*
+ *  linux/arch/arm/mach-integrator/headsmp.S
+ *
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	__INIT
+
+/*
+ * Integrator specific entry point for secondary CPUs.  This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(integrator_secondary_startup)
+	adr	r4, 1f
+	ldmia	r4, {r5, r6}
+	sub	r4, r4, r5
+	ldr	r6, [r6, r4]
+pen:	ldr	r7, [r6]
+	cmp	r7, r0
+	bne	pen
+
+	/*
+	 * we've been released from the holding pen: secondary_stack
+	 * should now contain the SVC stack for this core
+	 */
+	b	secondary_startup
+
+1:	.long	.
+	.long	phys_pen_release
diff --git a/arch/arm/mach-integrator/leds.c b/arch/arm/mach-integrator/leds.c
index d2c0ab21150c..f1436e683b49 100644
--- a/arch/arm/mach-integrator/leds.c
+++ b/arch/arm/mach-integrator/leds.c
@@ -22,6 +22,8 @@
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
 
 #include <asm/hardware.h>
 #include <asm/io.h>
@@ -85,4 +87,4 @@ static int __init leds_init(void)
 	return 0;
 }
 
-__initcall(leds_init);
+core_initcall(leds_init);
diff --git a/arch/arm/mach-integrator/platsmp.c b/arch/arm/mach-integrator/platsmp.c
new file mode 100644
index 000000000000..ead15dfcb53d
--- /dev/null
+++ b/arch/arm/mach-integrator/platsmp.c
@@ -0,0 +1,192 @@
+/*
+ *  linux/arch/arm/mach-cintegrator/platsmp.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/atomic.h>
+#include <asm/delay.h>
+#include <asm/mmu_context.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+extern void integrator_secondary_startup(void);
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __initdata pen_release = -1;
+unsigned long __initdata phys_pen_release = 0;
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __init platform_secondary_init(unsigned int cpu)
+{
+	/*
+	 * the primary core may have used a "cross call" soft interrupt
+	 * to get this processor out of WFI in the BootMonitor - make
+	 * sure that we are no longer being sent this soft interrupt
+	 */
+	smp_cross_call_done(cpumask_of_cpu(cpu));
+
+	/*
+	 * if any interrupts are already enabled for the primary
+	 * core (e.g. timer irq), then they will not have been enabled
+	 * for us: do so
+	 */
+	secondary_scan_irqs();
+
+	/*
+	 * let the primary processor know we're out of the
+	 * pen, then head off into the C entry point
+	 */
+	pen_release = -1;
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	spin_lock(&boot_lock);
+	spin_unlock(&boot_lock);
+}
+
+int __init boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long timeout;
+
+	/*
+	 * set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	spin_lock(&boot_lock);
+
+	/*
+	 * The secondary processor is waiting to be released from
+	 * the holding pen - release it, then wait for it to flag
+	 * that it has been released by resetting pen_release.
+	 *
+	 * Note that "pen_release" is the hardware CPU ID, whereas
+	 * "cpu" is Linux's internal ID.
+	 */
+	pen_release = cpu;
+
+	/*
+	 * XXX
+	 *
+	 * This is a later addition to the booting protocol: the
+	 * bootMonitor now puts secondary cores into WFI, so
+	 * poke_milo() no longer gets the cores moving; we need
+	 * to send a soft interrupt to wake the secondary core.
+	 * Use smp_cross_call() for this, since there's little
+	 * point duplicating the code here
+	 */
+	smp_cross_call(cpumask_of_cpu(cpu));
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		if (pen_release == -1)
+			break;
+
+		udelay(10);
+	}
+
+	/*
+	 * now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	spin_unlock(&boot_lock);
+
+	return pen_release != -1 ? -ENOSYS : 0;
+}
+
+static void __init poke_milo(void)
+{
+	extern void secondary_startup(void);
+
+	/* nobody is to be released from the pen yet */
+	pen_release = -1;
+
+	phys_pen_release = virt_to_phys(&pen_release);
+
+	/*
+	 * write the address of secondary startup into the system-wide
+	 * flags register, then clear the bottom two bits, which is what
+	 * BootMonitor is waiting for
+	 */
+#if 1
+#define CINTEGRATOR_HDR_FLAGSS_OFFSET 0x30
+	__raw_writel(virt_to_phys(integrator_secondary_startup),
+		     (IO_ADDRESS(INTEGRATOR_HDR_BASE) +
+		      CINTEGRATOR_HDR_FLAGSS_OFFSET));
+#define CINTEGRATOR_HDR_FLAGSC_OFFSET 0x34
+	__raw_writel(3,
+		     (IO_ADDRESS(INTEGRATOR_HDR_BASE) +
+		      CINTEGRATOR_HDR_FLAGSC_OFFSET));
+#endif
+
+	mb();
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int ncores = get_core_count();
+	unsigned int cpu = smp_processor_id();
+	int i;
+
+	/* sanity check */
+	if (ncores == 0) {
+		printk(KERN_ERR
+		       "Integrator/CP: strange CM count of 0? Default to 1\n");
+
+		ncores = 1;
+	}
+
+	if (ncores > NR_CPUS) {
+		printk(KERN_WARNING
+		       "Integrator/CP: no. of cores (%d) greater than configured "
+		       "maximum of %d - clipping\n",
+		       ncores, NR_CPUS);
+		ncores = NR_CPUS;
+	}
+
+	/*
+	 * start with some more config for the Boot CPU, now that
+	 * the world is a bit more alive (which was not the case
+	 * when smp_prepare_boot_cpu() was called)
+	 */
+	smp_store_cpu_info(cpu);
+
+	/*
+	 * are we trying to boot more cores than exist?
+	 */
+	if (max_cpus > ncores)
+		max_cpus = ncores;
+
+	/*
+	 * Initialise the present mask - this tells us which CPUs should
+	 * be present.
+	 */
+	for (i = 0; i < max_cpus; i++) {
+		cpu_set(i, cpu_present_mask);
+	}
+
+	/*
+	 * Do we need any more CPUs? If so, then let them know where
+	 * to start.  Note that, on modern versions of MILO, the "poke"
+	 * doesn't actually do anything until each individual core is
+	 * sent a soft interrupt to get it out of WFI
+	 */
+	if (max_cpus > 1)
+		poke_milo();
+}
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 9799fe80df23..ac4dd4336160 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -133,6 +133,8 @@ static int pxa_pm_enter(suspend_state_t state)
 	/* *** go zzz *** */
 	pxa_cpu_pm_enter(state);
 
+	cpu_init();
+
 	/* after sleeping, validate the checksum */
 	checksum = 0;
 	for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 379ea5e3950f..59c7964cfe11 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -88,6 +88,8 @@ static int sa11x0_pm_enter(suspend_state_t state)
 	/* go zzz */
 	sa1100_cpu_suspend();
 
+	cpu_init();
+
 	/*
 	 * Ensure not to come back here if it wasn't intended
 	 */
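[Note: the three-word per-mode stacks added in setup.c above are sized to exactly what the reworked vector_stub in entry-armv.S stores before it switches to SVC mode. A sketch of that layout (illustrative C, not kernel code; the struct name is hypothetical):

    /* What vector_stub saves through each mode's banked sp: */
    struct mode_save {
            u32 r0;         /* stmia sp, {r0, lr}            */
            u32 lr;         /* lr_<exception>, parent PC     */
            u32 spsr;       /* str lr, [sp, #8], parent CPSR */
    };                      /* matches u32 irq[3]/abt[3]/und[3] in struct stack */

cpu_init() then points the banked sp of IRQ, ABT and UND mode at the matching per-CPU array, which is what lets the handlers re-enter safely on each processor.]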