author	Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 22:31:35 +0000
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 22:32:03 +0000
commit	4ec3eb13634529c0bc7466658d84d0bbe3244aea (patch)
tree	b491daac2ccfc7b8ca88e171a43f66888463568a /arch/arm/mach-vexpress
parent	24056f525051a9e186af28904b396320e18bf9a0 (diff)
parent	15095bb0fe779c0403091bda7adce5fb3bb9ca35 (diff)
Merge branch 'smp' into misc
Conflicts:
	arch/arm/kernel/entry-armv.S
	arch/arm/mm/ioremap.c
Diffstat (limited to 'arch/arm/mach-vexpress')
-rw-r--r--	arch/arm/mach-vexpress/Makefile	1
-rw-r--r--	arch/arm/mach-vexpress/ct-ca9x4.c	2
-rw-r--r--	arch/arm/mach-vexpress/hotplug.c	128
-rw-r--r--	arch/arm/mach-vexpress/include/mach/smp.h	5
-rw-r--r--	arch/arm/mach-vexpress/platsmp.c	74
5 files changed, 159 insertions, 51 deletions
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 1b71b77ade22..2c0ac7de2814 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -5,4 +5,5 @@
obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 26a02eb57571..f4455e3ed6a4 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -53,7 +53,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
static void __init ct_ca9x4_map_io(void)
{
+#ifdef CONFIG_LOCAL_TIMERS
twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c
new file mode 100644
index 000000000000..ea4cbfb90a66
--- /dev/null
+++ b/arch/arm/mach-vexpress/hotplug.c
@@ -0,0 +1,128 @@
+/*
+ * linux/arch/arm/mach-realview/hotplug.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ flush_cache_all();
+ asm volatile(
+ "mcr p15, 0, %1, c7, c5, 0\n"
+ " mcr p15, 0, %1, c7, c10, 4\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %3\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ /*
+ * there is no power-control hardware on this platform, so all
+ * we can do is put the core into WFI; this is safe as the calling
+ * code will have already disabled interrupts
+ */
+ for (;;) {
+ /*
+ * here's the WFI
+ */
+ asm(".word 0xe320f003\n"
+ :
+ :
+ : "memory", "cc");
+
+ if (pen_release == cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here, means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+ return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+ /*
+ * we don't allow CPU 0 to be shutdown (it is still too special
+ * e.g. clock tick interrupts)
+ */
+ return cpu == 0 ? -EPERM : 0;
+}
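
The hotplug path added above is reached through the generic CPU-hotplug core: writing 0 to a CPU's sysfs "online" file ends up calling platform_cpu_disable() and, if that succeeds, platform_cpu_die() on the dying CPU. The following user-space helper is an illustrative sketch only (it is not part of this commit); it assumes the standard sysfs layout.

/*
 * Illustrative user-space helper (not part of this commit): take a CPU
 * offline through the standard sysfs interface, which is what ends up
 * invoking platform_cpu_disable()/platform_cpu_die() above.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	char path[64];
	unsigned int cpu = (argc > 1) ? strtoul(argv[1], NULL, 0) : 1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u/online", cpu);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	/* "0" requests offlining; the kernel then parks the CPU in WFI */
	if (fputs("0\n", f) == EOF || fclose(f) == EOF) {
		perror(path);
		return 1;
	}
	return 0;
}
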
diff --git a/arch/arm/mach-vexpress/include/mach/smp.h b/arch/arm/mach-vexpress/include/mach/smp.h
index 5a6da4fd247e..4c05e4a9713a 100644
--- a/arch/arm/mach-vexpress/include/mach/smp.h
+++ b/arch/arm/mach-vexpress/include/mach/smp.h
@@ -2,13 +2,12 @@
#define __MACH_SMP_H
#include <asm/hardware/gic.h>
-#include <asm/smp_mpidr.h>
/*
* We use IRQ1 as the IPI
*/
-static inline void smp_cross_call(const struct cpumask *mask)
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
- gic_raise_softirq(mask, 1);
+ gic_raise_softirq(mask, ipi);
}
#endif
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index 670970699ba9..8ce9fef29555 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -17,7 +17,6 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
-#include <asm/localtimer.h>
#include <asm/smp_scu.h>
#include <asm/unified.h>
@@ -35,6 +34,19 @@ extern void vexpress_secondary_startup(void);
*/
volatile int __cpuinitdata pen_release = -1;
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
static void __iomem *scu_base_addr(void)
{
return MMIO_P2V(A9_MPCORE_SCU);
@@ -44,8 +56,6 @@ static DEFINE_SPINLOCK(boot_lock);
void __cpuinit platform_secondary_init(unsigned int cpu)
{
- trace_hardirqs_off();
-
/*
* if any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@@ -57,8 +67,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- pen_release = -1;
- smp_wmb();
+ write_pen_release(-1);
/*
* Synchronise with the boot thread.
@@ -83,16 +92,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* since we haven't sent them a soft interrupt, they shouldn't
* be there.
*/
- pen_release = cpu;
- __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
- outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+ write_pen_release(cpu);
/*
* Send the secondary CPU a soft interrupt, thereby causing
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
- smp_cross_call(cpumask_of(cpu));
+ smp_cross_call(cpumask_of(cpu), 1);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
@@ -124,13 +131,6 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores == 0) {
- printk(KERN_ERR
- "vexpress: strange CM count of 0? Default to 1\n");
-
- ncores = 1;
- }
-
if (ncores > NR_CPUS) {
printk(KERN_WARNING
"vexpress: no. of cores (%d) greater than configured "
@@ -143,20 +143,10 @@ void __init smp_init_cpus(void)
set_cpu_possible(i, true);
}
-void __init smp_prepare_cpus(unsigned int max_cpus)
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned int ncores = num_possible_cpus();
- unsigned int cpu = smp_processor_id();
int i;
- smp_store_cpu_info(cpu);
-
- /*
- * are we trying to boot more cores than exist?
- */
- if (max_cpus > ncores)
- max_cpus = ncores;
-
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time.
@@ -164,27 +154,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
+ scu_enable(scu_base_addr());
+
/*
- * Initialise the SCU if there are more than one CPU and let
- * them know where to start.
+ * Write the address of secondary startup into the
+ * system-wide flags register. The boot monitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
*/
- if (max_cpus > 1) {
- /*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- scu_enable(scu_base_addr());
-
- /*
- * Write the address of secondary startup into the
- * system-wide flags register. The boot monitor waits
- * until it receives a soft interrupt, and then the
- * secondary CPU branches to this address.
- */
- writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
- writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
- MMIO_P2V(V2M_SYS_FLAGSSET));
- }
+ writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
+ writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
+ MMIO_P2V(V2M_SYS_FLAGSSET));
}
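
The platsmp.c changes above revolve around the pen_release handshake: boot_secondary() publishes the target CPU number with write_pen_release(), sends a soft interrupt, and then waits for the secondary to write -1 back from platform_secondary_init(). The sketch below is a simplified user-space model of that ordering, not kernel code: C11 atomics and busy-waiting stand in for the cache maintenance, the soft interrupt and WFE/WFI used by the real implementation.

/*
 * Simplified user-space model of the pen_release handshake used by
 * boot_secondary()/platform_secondary_init() above. Illustrative only.
 * Build with: cc -pthread pen_model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pen_release = ATOMIC_VAR_INIT(-1);

static void *secondary(void *arg)
{
	int cpu = *(int *)arg;

	/* sit in the "pen" until the boot CPU releases this cpu */
	while (atomic_load(&pen_release) != cpu)
		;

	printf("cpu%d: released from the pen\n", cpu);

	/* signal the boot CPU we are out, as platform_secondary_init() does */
	atomic_store(&pen_release, -1);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int cpu = 1;

	pthread_create(&t, NULL, secondary, &cpu);

	/* "write_pen_release(cpu)", then the wait loop in boot_secondary() */
	atomic_store(&pen_release, cpu);
	while (atomic_load(&pen_release) != -1)
		;

	pthread_join(t, NULL);
	printf("boot: cpu%d is up\n", cpu);
	return 0;
}
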