Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/setup-common.c  24
-rw-r--r--  arch/powerpc/kernel/setup_64.c       3
-rw-r--r--  arch/powerpc/kernel/smp.c           32
3 files changed, 29 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 61a3f4132087..9cc5a52711e5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -367,7 +367,6 @@ static void __init cpu_init_thread_core_maps(int tpc)
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_map
* cpu_present_map
- * cpu_sibling_map
*
* Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
@@ -475,29 +474,6 @@ void __init smp_setup_cpu_maps(void)
*/
cpu_init_thread_core_maps(nthreads);
}
-
-/*
- * Being that cpu_sibling_map is now a per_cpu array, then it cannot
- * be initialized until the per_cpu areas have been created. This
- * function is now called from setup_per_cpu_areas().
- */
-void __init smp_setup_cpu_sibling_map(void)
-{
-#ifdef CONFIG_PPC64
- int i, cpu, base;
-
- for_each_possible_cpu(cpu) {
- DBG("Sibling map for CPU %d:", cpu);
- base = cpu_first_thread_in_core(cpu);
- for (i = 0; i < threads_per_core; i++) {
- cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
- DBG(" %d", base + i);
- }
- DBG("\n");
- }
-
-#endif /* CONFIG_PPC64 */
-}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PCSPKR_PLATFORM
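
The setup-common.c hunks above remove smp_setup_cpu_sibling_map() and its mention in the setup_cpu_maps comment: with cpu_sibling_map now a per-CPU cpumask, the map is built up as each CPU comes online rather than in one pass after the per-CPU areas are created. A minimal sketch of that per-CPU map, assuming the contemporary DEFINE_PER_CPU/per_cpu/cpu_set helpers; the helper name mark_threads_as_siblings is hypothetical and only illustrates the pairing pattern:

	/* Sketch only, not part of the patch. */
	DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;

	/* Record that two hardware threads share a core, in both directions. */
	static void mark_threads_as_siblings(int cpu, int sibling)
	{
		cpu_set(sibling, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, per_cpu(cpu_sibling_map, sibling));
	}

Because per_cpu() only works once the per-CPU areas exist, a one-shot initializer like the removed function could not run before setup_per_cpu_areas(); filling the map at CPU-online time removes that ordering constraint.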
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04d8de9f0fc6..8b25f51f03bf 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -611,9 +611,6 @@ void __init setup_per_cpu_areas(void)
paca[i].data_offset = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
-
- /* Now that per_cpu is setup, initialize cpu_sibling_map */
- smp_setup_cpu_sibling_map();
}
#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f5ae9fa222ea..3c4d07e5e06a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -41,6 +41,7 @@
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
+#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
@@ -228,6 +229,7 @@ void __devinit smp_prepare_boot_cpu(void)
BUG_ON(smp_processor_id() != boot_cpuid);
cpu_set(boot_cpuid, cpu_online_map);
+ cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
#ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current;
#endif
@@ -380,6 +382,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
int __devinit start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
+ int i, base;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
@@ -400,6 +403,14 @@ int __devinit start_secondary(void *unused)
ipi_call_lock();
cpu_set(cpu, cpu_online_map);
+ /* Update sibling maps */
+ base = cpu_first_thread_in_core(cpu);
+ for (i = 0; i < threads_per_core; i++) {
+ if (cpu_is_offline(base + i))
+ continue;
+ cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
+ cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+ }
ipi_call_unlock();
local_irq_enable();
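
The loop added to start_secondary() links a freshly onlined thread with every sibling thread in its core that is already up. The same logic as a standalone sketch, assuming cpu_first_thread_in_core() and threads_per_core from <asm/cputhreads.h> (which this patch starts including); online_update_sibling_maps is a hypothetical name used only for illustration:

	/* Sketch: pair an onlining thread with its already-online core siblings. */
	static void online_update_sibling_maps(unsigned int cpu)
	{
		int base = cpu_first_thread_in_core(cpu);
		int i;

		for (i = 0; i < threads_per_core; i++) {
			if (cpu_is_offline(base + i))
				continue;	/* sibling not up yet; it links us when it boots */
			cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
			cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
		}
	}

Skipping offline siblings keeps the masks consistent: when the missing thread later boots, its own pass through this loop adds both directions of the link.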
@@ -437,10 +448,25 @@ void __init smp_cpus_done(unsigned int max_cpus)
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
- if (smp_ops->cpu_disable)
- return smp_ops->cpu_disable();
+ int cpu = smp_processor_id();
+ int base, i;
+ int err;
- return -ENOSYS;
+ if (!smp_ops->cpu_disable)
+ return -ENOSYS;
+
+ err = smp_ops->cpu_disable();
+ if (err)
+ return err;
+
+ /* Update sibling maps */
+ base = cpu_first_thread_in_core(cpu);
+ for (i = 0; i < threads_per_core; i++) {
+ cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
+ cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
+ }
+
+ return 0;
}
void __cpu_die(unsigned int cpu)
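
The __cpu_disable() hunk above undoes the pairing when a thread goes offline, clearing both directions so the per-CPU masks stay symmetric with what start_secondary() set up. A short sketch of how a consumer might walk the resulting map, assuming the cpumask iterator for_each_cpu_mask(); show_core_siblings is a hypothetical example function:

	/* Sketch: list the hardware threads recorded as sharing a core with 'cpu'. */
	static void show_core_siblings(unsigned int cpu)
	{
		int sibling;

		for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
			printk(KERN_INFO "cpu %u: sibling %d\n", cpu, sibling);
	}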