 arch/powerpc/kernel/setup_64.c |   26 ++++++++++++++++++++
 include/asm-powerpc/paca.h     |    1 +
 include/asm-powerpc/percpu.h   |   56 ++++++++++++++++++++++++++++++++++++
 3 files changed, 83 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0420418f317a..e29b275e09e0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
 #include <linux/unistd.h>
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
+#include <linux/bootmem.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -654,3 +655,28 @@ void cpu_die(void)
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
 }
+
+#ifdef CONFIG_SMP
+void __init setup_per_cpu_areas(void)
+{
+	int i;
+	unsigned long size;
+	char *ptr;
+
+	/* Copy section for each CPU (we discard the original) */
+	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+#ifdef CONFIG_MODULES
+	if (size < PERCPU_ENOUGH_ROOM)
+		size = PERCPU_ENOUGH_ROOM;
+#endif
+
+	for_each_cpu(i) {
+		ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
+		if (!ptr)
+			panic("Cannot allocate cpu data for CPU %d\n", i);
+
+		paca[i].data_offset = ptr - __per_cpu_start;
+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+	}
+}
+#endif
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 3ae52d9dc9ff..a64b4d425dab 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -65,6 +65,7 @@ struct paca_struct {
 	u64 stab_real;			/* Absolute address of segment table */
 	u64 stab_addr;			/* Virtual address of segment table */
 	void *emergency_sp;		/* pointer to emergency stack */
+	u64 data_offset;		/* per cpu data offset */
 	s16 hw_cpu_id;			/* Physical processor number */
 	u8 cpu_start;			/* At startup, processor spins until */
 					/* this becomes non-zero. */
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 06a959d67234..e31922c50e53 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -1 +1,57 @@
+#ifndef _ASM_POWERPC_PERCPU_H_
+#define _ASM_POWERPC_PERCPU_H_
+#ifdef __powerpc64__
+#include <linux/compiler.h>
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the paca. Based on the x86-64 implementation.
+ */
+
+#ifdef CONFIG_SMP
+
+#include <asm/paca.h>
+
+#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
+#define __my_cpu_offset() get_paca()->data_offset
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name) \
+    __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
+#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+
+/* A macro to avoid #include hell... */
+#define percpu_modcopy(pcpudst, src, size)			\
+do {								\
+	unsigned int __i;					\
+	for (__i = 0; __i < NR_CPUS; __i++)			\
+		if (cpu_possible(__i))				\
+			memcpy((pcpudst)+__per_cpu_offset(__i),	\
+			       (src), (size));			\
+} while (0)
+
+extern void setup_per_cpu_areas(void);
+
+#else /* ! SMP */
+
+#define DEFINE_PER_CPU(type, name) \
+    __typeof__(type) per_cpu__##name
+
+#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu__##var))
+#define __get_cpu_var(var)		per_cpu__##var
+
+#endif	/* SMP */
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
+#else
 #include <asm-generic/percpu.h>
+#endif
+
+#endif /* _ASM_POWERPC_PERCPU_H_ */
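
For context, a minimal usage sketch of the interface this patch wires up. It is illustrative only and not part of the patch: the variable pcpu_hits and the two helper functions are hypothetical names, used to show how the macros above resolve per-CPU accesses through the paca's data_offset field.

/*
 * Illustrative sketch only -- not part of this patch. pcpu_hits,
 * pcpu_example_tick() and pcpu_example_total() are made-up names.
 */
#include <linux/percpu.h>
#include <linux/smp.h>

/* Placed in .data.percpu; each CPU gets its own copy during boot. */
static DEFINE_PER_CPU(unsigned long, pcpu_hits);

static void pcpu_example_tick(void)
{
	/*
	 * On powerpc64 SMP this expands to
	 * *RELOC_HIDE(&per_cpu__pcpu_hits, get_paca()->data_offset),
	 * i.e. the linked address plus this CPU's offset from its paca.
	 * The caller is assumed to have preemption disabled.
	 */
	__get_cpu_var(pcpu_hits)++;
}

static unsigned long pcpu_example_total(void)
{
	unsigned long sum = 0;
	int i;

	/* per_cpu(var, cpu) reaches another CPU's copy via paca[cpu]. */
	for_each_cpu(i)
		sum += per_cpu(pcpu_hits, i);

	return sum;
}

Because setup_per_cpu_areas() allocates each copy with alloc_bootmem_node() on the CPU's own node, these accesses stay node-local on NUMA systems, which is the point of overriding the generic implementation.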