author | travis@sgi.com <travis@sgi.com> | 2008-01-30 13:33:21 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 13:33:21 +0100 |
commit | 834beda15ecc43c110c0a6ac39ec1aa79f891716 (patch) | |
tree | 56f00ac3e1b06083c5be928dc740369a022b76fa | |
parent | ac72e7888a612dccfbc15b34698aad441bdfda10 (diff) | |
x86: change NR_CPUS arrays in numa_64 fixup
Change the following static arrays sized by NR_CPUS to
per_cpu data variables:
char cpu_to_node_map[NR_CPUS];
fixup:
- Split cpu_to_node function into "early" and "late" versions
so that x86_cpu_to_node_map_early_ptr is not EXPORT'ed and
the cpu_to_node inline function is more streamlined.
- This also involves setting up the percpu maps as early as possible.
- Fix X86_32 NUMA build errors that previous version of this
patch caused.
V2->V3:
- add early_cpu_to_node function to keep cpu_to_node efficient
- move and rename smp_set_apicids() to setup_percpu_maps()
- call setup_percpu_maps() as early as possible
V1->V2:
- Removed extraneous casts
- Fix !NUMA builds with '#ifdef CONFIG_NUMA'
Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
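The "fixup" notes above split the node lookup into an early boot path (backed by a static init array reachable through x86_cpu_to_node_map_early_ptr) and a streamlined late path (backed by per-cpu data). Below is a minimal userspace sketch of that shape, not kernel code: NR_CPUS, the sample node values, and the names boot_cpu_to_node, early_node_ptr, percpu_node and percpu_ready are invented for the illustration; only the overall flow mirrors the patch.

```c
#include <stdio.h>
#include <string.h>

#define NR_CPUS      4
#define NUMA_NO_NODE (-1)   /* the kernel uses ((u16)(~0)); -1 keeps the model simple */

/* Boot-time table, playing the role of x86_cpu_to_node_map_init[]. */
static int boot_cpu_to_node[NR_CPUS] = { 0, 0, 1, 1 };

/* "Early pointer": non-NULL only while the boot table is still valid,
 * like x86_cpu_to_node_map_early_ptr. */
static int *early_node_ptr = boot_cpu_to_node;

/* Stand-ins for the per-cpu area; in the kernel this is
 * DEFINE_PER_CPU(u16, x86_cpu_to_node_map) plus per_cpu_offset(cpu). */
static int percpu_node[NR_CPUS];
static int percpu_ready;

/* Early accessor: usable before the per-cpu copies exist. */
static int early_cpu_to_node(int cpu)
{
	if (early_node_ptr)
		return early_node_ptr[cpu];
	return NUMA_NO_NODE;
}

/* Late accessor: the streamlined fast path once per-cpu data is live. */
static int cpu_to_node(int cpu)
{
	if (percpu_ready)
		return percpu_node[cpu];
	return NUMA_NO_NODE;
}

/* Plays the role of setup_percpu_maps(): copy the boot table into the
 * per-cpu storage, then drop the early pointer so the boot table
 * becomes expendable. */
static void setup_percpu_maps(void)
{
	memcpy(percpu_node, boot_cpu_to_node, sizeof(percpu_node));
	percpu_ready = 1;
	early_node_ptr = NULL;
}

int main(void)
{
	/* Before the copy, only the early accessor gives a real answer. */
	printf("early: cpu 2 -> node %d, late: %d\n",
	       early_cpu_to_node(2), cpu_to_node(2));

	setup_percpu_maps();

	printf("after setup: cpu 2 -> node %d\n", cpu_to_node(2));
	return 0;
}
```

The ordering in main() mirrors the constraint in the patch: setup_per_cpu_areas() has to place each CPU's area on the right node before the per-cpu copies exist, so it uses early_cpu_to_node() and calls setup_percpu_maps() only at the end.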
-rw-r--r-- | arch/x86/kernel/setup64.c    | 41
-rw-r--r-- | arch/x86/kernel/smpboot_32.c |  2
-rw-r--r-- | arch/x86/kernel/smpboot_64.c | 34
-rw-r--r-- | arch/x86/mm/srat_64.c        |  5
-rw-r--r-- | include/asm-x86/topology.h   | 23
5 files changed, 66 insertions, 39 deletions
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index 8fa0de810d0b..855ec82e4f76 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -84,6 +84,40 @@ static int __init nonx32_setup(char *str)
 __setup("noexec32=", nonx32_setup);
 
 /*
+ * Copy data used in early init routines from the initial arrays to the
+ * per cpu data areas.  These arrays then become expendable and the *_ptrs
+ * are zeroed indicating that the static arrays are gone.
+ */
+void __init setup_percpu_maps(void)
+{
+        int cpu;
+
+        for_each_possible_cpu(cpu) {
+#ifdef CONFIG_SMP
+                if (per_cpu_offset(cpu)) {
+#endif
+                        per_cpu(x86_cpu_to_apicid, cpu) =
+                                        x86_cpu_to_apicid_init[cpu];
+#ifdef CONFIG_NUMA
+                        per_cpu(x86_cpu_to_node_map, cpu) =
+                                        x86_cpu_to_node_map_init[cpu];
+#endif
+#ifdef CONFIG_SMP
+                }
+                else
+                        printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
+                                        cpu);
+#endif
+        }
+
+        /* indicate the early static arrays are gone */
+        x86_cpu_to_apicid_early_ptr = NULL;
+#ifdef CONFIG_NUMA
+        x86_cpu_to_node_map_early_ptr = NULL;
+#endif
+}
+
+/*
  * Great future plan:
  * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
  * Always point %gs to its beginning
@@ -104,18 +138,21 @@ void __init setup_per_cpu_areas(void)
         for_each_cpu_mask (i, cpu_possible_map) {
                 char *ptr;
 
-                if (!NODE_DATA(cpu_to_node(i))) {
+                if (!NODE_DATA(early_cpu_to_node(i))) {
                         printk("cpu with no node %d, num_online_nodes %d\n",
                                 i, num_online_nodes());
                         ptr = alloc_bootmem_pages(size);
                 } else {
-                        ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
+                        ptr = alloc_bootmem_pages_node(NODE_DATA(early_cpu_to_node(i)), size);
                 }
                 if (!ptr)
                         panic("Cannot allocate cpu data for CPU %d\n", i);
                 cpu_pda(i)->data_offset = ptr - __per_cpu_start;
                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
         }
+
+        /* setup percpu data maps early */
+        setup_percpu_maps();
 }
 
 void pda_init(int cpu)
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 915ec6267326..50232d476a27 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -465,7 +465,7 @@ cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
                                 { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
 EXPORT_SYMBOL(node_to_cpumask_map);
 /* which node each logical CPU is on */
-int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
+u8 cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
 EXPORT_SYMBOL(cpu_to_node_map);
 
 /* set up a mapping between cpu and node. */
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 93071cdf0849..4e14ecb90764 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -851,39 +851,6 @@ static int __init smp_sanity_check(unsigned max_cpus)
         return 0;
 }
 
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_ptrs are zeroed indicating that the static arrays are gone.
- */
-void __init smp_set_apicids(void)
-{
-        int cpu;
-
-        for_each_possible_cpu(cpu) {
-                if (per_cpu_offset(cpu)) {
-                        per_cpu(x86_cpu_to_apicid, cpu) =
-                                        x86_cpu_to_apicid_init[cpu];
-#ifdef CONFIG_NUMA
-                        per_cpu(x86_cpu_to_node_map, cpu) =
-                                        x86_cpu_to_node_map_init[cpu];
-#endif
-                        per_cpu(x86_bios_cpu_apicid, cpu) =
-                                        x86_bios_cpu_apicid_init[cpu];
-                }
-                else
-                        printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
-                                        cpu);
-        }
-
-        /* indicate the early static arrays are gone */
-        x86_cpu_to_apicid_early_ptr = NULL;
-#ifdef CONFIG_NUMA
-        x86_cpu_to_node_map_early_ptr = NULL;
-#endif
-        x86_bios_cpu_apicid_early_ptr = NULL;
-}
-
 static void __init smp_cpu_index_default(void)
 {
         int i;
@@ -906,7 +873,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         smp_cpu_index_default();
         current_cpu_data = boot_cpu_data;
         current_thread_info()->cpu = 0;  /* needed? */
-        smp_set_apicids();
         set_cpu_sibling_map(0);
 
         if (smp_sanity_check(max_cpus) < 0) {
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index e5a1ec8342dc..04cbecaeca81 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -382,9 +382,10 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
                 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 
         for (i = 0; i < NR_CPUS; i++) {
-                if (cpu_to_node(i) == NUMA_NO_NODE)
+                int node = cpu_to_node(i);
+                if (node == NUMA_NO_NODE)
                         continue;
-                if (!node_isset(cpu_to_node(i), node_possible_map))
+                if (!node_isset(node, node_possible_map))
                         numa_set_node(i, NUMA_NO_NODE);
         }
         numa_init_array();
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 2da1464ecbef..040374f030cf 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -30,16 +30,30 @@
 #include <asm/mpspec.h>
 
 /* Mappings between logical cpu number and node number */
+#ifdef CONFIG_X86_32
+extern u8 cpu_to_node_map[];
+
+#else
 DECLARE_PER_CPU(u16, x86_cpu_to_node_map);
 extern u16 x86_cpu_to_node_map_init[];
 extern void *x86_cpu_to_node_map_early_ptr;
+#endif
+
 extern cpumask_t node_to_cpumask_map[];
 
 #define NUMA_NO_NODE ((u16)(~0))
 
 /* Returns the number of the node containing CPU 'cpu' */
+#ifdef CONFIG_X86_32
+#define early_cpu_to_node(cpu) cpu_to_node(cpu)
 static inline int cpu_to_node(int cpu)
 {
+        return cpu_to_node_map[cpu];
+}
+
+#else /* CONFIG_X86_64 */
+static inline int early_cpu_to_node(int cpu)
+{
         u16 *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
 
         if (cpu_to_node_map)
@@ -50,6 +64,15 @@ static inline int cpu_to_node(int cpu)
         return NUMA_NO_NODE;
 }
 
+static inline int cpu_to_node(int cpu)
+{
+        if(per_cpu_offset(cpu))
+                return per_cpu(x86_cpu_to_node_map, cpu);
+        else
+                return NUMA_NO_NODE;
+}
+#endif /* CONFIG_X86_64 */
+
 /*
  * Returns the number of the node containing Node 'node'.  This
  * architecture is flat, so it is a pretty simple function!
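For comparison, the topology.h hunk keeps the 32-bit configuration on a flat u8 array (matching the smpboot_32.c change) and simply aliases early_cpu_to_node() to cpu_to_node(), since there is no early/late distinction to preserve there. A minimal standalone sketch of that path, with sample node values invented for the example:

```c
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

/* Flat map, as in the smpboot_32.c hunk (sample contents invented). */
static uint8_t cpu_to_node_map[NR_CPUS] = { 0, 0, 1, 1 };

static inline int cpu_to_node(int cpu)
{
	return cpu_to_node_map[cpu];
}

/* Mirrors the X86_32 branch: early and late lookups are the same thing. */
#define early_cpu_to_node(cpu) cpu_to_node(cpu)

int main(void)
{
	printf("cpu 3 -> node %d (early lookup: %d)\n",
	       cpu_to_node(3), early_cpu_to_node(3));
	return 0;
}
```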