author | Thomas Gleixner <tglx@linutronix.de> | 2010-06-28 22:33:13 +0200 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2010-06-28 22:33:24 +0200 |
commit | f384c954c9fe3d3c6fce5ae66b67f2ddd947d098 (patch) | |
tree | a38541b8083a2304435e9a153d408bd7cd44116e /mm | |
parent | 9a15a07fe2175dc25cd928a354b3839f562ac8cc (diff) | |
parent | 5904b3b81d25166e5e39b9727645bb47937618e3 (diff) | |
download | talos-obmc-linux-f384c954c9fe3d3c6fce5ae66b67f2ddd947d098.tar.gz, talos-obmc-linux-f384c954c9fe3d3c6fce5ae66b67f2ddd947d098.zip |
Merge branch 'linus' into perf/core
Reason: Further changes conflict with upstream fixes
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/percpu.c | 36 |
1 file changed, 30 insertions, 6 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index 39f7dfd59585..6470e7710231 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -229,8 +229,8 @@ static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 }
 
-static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk,
-						    unsigned int cpu, int page_idx)
+static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
+				     unsigned int cpu, int page_idx)
 {
 	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 		(page_idx << PAGE_SHIFT);
@@ -978,7 +978,32 @@ bool is_kernel_percpu_address(unsigned long addr)
  */
 phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
-	if (pcpu_addr_in_first_chunk(addr)) {
+	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+	bool in_first_chunk = false;
+	unsigned long first_start, first_end;
+	unsigned int cpu;
+
+	/*
+	 * The following test on first_start/end isn't strictly
+	 * necessary but will speed up lookups of addresses which
+	 * aren't in the first chunk.
+	 */
+	first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
+	first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
+				    pcpu_unit_pages);
+	if ((unsigned long)addr >= first_start &&
+	    (unsigned long)addr < first_end) {
+		for_each_possible_cpu(cpu) {
+			void *start = per_cpu_ptr(base, cpu);
+
+			if (addr >= start && addr < start + pcpu_unit_size) {
+				in_first_chunk = true;
+				break;
+			}
+		}
+	}
+
+	if (in_first_chunk) {
 		if ((unsigned long)addr < VMALLOC_START ||
 		    (unsigned long)addr >= VMALLOC_END)
 			return __pa(addr);
@@ -1086,7 +1111,7 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 	static int group_map[NR_CPUS] __initdata;
 	static int group_cnt[NR_CPUS] __initdata;
 	const size_t static_size = __per_cpu_end - __per_cpu_start;
-	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
+	int nr_groups = 1, nr_units = 0;
 	size_t size_sum, min_unit_size, alloc_size;
 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
 	int last_allocs, group, unit;
@@ -1096,7 +1121,7 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 
 	/* this function may be called multiple times */
 	memset(group_map, 0, sizeof(group_map));
-	memset(group_cnt, 0, sizeof(group_map));
+	memset(group_cnt, 0, sizeof(group_cnt));
 
 	/*
 	 * Determine min_unit_size, alloc_size and max_upa such that
@@ -1130,7 +1155,6 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 		}
 		group_map[cpu] = group;
 		group_cnt[group]++;
-		group_cnt_max = max(group_cnt_max, group_cnt[group]);
 	}
 
 	/*
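For readers skimming the merge, the substantive change pulled in here is the rewrite of per_cpu_ptr_to_phys(): instead of relying on pcpu_addr_in_first_chunk(), it first does a cheap range check against the first chunk's overall address span (purely a fast path, as the patch's own comment notes) and then walks each possible CPU's unit to confirm the address really belongs to the first chunk. The toy program below sketches that two-step test in plain userspace C; NR_UNITS, UNIT_SIZE, first_chunk and addr_in_first_chunk are made-up names for illustration, the units are laid out contiguously for simplicity, and none of this is the kernel's actual percpu code.

```c
/*
 * Hypothetical userspace sketch of the first-chunk membership test the
 * merged patch adds to per_cpu_ptr_to_phys().  The flat layout (equal,
 * contiguous units) and every name here are illustrative assumptions,
 * not the kernel's real percpu allocator structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_UNITS  4	/* stands in for the number of possible CPUs */
#define UNIT_SIZE 4096	/* stands in for pcpu_unit_size */

/* One contiguous block holding every CPU's unit, like the first chunk. */
static char first_chunk[NR_UNITS * UNIT_SIZE];

static bool addr_in_first_chunk(const void *addr)
{
	uintptr_t p = (uintptr_t)addr;
	uintptr_t first_start = (uintptr_t)first_chunk;
	uintptr_t first_end = first_start + sizeof(first_chunk);

	/*
	 * Cheap range test first: addresses outside the chunk's overall
	 * span cannot belong to it, so skip the per-unit walk for them.
	 */
	if (p < first_start || p >= first_end)
		return false;

	/* Walk the per-CPU units and check whether one contains addr. */
	for (int cpu = 0; cpu < NR_UNITS; cpu++) {
		uintptr_t start = first_start + (uintptr_t)cpu * UNIT_SIZE;

		if (p >= start && p < start + UNIT_SIZE)
			return true;
	}
	return false;
}

int main(void)
{
	void *inside = first_chunk + 2 * UNIT_SIZE + 100;
	void *outside = malloc(64);

	printf("inside:  %d\n", addr_in_first_chunk(inside));	/* 1 */
	printf("outside: %d\n", addr_in_first_chunk(outside));	/* 0 */
	free(outside);
	return 0;
}
```

In the toy the range check and the per-unit walk always agree because the units tile the whole span; in the kernel the walk is the authoritative test and the range check only short-circuits addresses that cannot possibly fall in the first chunk. The remaining hunks are smaller cleanups: pcpu_chunk_addr() loses its __maybe_unused annotation now that it has a caller, the unused group_cnt_max accumulator is dropped, and memset(group_cnt, 0, sizeof(group_map)) becomes sizeof(group_cnt), which only happened to work before because both arrays are declared with the same NR_CPUS size.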