author    Linus Torvalds <torvalds@linux-foundation.org>  2010-10-22 17:31:36 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-22 17:31:36 -0700
commit    0fc0531e0a2174377a86fd6953ecaa00287d8f70 (patch)
tree      afe56978729300df96b002a064c9de927fadcfab /include
parent    91b745016c12d440386c40fb76ab69c8e08cbc06 (diff)
parent    9329ba9704f6bd51a735982e0d4a3eed72c3294f (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: update comments to reflect that percpu allocations are always zero-filled
  percpu: Optimize __get_cpu_var()
  x86, percpu: Optimize this_cpu_ptr
  percpu: clear memory allocated with the km allocator
  percpu: fix build breakage on s390 and cleanup build configuration tests
  percpu: use percpu allocator on UP too
  percpu: reduce PCPU_MIN_UNIT_SIZE to 32k
  vmalloc: pcpu_get/free_vm_areas() aren't needed on UP

Fixed up trivial conflicts in include/linux/percpu.h
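The branch touches the basic per-CPU accessors (__get_cpu_var(), this_cpu_ptr()) and the allocator behind them. As a reminder of how callers use those accessors, here is a minimal, hypothetical kernel-code sketch; it is not part of this merge, and the names (hit_count, count_hit, total_hits) are illustrative only:

/*
 * Usage sketch (not from this merge): touching a static per-CPU variable
 * through the accessors this series optimizes.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, hit_count);	/* one instance per CPU */

static void count_hit(void)
{
	preempt_disable();			/* pin to the current CPU */
	(*this_cpu_ptr(&hit_count))++;		/* bump this CPU's copy */
	preempt_enable();
}

static unsigned long total_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)		/* walk every CPU's instance */
		sum += per_cpu(hit_count, cpu);
	return sum;
}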
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/percpu.h  14
-rw-r--r--  include/linux/percpu.h        31
-rw-r--r--  include/linux/vmalloc.h        2
3 files changed, 17 insertions, 30 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 08923b684768..d17784ea37ff 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -55,14 +55,18 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
*/
#define per_cpu(var, cpu) \
(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
-#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
-#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#ifndef __this_cpu_ptr
#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#else
+#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
+#endif
+#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
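After this hunk, __get_cpu_var() is simply *this_cpu_ptr(&(var)) and __raw_get_cpu_var() is *__this_cpu_ptr(&(var)); the #ifndef __this_cpu_ptr guard lets an architecture supply an optimized definition before the generic fallback is seen. A hedged sketch of when a caller would pick one form over the other (the variable and function names are hypothetical, not from this commit):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, evt_count);	/* illustrative variable */

static void bump_checked(void)
{
	/* Expands to *this_cpu_ptr(&evt_count); with CONFIG_DEBUG_PREEMPT the
	 * my_cpu_offset path complains if the caller could still migrate. */
	preempt_disable();
	__get_cpu_var(evt_count)++;
	preempt_enable();
}

static void bump_raw(void)
{
	/* Expands to *__this_cpu_ptr(&evt_count): no debug check, for contexts
	 * (IRQ/softirq, or inside an existing preempt-off region) that already
	 * guarantee the CPU cannot change underneath us. */
	__raw_get_cpu_var(evt_count)++;
}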
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 0eb50832aa00..5095b834a6fb 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,10 +48,8 @@
preempt_enable(); \
} while (0)
-#ifdef CONFIG_SMP
-
/* minimum unit size, also is the maximum supported allocation size */
-#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
+#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
/*
* Percpu allocator can serve percpu allocations before slab is
@@ -146,37 +144,20 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
* dynamically allocated. Non-atomic access to the current CPU's
* version should probably be combined with get_cpu()/put_cpu().
*/
+#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+#else
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
+#endif
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);
-#else /* CONFIG_SMP */
-
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
-
-/* can't distinguish from other static vars, always false */
-static inline bool is_kernel_percpu_address(unsigned long addr)
-{
- return false;
-}
-
-static inline void __init setup_per_cpu_areas(void) { }
-
-static inline void __init percpu_init_late(void) { }
-
-static inline void *pcpu_lpage_remapped(void *kaddr)
-{
- return NULL;
-}
-
-#endif /* CONFIG_SMP */
-
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
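With the UP stubs above removed, per_cpu_ptr(), alloc_percpu() and friends share one declaration path, and (per the "use percpu allocator on UP too" patch) dynamic per-CPU memory now comes from the real allocator on both SMP and UP. A small hypothetical sketch of that dynamic API; the struct and function names are illustrative, not from this commit:

#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/types.h>

struct conn_stats {				/* illustrative payload */
	u64 rx_bytes;
	u64 tx_bytes;
};

static struct conn_stats __percpu *stats;

static int stats_init(void)
{
	/* Served by the percpu allocator on SMP and UP alike after this
	 * series; the updated comments note the memory is zero-filled. */
	stats = alloc_percpu(struct conn_stats);
	if (!stats)
		return -ENOMEM;
	return 0;
}

static u64 stats_total_rx(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(stats, cpu)->rx_bytes; /* UP: pointer check only */
	return sum;
}

static void stats_exit(void)
{
	free_percpu(stats);
}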
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 01c2145118dc..63a4fe6d51bd 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -117,10 +117,12 @@ extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+#ifdef CONFIG_SMP
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align, gfp_t gfp_mask);
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
+#endif
#endif /* _LINUX_VMALLOC_H */
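pcpu_get_vm_areas()/pcpu_free_vm_areas() exist only for the vmalloc-backed SMP first-chunk code, so their declarations are now compiled out on UP. A rough, hypothetical sketch of how a chunk allocator might use the pair, based only on the declarations above (the layout values and names are made up):

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical layout: two CPU groups, 64 KiB apart, 32 KiB of unit each. */
static const unsigned long grp_offsets[] = { 0, 64 << 10 };
static const size_t grp_sizes[] = { 32 << 10, 32 << 10 };

static struct vm_struct **chunk_vms;

static int reserve_chunk_vm(void)
{
	/* Reserve congruent vmalloc areas, one per group of the chunk. */
	chunk_vms = pcpu_get_vm_areas(grp_offsets, grp_sizes, 2,
				      PAGE_SIZE, GFP_KERNEL);
	if (!chunk_vms)
		return -ENOMEM;
	return 0;
}

static void release_chunk_vm(void)
{
	pcpu_free_vm_areas(chunk_vms, 2);
}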