Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/percpu.h       |  8 ++++++++
-rw-r--r--  include/asm-generic/vmlinux.lds.h  |  8 ++++++++
-rw-r--r--  include/asm-i386/percpu.h          |  5 +++++
-rw-r--r--  include/asm-ia64/percpu.h          | 10 ++++++++++
-rw-r--r--  include/asm-powerpc/percpu.h       |  7 +++++++
-rw-r--r--  include/asm-s390/percpu.h          |  7 +++++++
-rw-r--r--  include/asm-sparc64/percpu.h       |  7 +++++++
-rw-r--r--  include/asm-x86_64/percpu.h        |  7 +++++++
8 files changed, 59 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d984a9041436..d85172e9ed45 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name			\
+	____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
 	extern int simple_identifier_##var(void);	\
@@ -34,6 +39,9 @@ do { \
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+	DEFINE_PER_CPU(type, name)
+
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
 #define __raw_get_cpu_var(var)			per_cpu__##var
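
The new interface targets per-CPU data that is written by its owning CPU but read remotely (runqueues and similar hot structures), giving it a dedicated section plus cache-line alignment so it no longer false-shares a line with neighbouring per-CPU variables. A minimal usage sketch; the type and variable names below are illustrative, not part of this patch:

	/* remotely-read, frequently-written: isolate on its own cache line */
	DEFINE_PER_CPU_SHARED_ALIGNED(struct hot_counters, cpu_counters);

	/* strictly CPU-local data keeps the default, denser packing */
	DEFINE_PER_CPU(unsigned long, local_tick_count);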
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 84155eb67f1d..a2b09ed852ad 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -245,3 +245,11 @@
 	*(.initcall7.init)					\
 	*(.initcall7s.init)
 
+#define PERCPU(align)						\
+	. = ALIGN(align);					\
+	__per_cpu_start = .;					\
+	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
+		*(.data.percpu)					\
+		*(.data.percpu.shared_aligned)			\
+	}							\
+	__per_cpu_end = .;
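
The PERCPU(align) helper lets each architecture's vmlinux.lds.S emit both per-CPU input sections with one invocation; placing .data.percpu.shared_aligned after the unaligned bulk keeps the alignment padding confined to the tail of the output section. A sketch of how a linker script might adopt it (the 4096-byte alignment is illustrative; each architecture chooses its own):

	SECTIONS
	{
		...
		/* replaces the open-coded __per_cpu_start/.data.percpu/__per_cpu_end block */
		PERCPU(4096)
		...
	}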
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index f54830b5d5ac..a7ebd436f3cc 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -54,6 +54,11 @@ extern unsigned long __per_cpu_offset[];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name			\
+	____cacheline_aligned_in_smp
+
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index fbe5cf3ab8dc..43a7aac414e0 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -29,6 +29,16 @@
 	__attribute__((__section__(".data.percpu")))		\
 	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
 
+#ifdef CONFIG_SMP
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name	\
+	____cacheline_aligned_in_smp
+#else
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+	DEFINE_PER_CPU(type, name)
+#endif
+
 /*
  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
  * external routine, to avoid include-hell.
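
The ia64 version differs from the generic one in two ways: it is guarded by CONFIG_SMP, since the extra alignment buys nothing on uniprocessor builds, and it retains __SMALL_ADDR_AREA so the variable stays reachable through the small data area. Assuming __SMALL_ADDR_AREA resolves to the small-model attribute (its usual SMP definition in this header), the SMP branch expands roughly as:

	/* DEFINE_PER_CPU_SHARED_ALIGNED(struct foo, bar) on ia64/SMP, approximately */
	__attribute__((__section__(".data.percpu.shared_aligned")))
	__attribute__((__model__(__small__))) struct foo per_cpu__bar
	____cacheline_aligned_in_smp;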
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 2f2e3024fa61..73dc8ba4010d 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name			\
+	____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
@@ -40,6 +45,8 @@ extern void setup_per_cpu_areas(void);
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+	DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 9ea7f1023e57..545857e64443 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -41,6 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
     __attribute__((__section__(".data.percpu"))) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name			\
+	____cacheline_aligned_in_smp
+
 #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
@@ -59,6 +64,8 @@ do { \
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+	DEFINE_PER_CPU(type, name)
 
 #define __get_cpu_var(var) __reloc_hide(var,0)
 #define __raw_get_cpu_var(var) __reloc_hide(var,0)
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index 88db872ce2f8..caf8750792ff 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -18,6 +18,11 @@ extern unsigned long __per_cpu_shift;
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name			\
+	____cacheline_aligned_in_smp
+
 register unsigned long __local_per_cpu_offset asm("g5");
 
 /* var is in discarded region: offset to particular copy we want */
@@ -38,6 +43,8 @@ do { \
 #define real_setup_per_cpu_areas()		do { } while (0)
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+	DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)cpu, &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index c6fbb67eac90..5abd48270101 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	__typeof__(type) per_cpu__##name			\
+	____cacheline_internodealigned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
 	extern int simple_identifier_##var(void);	\
@@ -46,6 +51,8 @@ extern void setup_per_cpu_areas(void);
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+	DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
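
x86_64 alone uses ____cacheline_internodealigned_in_smp instead of ____cacheline_aligned_in_smp, padding to the internode cache-line size so the variable also avoids false sharing on vSMP systems, where the coherence unit spans more than one local cache line. For reference, under CONFIG_SMP the two helpers from <linux/cache.h> reduce to roughly:

	/* approximate <linux/cache.h> definitions with CONFIG_SMP enabled */
	#define ____cacheline_aligned_in_smp \
		__attribute__((__aligned__(SMP_CACHE_BYTES)))
	#define ____cacheline_internodealigned_in_smp \
		__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))

On non-vSMP configurations INTERNODE_CACHE_SHIFT falls back to L1_CACHE_SHIFT, so the two forms coincide.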