Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/assembler.h     |  13
-rw-r--r--  arch/arm/include/asm/atomic.h        |  61
-rw-r--r--  arch/arm/include/asm/cache.h         |  16
-rw-r--r--  arch/arm/include/asm/flat.h          |   3
-rw-r--r--  arch/arm/include/asm/hardware/gic.h  |   2
-rw-r--r--  arch/arm/include/asm/page.h          |   7
-rw-r--r--  arch/arm/include/asm/smp.h           |  12
-rw-r--r--  arch/arm/include/asm/system.h        | 176
8 files changed, 262 insertions(+), 28 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6116e4893c0a..15f8a092b700 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
.align 3; \
.long 9999b,9001f; \
.previous
+
+/*
+ * SMP data memory barrier
+ */
+ .macro smp_dmb
+#ifdef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7
+ dmb
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c10, 5 @ dmb
+#endif
+#endif
+ .endm
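
The macro selects the barrier encoding by architecture revision: ARMv7 has a dedicated dmb instruction, while ARMv6 issues the same barrier through a CP15 write; earlier revisions and UP builds need nothing. A minimal C-level sketch of the same selection (the demo_ name is hypothetical; CONFIG_SMP and __LINUX_ARM_ARCH__ are the kernel macros used above):

#ifdef CONFIG_SMP
# if __LINUX_ARM_ARCH__ >= 7
#  define demo_smp_dmb() __asm__ __volatile__("dmb" : : : "memory")
# elif __LINUX_ARM_ARCH__ == 6
   /* same barrier via the CP15 write the assembler macro uses;
    * the register value written is ignored, so pass zero */
#  define demo_smp_dmb() \
	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5" \
			     : : "r" (0) : "memory")
# endif
#else
  /* UP: a compiler barrier is sufficient */
# define demo_smp_dmb() __asm__ __volatile__("" : : : "memory")
#endif
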
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index ee99723b3a6c..16b52f397983 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
: "cc");
}
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %0, [%2]\n"
+" add %0, %0, %3\n"
+" strex %1, %0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
+ smp_mb();
+
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");
+ smp_mb();
+
return result;
}
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %0, [%2]\n"
+" sub %0, %0, %3\n"
+" strex %1, %0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
+ smp_mb();
+
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
@@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");
+ smp_mb();
+
return result;
}
@@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long oldval, res;
+ smp_mb();
+
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);
+ smp_mb();
+
return oldval;
}
@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
return val;
}
+#define atomic_add(i, v) (void) atomic_add_return(i, v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return val;
}
+#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#define atomic_add(i, v) (void) atomic_add_return(i, v)
-#define atomic_inc(v) (void) atomic_add_return(1, v)
-#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
-#define atomic_dec(v) (void) atomic_sub_return(1, v)
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-/* Atomic operations are already serializing on ARM */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic.h>
#endif
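
With this change, the void operations (atomic_add, atomic_sub) are plain ldrex/strex read-modify-write loops with no ordering, while the value-returning and cmpxchg variants are bracketed by smp_mb() and become fully ordered. A user-space analogue of that split, assuming GCC/Clang __atomic builtins rather than the kernel API (all demo_ names hypothetical):

#include <stdio.h>

static int counter;

/* like atomic_add(): an atomic RMW, no barriers implied */
static void demo_atomic_add(int i, int *v)
{
	__atomic_fetch_add(v, i, __ATOMIC_RELAXED);
}

/* like atomic_add_return(): smp_mb() before and after,
 * i.e. a fully ordered RMW */
static int demo_atomic_add_return(int i, int *v)
{
	return __atomic_add_fetch(v, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	demo_atomic_add(2, &counter);
	printf("%d\n", demo_atomic_add_return(1, &counter)); /* 3 */
	return 0;
}

This is also why smp_mb__before/after_atomic_dec() stop being plain compiler barriers: the void operations no longer serialize anything by themselves.
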
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index cb7a9e97fd7e..feaa75f0013e 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -7,4 +7,20 @@
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+/*
+ * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+ */
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
#endif
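
The new ARCH_KMALLOC_MINALIGN guarantees that a DMA-capable allocation never shares a cache line with unrelated data. A user-space illustration of the same rule, assuming C11 aligned_alloc() (demo_ names hypothetical; the kernel itself uses kmalloc(), not this):

#include <stdlib.h>
#include <assert.h>

#define DEMO_CACHE_BYTES 32		/* = 1 << L1_CACHE_SHIFT above */

static void *demo_dma_alloc(size_t size)
{
	/* round size up to a multiple of the alignment, as
	 * aligned_alloc() requires */
	size_t rounded = (size + DEMO_CACHE_BYTES - 1) &
			 ~(size_t)(DEMO_CACHE_BYTES - 1);
	return aligned_alloc(DEMO_CACHE_BYTES, rounded);
}

int main(void)
{
	void *buf = demo_dma_alloc(100);
	assert(((unsigned long)buf & (DEMO_CACHE_BYTES - 1)) == 0);
	free(buf);
	return 0;
}
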
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index 1d77e51907f6..59426a4595c9 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -5,9 +5,6 @@
#ifndef __ARM_FLAT_H__
#define __ARM_FLAT_H__
-/* An odd number of words will be pushed after this alignment, so
- deliberately misalign the value. */
-#define flat_stack_align(sp) sp = (void *)(((unsigned long)(sp) - 4) | 4)
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 4924914af188..7f34333bb545 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -36,7 +36,7 @@
void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
#endif
#endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index e6eb8a67b807..7b522770f29d 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -202,13 +202,6 @@ typedef struct page *pgtable_t;
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-/*
- * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
- */
-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
-#define ARCH_SLAB_MINALIGN 8
-#endif
-
#include <asm-generic/page.h>
#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index fad70da5911d..5995935338e1 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -53,17 +53,12 @@ extern void smp_store_cpu_info(unsigned int cpuid);
/*
* Raise an IPI cross call on CPUs in callmap.
*/
-extern void smp_cross_call(cpumask_t callmap);
-
-/*
- * Broadcast a timer interrupt to the other CPUs.
- */
-extern void smp_send_timer(void);
+extern void smp_cross_call(const struct cpumask *mask);
/*
* Broadcast a clock event to other CPUs.
*/
-extern void smp_timer_broadcast(cpumask_t mask);
+extern void smp_timer_broadcast(const struct cpumask *mask);
/*
* Boot a secondary CPU, and assign it the specified idle task.
@@ -102,7 +97,8 @@ extern int platform_cpu_kill(unsigned int cpu);
extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* Local timer interrupt handling function (can be IPI'ed).
*/
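
Both gic.h and smp.h above switch from passing cpumask_t by value to passing const struct cpumask *, which stays cheap even when NR_CPUS makes the mask larger than a machine word. Hypothetical call sites under the new convention (assuming the <linux/cpumask.h> helpers of this kernel era):

	smp_cross_call(cpumask_of(cpu));		/* IPI a single CPU */
	smp_timer_broadcast(mask);			/* const struct cpumask * */
	gic_raise_softirq(cpumask_of(cpu), irq);	/* matches gic.h above */
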
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index bd4dc8ed53d5..d65b2f5bf41f 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif
+ smp_mb();
+
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
+ smp_mb();
return ret;
}
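
The smp_mb() pair turns xchg() into a fully ordered operation on SMP. A user-space analogue, assuming GCC __atomic builtins (demo_xchg is hypothetical); __ATOMIC_SEQ_CST plays the role of the barriers on both sides:

static inline unsigned long demo_xchg(unsigned long *ptr, unsigned long x)
{
	/* atomically store x and return the previous value,
	 * fully ordered like the patched __xchg() */
	return __atomic_exchange_n(ptr, x, __ATOMIC_SEQ_CST);
}

A classic consumer is a test-and-set lock, where the barrier after the exchange keeps the critical section from floating above the acquire.
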
@@ -316,6 +319,12 @@ extern void enable_hlt(void);
#include <asm-generic/cmpxchg-local.h>
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP
+#error "SMP is not supported on this platform"
+#endif
+
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
@@ -329,6 +338,173 @@ extern void enable_hlt(void);
#include <asm-generic/cmpxchg.h>
#endif
+#else /* __LINUX_ARM_ARCH__ >= 6 */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+/*
+ * cmpxchg only supports 32-bit operands on ARMv6; the 8- and 16-bit
+ * variants below additionally require ARMv6K (ldrexb/strexb, ldrexh/strexh).
+ */
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long oldval, res;
+
+ switch (size) {
+#ifdef CONFIG_CPU_32v6K
+ case 1:
+ do {
+ asm volatile("@ __cmpxchg1\n"
+ " ldrexb %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexbeq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+ case 2:
+ do {
+ asm volatile("@ __cmpxchg2\n"
+ " ldrexh %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexheq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+#endif /* CONFIG_CPU_32v6K */
+ case 4:
+ do {
+ asm volatile("@ __cmpxchg4\n"
+ " ldrex %1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " strexeq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (old), "r" (new)
+ : "memory", "cc");
+ } while (res);
+ break;
+ default:
+ __bad_cmpxchg(ptr, size);
+ oldval = 0;
+ }
+
+ return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long ret;
+
+ smp_mb();
+ ret = __cmpxchg(ptr, old, new, size);
+ smp_mb();
+
+ return ret;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+#ifndef CONFIG_CPU_32v6K
+ case 1:
+ case 2:
+ ret = __cmpxchg_local_generic(ptr, old, new, size);
+ break;
+#endif /* !CONFIG_CPU_32v6K */
+ default:
+ ret = __cmpxchg(ptr, old, new, size);
+ }
+
+ return ret;
+}
+
+#define cmpxchg_local(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+
+#ifdef CONFIG_CPU_32v6K
+
+/*
+ * Note: ARMv7-M (currently unsupported by Linux) does not support
+ * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
+ * not be allowed to use __cmpxchg64.
+ */
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ register unsigned long long oldval asm("r0");
+ register unsigned long long __old asm("r2") = old;
+ register unsigned long long __new asm("r4") = new;
+ unsigned long res;
+
+ do {
+ asm volatile(
+ " @ __cmpxchg8\n"
+ " ldrexd %1, %H1, [%2]\n"
+ " mov %0, #0\n"
+ " teq %1, %3\n"
+ " teqeq %H1, %H3\n"
+ " strexdeq %0, %4, %H4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (ptr), "Ir" (__old), "r" (__new)
+ : "memory", "cc");
+ } while (res);
+
+ return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+ unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long ret;
+
+ smp_mb();
+ ret = __cmpxchg64(ptr, old, new);
+ smp_mb();
+
+ return ret;
+}
+
+#define cmpxchg64(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr,o,n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)))
+
+#else /* !CONFIG_CPU_32v6K */
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* CONFIG_CPU_32v6K */
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
#endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
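
A user-space analogue of the cmpxchg() family added to system.h above, assuming GCC __atomic builtins (demo_ names hypothetical): cmpxchg() corresponds to a fully ordered compare-and-swap, mirroring the smp_mb() pair in __cmpxchg_mb(), the _local variants carry no ordering, and the 64-bit form stands in for the ldrexd/strexd path:

#include <stdbool.h>
#include <stdio.h>

static unsigned long demo_cmpxchg(unsigned long *ptr,
				  unsigned long old, unsigned long new)
{
	/* like __cmpxchg_mb(): ordered, returns the prior value */
	__atomic_compare_exchange_n(ptr, &old, new, false,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;
}

static unsigned long long demo_cmpxchg64_local(unsigned long long *ptr,
					       unsigned long long old,
					       unsigned long long new)
{
	/* like cmpxchg64_local(): same CAS, no ordering */
	__atomic_compare_exchange_n(ptr, &old, new, false,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	return old;
}

int main(void)
{
	unsigned long v = 1;
	unsigned long long w = 5;

	unsigned long prev = demo_cmpxchg(&v, 1, 2);
	printf("%lu %lu\n", prev, v);			/* 1 2: swap succeeded */

	unsigned long long prev64 = demo_cmpxchg64_local(&w, 9, 7);
	printf("%llu %llu\n", prev64, w);		/* 5 5: old mismatched */
	return 0;
}
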