From c32ffce0f66e5d1d4856254516e24f5ef275cd00 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 21 Feb 2014 17:01:48 +0100
Subject: ARM: 7984/1: prefetch: add prefetchw invocations for barriered atomics

After a bunch of benchmarking on the interaction between dmb and pldw,
it turns out that issuing the pldw *after* the dmb instruction can give
modest performance gains (~3% atomic_add_return improvement on a dual
A15).

This patch adds prefetchw invocations to our barriered atomic
operations, including cmpxchg, test_and_xxx and futexes.

Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/atomic.h | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb53b069..6e410090896e 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -299,6 +302,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -340,6 +344,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -364,6 +369,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +394,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -409,6 +416,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -436,6 +444,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	int ret = 1;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
 "1:	ldrexd	%0, %H0, [%4]\n"
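
Note: each hunk above is cut off at its context lines, so only the first
instruction or two of each ldrex/strex loop is visible. For reference, this
is a sketch of how atomic_add_return reads with the patch applied; the loop
body and operand constraints below are reconstructed from the arch/arm
atomic.h of this era and do not appear in the diff itself:

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();			/* dmb: full barrier first */
	prefetchw(&v->counter);		/* pldw: then prefetch-for-write */

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"		/* load-exclusive the counter */
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"		/* store-exclusive; %1 == 0 on success */
"	teq	%1, #0\n"
"	bne	1b"			/* retry if the reservation was lost */
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

Issuing the pldw after the dmb, rather than before it, is what produced the
~3% atomic_add_return improvement quoted in the commit message: the
write-prefetch pulls the cache line in immediately ahead of the ldrex/strex
loop that is about to claim it.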