author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 15:46:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 15:46:07 -0700
commit     ca520cab25e0e8da717c596ccaa2c2b3650cfa09 (patch)
tree       883eb497642d98635817f9cf954ac98e043fb573 /arch/frv/include/asm/atomic.h
parent     4c12ab7e5e2e892fa94df500f96001837918a281 (diff)
parent     d420acd816c07c7be31bd19d09cbcb16e5572fa6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and atomic updates from Ingo Molnar:
 "Main changes in this cycle are:

  - Extend atomic primitives with coherent logic op primitives
    (atomic_{or,and,xor}()) and deprecate the old partial APIs
    (atomic_{set,clear}_mask())

    The old ops were incoherent with incompatible signatures across
    architectures and with incomplete support.  Now every architecture
    supports the primitives consistently (by Peter Zijlstra)

  - Generic support for 'relaxed atomics':

      - _acquire/release/relaxed() flavours of xchg(), cmpxchg() and
        {add,sub}_return()
      - atomic_read_acquire()
      - atomic_set_release()

    This came out of porting qrwlock code to arm64 (by Will Deacon)

  - Clean up the fragile static_key APIs that were causing repeat bugs,
    by introducing a new one:

      DEFINE_STATIC_KEY_TRUE(name);
      DEFINE_STATIC_KEY_FALSE(name);

    which define a key of different types with an initial true/false
    value.

    Then allow:

      static_branch_likely()
      static_branch_unlikely()

    to take a key of either type and emit the right instruction for the
    case.  To be able to know the 'type' of the static key we encode it
    in the jump entry (by Peter Zijlstra)

  - Static key self-tests (by Jason Baron)

  - qrwlock optimizations (by Waiman Long)

  - small futex enhancements (by Davidlohr Bueso)

  - ... and misc other changes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (63 commits)
  jump_label/x86: Work around asm build bug on older/backported GCCs
  locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
  locking, include/llist: Use linux/atomic.h instead of asm/cmpxchg.h
  locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
  locking/qrwlock: Implement queue_write_unlock() using smp_store_release()
  locking/lockref: Remove homebrew cmpxchg64_relaxed() macro definition
  locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'
  locking, asm-generic: Rework atomic-long.h to avoid bulk code duplication
  locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations
  locking, compiler.h: Cast away attributes in the WRITE_ONCE() magic
  locking/static_keys: Make verify_keys() static
  jump label, locking/static_keys: Update docs
  locking/static_keys: Provide a selftest
  jump_label: Provide a self-test
  s390/uaccess, locking/static_keys: employ static_branch_likely()
  x86, tsc, locking/static_keys: Employ static_branch_likely()
  locking/static_keys: Add selftest
  locking/static_keys: Add a new static_key interface
  locking/static_keys: Rework update logic
  locking/static_keys: Add static_key_{en,dis}able() helpers
  ...
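The 'relaxed atomics' series described above separates memory ordering from the atomic operation itself. A minimal sketch of how the new flavours read in practice; data_ready and try_claim() are invented names for illustration, while atomic_read_acquire(), atomic_set_release() and the _relaxed() cmpxchg flavour are the interfaces this merge introduces:

#include <linux/atomic.h>

static atomic_t data_ready = ATOMIC_INIT(0);

static void producer(void)
{
	/* release: orders all prior stores before the flag becomes visible */
	atomic_set_release(&data_ready, 1);
}

static int consumer(void)
{
	/* acquire: orders the flag load before any subsequent loads */
	return atomic_read_acquire(&data_ready);
}

static int try_claim(atomic_t *lock)
{
	/* relaxed: still atomic, but with no ordering guarantee; the
	 * caller adds barriers only where they are actually needed */
	return atomic_cmpxchg_relaxed(lock, 0, 1) == 0;
}

On strongly ordered architectures the three flavours typically compile to the same instruction; the benefit is on weakly ordered targets such as arm64, whose qrwlock port motivated the series, where a full-barrier cmpxchg() is more expensive than the relaxed form.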
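The reworked static-key interface is likewise easy to show in miniature. A hedged sketch assuming a hypothetical module: netdev_tracing and trace_hit() are invented for the example, but DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely() and static_branch_enable() are the interfaces named above:

#include <linux/jump_label.h>

/* hypothetical key; initially false, so the branch body stays out of
 * the hot path until the key is flipped at runtime */
static DEFINE_STATIC_KEY_FALSE(netdev_tracing);

static void trace_hit(void)
{
	/* hypothetical slow-path hook */
}

static void xmit_packet(void)
{
	if (static_branch_unlikely(&netdev_tracing))
		trace_hit();	/* jump patched in only when enabled */
	/* ... fast path continues ... */
}

static void tracing_on(void)
{
	/* rewrites every branch site for this key at runtime */
	static_branch_enable(&netdev_tracing);
}

Because the key's type now records its initial value, static_branch_unlikely() can emit the correct no-op or jump for either a _TRUE or a _FALSE key, removing the guesswork that made the old static_key API error-prone.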
Diffstat (limited to 'arch/frv/include/asm/atomic.h')
-rw-r--r--  arch/frv/include/asm/atomic.h | 107
1 file changed, 54 insertions(+), 53 deletions(-)
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 102190a61d65..0da689def4cc 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -15,7 +15,6 @@
#define _ASM_ATOMIC_H
#include <linux/types.h>
-#include <asm/spr-regs.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
@@ -23,6 +22,8 @@
#error not SMP safe
#endif
+#include <asm/atomic_defs.h>
+
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
@@ -34,56 +35,26 @@
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_inc_return(atomic_t *v)
{
- unsigned long val;
+ return __atomic_add_return(1, &v->counter);
+}
- asm("0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " add%I2 %1,%2,%1 \n"
- " cst.p %1,%M0 ,cc3,#1 \n"
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
- " beq icc3,#0,0b \n"
- : "+U"(v->counter), "=&r"(val)
- : "NPr"(i)
- : "memory", "cc7", "cc3", "icc3"
- );
+static inline int atomic_dec_return(atomic_t *v)
+{
+ return __atomic_sub_return(1, &v->counter);
+}
- return val;
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ return __atomic_add_return(i, &v->counter);
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
- unsigned long val;
-
- asm("0: \n"
- " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
- " ckeq icc3,cc7 \n"
- " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
- " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
- " sub%I2 %1,%2,%1 \n"
- " cst.p %1,%M0 ,cc3,#1 \n"
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
- " beq icc3,#0,0b \n"
- : "+U"(v->counter), "=&r"(val)
- : "NPr"(i)
- : "memory", "cc7", "cc3", "icc3"
- );
-
- return val;
+ return __atomic_sub_return(i, &v->counter);
}
-#else
-
-extern int atomic_add_return(int i, atomic_t *v);
-extern int atomic_sub_return(int i, atomic_t *v);
-
-#endif
-
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
@@ -101,17 +72,14 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline void atomic_inc(atomic_t *v)
{
- atomic_add_return(1, v);
+ atomic_inc_return(v);
}
static inline void atomic_dec(atomic_t *v)
{
- atomic_sub_return(1, v);
+ atomic_dec_return(v);
}
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
@@ -120,18 +88,19 @@ static inline void atomic_dec(atomic_t *v)
* 64-bit atomic ops
*/
typedef struct {
- volatile long long counter;
+ long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
-static inline long long atomic64_read(atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
long long counter;
asm("ldd%I1 %M1,%0"
: "=e"(counter)
: "m"(v->counter));
+
return counter;
}
@@ -142,10 +111,25 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "e"(i));
}
-extern long long atomic64_inc_return(atomic64_t *v);
-extern long long atomic64_dec_return(atomic64_t *v);
-extern long long atomic64_add_return(long long i, atomic64_t *v);
-extern long long atomic64_sub_return(long long i, atomic64_t *v);
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+ return __atomic64_add_return(1, &v->counter);
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+ return __atomic64_sub_return(1, &v->counter);
+}
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ return __atomic64_add_return(i, &v->counter);
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ return __atomic64_sub_return(i, &v->counter);
+}
static inline long long atomic64_add_negative(long long i, atomic64_t *v)
{
@@ -176,6 +160,7 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
+
#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
@@ -196,5 +181,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ (void)__atomic32_fetch_##op(i, &v->counter); \
+} \
+ \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+ (void)__atomic64_fetch_##op(i, &v->counter); \
+}
+
+ATOMIC_OP(or)
+ATOMIC_OP(and)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
#endif /* _ASM_ATOMIC_H */
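For reference, the ATOMIC_OP() macro added at the end of the diff is a straightforward stamp. Expanding ATOMIC_OP(or) by hand gives the following pair of wrappers; the __atomic32_fetch_or()/__atomic64_fetch_or() helpers come from the newly included <asm/atomic_defs.h>, and the (void) cast discards the fetched old value:

static inline void atomic_or(int i, atomic_t *v)
{
	(void)__atomic32_fetch_or(i, &v->counter);
}

static inline void atomic64_or(long long i, atomic64_t *v)
{
	(void)__atomic64_fetch_or(i, &v->counter);
}

ATOMIC_OP(and) and ATOMIC_OP(xor) expand the same way, giving frv the coherent atomic_{or,and,xor}() primitives that the merge message describes.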