author	Peter Zijlstra <peterz@infradead.org>	2015-04-23 21:44:42 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2015-07-27 14:06:22 +0200
commit	d835b6c4cc02507b3bf3f8ee6c86857cf0ee67ab (patch)
tree	4bee7d7516eb8f382d137cde96b46fe2b6eea442
parent	f8a570e270bf62363cd498ac2ac8ea07a76ad4d6 (diff)
blackfin: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

TODO: use inline asm or at least asm macros to collapse the lot.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
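For context, a minimal usage sketch (not part of this patch): it shows how code that used the old atomic_set_mask()/atomic_clear_mask() helpers maps onto the new atomic_or()/atomic_and() operations. The IRQ_PENDING flag and the `pending` counter are made-up names for illustration only.

/*
 * Illustrative sketch only; IRQ_PENDING and `pending` are hypothetical.
 * The old-style call is shown in a comment next to its replacement.
 */
#include <linux/atomic.h>

#define IRQ_PENDING	(1 << 0)

static atomic_t pending = ATOMIC_INIT(0);

static void mark_pending(void)
{
	/* was: atomic_set_mask(IRQ_PENDING, &pending); */
	atomic_or(IRQ_PENDING, &pending);
}

static void clear_pending(void)
{
	/* was: atomic_clear_mask(IRQ_PENDING, &pending); */
	atomic_and(~IRQ_PENDING, &pending);
}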
-rw-r--r--	arch/blackfin/include/asm/atomic.h	28
-rw-r--r--	arch/blackfin/kernel/bfin_ksyms.c	7
-rw-r--r--	arch/blackfin/mach-bf561/atomic.S	30
3 files changed, 40 insertions(+), 25 deletions(-)
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98e9978..eafa55b81a7b 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,33 @@
#include <linux/types.h>
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+#define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_and(~mask, v);
+}
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ atomic_or(mask, v);
+}
#endif
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591b961d..a401c27b69b4 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(insl_16);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
EXPORT_SYMBOL(__raw_atomic_xor_asm);
EXPORT_SYMBOL(__raw_atomic_test_asm);
+
EXPORT_SYMBOL(__raw_xchg_1_asm);
EXPORT_SYMBOL(__raw_xchg_2_asm);
EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8e8c4c..26fccb5568b9 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
* r0 = ptr
* r1 = value
*
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
p1 = r0;
r3 = r1;
[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
/*
* r0 = ptr
* r1 = mask
*
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
* atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
p1 = r0;
- r3 = ~r1;
+ r3 = r1;
[--sp] = rets;
call _get_core_lock;
r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
/*
* r0 = ptr
* r1 = mask
*
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
* atomically.
* Clobbers: r3:0, p1:0
*/
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
p1 = r0;
r3 = r1;
[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
r0 = r3;
rets = [sp++];
rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
/*
* r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
r2 = r1;
r1 = 1;
r1 <<= r2;
- jump ___raw_atomic_set_asm
+ jump ___raw_atomic_or_asm
ENDPROC(___raw_bit_set_asm)
/*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
* Clobbers: r3:0, p1:0
*/
ENTRY(___raw_bit_clear_asm)
- r2 = r1;
- r1 = 1;
- r1 <<= r2;
- jump ___raw_atomic_clear_asm
+ r2 = 1;
+ r2 <<= r1;
+ r1 = ~r2;
+ jump ___raw_atomic_and_asm
ENDPROC(___raw_bit_clear_asm)
/*
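One subtle point in the atomic.S hunk above: the old ___raw_atomic_clear_asm inverted its mask argument internally (r3 = ~r1), while the new ___raw_atomic_and_asm performs a plain AND, so ___raw_bit_clear_asm now builds the inverted mask itself before jumping. A C sketch of the new bit-clear sequence, under that reading of the assembly; the helper name is hypothetical:

/* Hypothetical C equivalent of the reworked ___raw_bit_clear_asm path. */
static inline int bit_clear_sketch(volatile int *ptr, int nr)
{
	int mask = ~(1 << nr);			/* r2 = 1; r2 <<= r1; r1 = ~r2; */

	return __raw_atomic_and_asm(ptr, mask);	/* jump ___raw_atomic_and_asm */
}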