49 files changed, 695 insertions(+), 1680 deletions(-)
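This series renames the architecture-private __atomic_add_unless() to atomic_fetch_add_unless() (which returns the old value of the counter) and deletes the per-architecture copies of the derived operations (inc/dec, *_and_test, add_negative, add_unless, inc_not_zero, dec_if_positive), leaving <linux/atomic.h> to supply generic fallbacks. An architecture advertises the operations it does implement with the "#define atomic_foo atomic_foo" idiom that appears throughout the diff; anything left undefined is picked up by an #ifndef fallback. A minimal sketch of that fallback pattern, for orientation only (the exact in-tree helpers may differ):

#ifndef atomic_fetch_add_unless
/*
 * Fallback: atomically add @a to @v unless it currently equals @u,
 * returning the old value. An architecture with a better native
 * sequence defines atomic_fetch_add_unless itself and this is skipped.
 */
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!atomic_try_cmpxchg(v, &c, c + a));

	return c;
}
#endif

#ifndef atomic_add_unless
/* Boolean form: true iff the add happened, recovered from the old value. */
#define atomic_add_unless(v, a, u)	(atomic_fetch_add_unless((v), (a), (u)) != (u))
#endif

#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif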
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 767bfdd42992..4a6a8f58c9c9 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -206,7 +206,7 @@ ATOMIC_OPS(xor, xor) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) /** - * __atomic_add_unless - add unless the number is a given value + * atomic_fetch_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. @@ -214,7 +214,7 @@ ATOMIC_OPS(xor, xor) * Atomically adds @a to @v, so long as it was not @u. * Returns the old value of @v. */ -static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int c, new, old; smp_mb(); @@ -235,38 +235,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) smp_mb(); return old; } - +#define atomic_fetch_add_unless atomic_fetch_add_unless /** - * atomic64_add_unless - add unless the number is a given value + * atomic64_fetch_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns true iff @v was not @u. + * Returns the old value of @v. */ -static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u) { - long c, tmp; + long c, new, old; smp_mb(); __asm__ __volatile__( - "1: ldq_l %[tmp],%[mem]\n" - " cmpeq %[tmp],%[u],%[c]\n" - " addq %[tmp],%[a],%[tmp]\n" + "1: ldq_l %[old],%[mem]\n" + " cmpeq %[old],%[u],%[c]\n" + " addq %[old],%[a],%[new]\n" " bne %[c],2f\n" - " stq_c %[tmp],%[mem]\n" - " beq %[tmp],3f\n" + " stq_c %[new],%[mem]\n" + " beq %[new],3f\n" "2:\n" ".subsection 2\n" "3: br 1b\n" ".previous" - : [tmp] "=&r"(tmp), [c] "=&r"(c) + : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c) : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u) : "memory"); smp_mb(); - return !c; + return old; } +#define atomic64_fetch_add_unless atomic64_fetch_add_unless /* * atomic64_dec_if_positive - decrement by 1 if old value positive @@ -295,31 +296,6 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) smp_mb(); return old - 1; } - -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) - -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) -#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) - -#define atomic_dec_return(v) atomic_sub_return(1,(v)) -#define atomic64_dec_return(v) atomic64_sub_return(1,(v)) - -#define atomic_inc_return(v) atomic_add_return(1,(v)) -#define atomic64_inc_return(v) atomic64_add_return(1,(v)) - -#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) -#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) - -#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) -#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0) - -#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) -#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) - -#define atomic_inc(v) atomic_add(1,(v)) -#define atomic64_inc(v) atomic64_add(1,(v)) - -#define atomic_dec(v) atomic_sub(1,(v)) -#define atomic64_dec(v) atomic64_sub(1,(v)) +#define atomic64_dec_if_positive atomic64_dec_if_positive #endif /* _ALPHA_ATOMIC_H */ diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h 
index 11859287c52a..4e0072730241 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -187,7 +187,8 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -#define atomic_andnot atomic_andnot +#define atomic_andnot atomic_andnot +#define atomic_fetch_andnot atomic_fetch_andnot #undef ATOMIC_OPS #define ATOMIC_OPS(op, c_op, asm_op) \ @@ -296,8 +297,6 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3) ATOMIC_FETCH_OP(op, c_op, asm_op) ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3) -#define atomic_andnot(mask, v) atomic_and(~(mask), (v)) -#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v)) ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3) ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3) @@ -308,48 +307,6 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -/** - * __atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns the old value of @v - */ -#define __atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - \ - /* \ - * Explicit full memory barrier needed before/after as \ - * LLOCK/SCOND thmeselves don't provide any such semantics \ - */ \ - smp_mb(); \ - \ - c = atomic_read(v); \ - while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\ - c = old; \ - \ - smp_mb(); \ - \ - c; \ -}) - -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -#define atomic_inc(v) atomic_add(1, v) -#define atomic_dec(v) atomic_sub(1, v) - -#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) -#define atomic_inc_return(v) atomic_add_return(1, (v)) -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) - -#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0) - - #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> @@ -472,7 +429,8 @@ static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \ ATOMIC64_OP_RETURN(op, op1, op2) \ ATOMIC64_FETCH_OP(op, op1, op2) -#define atomic64_andnot atomic64_andnot +#define atomic64_andnot atomic64_andnot +#define atomic64_fetch_andnot atomic64_fetch_andnot ATOMIC64_OPS(add, add.f, adc) ATOMIC64_OPS(sub, sub.f, sbc) @@ -559,53 +517,43 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) return val; } +#define atomic64_dec_if_positive atomic64_dec_if_positive /** - * atomic64_add_unless - add unless the number is a given value + * atomic64_fetch_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * - * if (v != u) { v += a; ret = 1} else {ret = 0} - * Returns 1 iff @v was not @u (i.e. if add actually happened) + * Atomically adds @a to @v, if it was not @u. 
+ * Returns the old value of @v */ -static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, + long long u) { - long long val; - int op_done; + long long old, temp; smp_mb(); __asm__ __volatile__( "1: llockd %0, [%2] \n" - " mov %1, 1 \n" " brne %L0, %L4, 2f # continue to add since v != u \n" " breq.d %H0, %H4, 3f # return since v == u \n" - " mov %1, 0 \n" "2: \n" - " add.f %L0, %L0, %L3 \n" - " adc %H0, %H0, %H3 \n" - " scondd %0, [%2] \n" + " add.f %L1, %L0, %L3 \n" + " adc %H1, %H0, %H3 \n" + " scondd %1, [%2] \n" " bnz 1b \n" "3: \n" - : "=&r"(val), "=&r" (op_done) + : "=&r"(old), "=&r" (temp) : "r"(&v->counter), "r"(a), "r"(u) : "cc"); /* memory clobber comes from smp_mb() */ smp_mb(); - return op_done; + return old; } - -#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) -#define atomic64_inc(v) atomic64_add(1LL, (v)) -#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) -#define atomic64_dec(v) atomic64_sub(1LL, (v)) -#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) -#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) +#define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif /* !CONFIG_GENERIC_ATOMIC64 */ diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 66d0e215a773..f74756641410 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -130,7 +130,7 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) } #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed -static inline int __atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int oldval, newval; unsigned long tmp; @@ -156,6 +156,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return oldval; } +#define atomic_fetch_add_unless atomic_fetch_add_unless #else /* ARM_ARCH_6 */ @@ -215,15 +216,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - - c = atomic_read(v); - while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) - c = old; - return c; -} +#define atomic_fetch_andnot atomic_fetch_andnot #endif /* __LINUX_ARM_ARCH__ */ @@ -254,17 +247,6 @@ ATOMIC_OPS(xor, ^=, eor) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_inc(v) atomic_add(1, v) -#define atomic_dec(v) atomic_sub(1, v) - -#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) -#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v)) -#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v)) -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) - -#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) - #ifndef CONFIG_GENERIC_ATOMIC64 typedef struct { long long counter; @@ -494,12 +476,13 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) return result; } +#define atomic64_dec_if_positive atomic64_dec_if_positive -static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, + 
long long u) { - long long val; + long long oldval, newval; unsigned long tmp; - int ret = 1; smp_mb(); prefetchw(&v->counter); @@ -508,33 +491,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) "1: ldrexd %0, %H0, [%4]\n" " teq %0, %5\n" " teqeq %H0, %H5\n" -" moveq %1, #0\n" " beq 2f\n" -" adds %Q0, %Q0, %Q6\n" -" adc %R0, %R0, %R6\n" -" strexd %2, %0, %H0, [%4]\n" +" adds %Q1, %Q0, %Q6\n" +" adc %R1, %R0, %R6\n" +" strexd %2, %1, %H1, [%4]\n" " teq %2, #0\n" " bne 1b\n" "2:" - : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) + : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (u), "r" (a) : "cc"); - if (ret) + if (oldval != u) smp_mb(); - return ret; + return oldval; } - -#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) -#define atomic64_inc(v) atomic64_add(1LL, (v)) -#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v)) -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) -#define atomic64_dec(v) atomic64_sub(1LL, (v)) -#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v)) -#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) +#define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif /* !CONFIG_GENERIC_ATOMIC64 */ #endif diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index c0235e0ff849..9bca54dda75c 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -40,17 +40,6 @@ #include <asm/cmpxchg.h> -#define ___atomic_add_unless(v, a, u, sfx) \ -({ \ - typeof((v)->counter) c, old; \ - \ - c = atomic##sfx##_read(v); \ - while (c != (u) && \ - (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \ - c = old; \ - c; \ - }) - #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) @@ -61,21 +50,11 @@ #define atomic_add_return_release atomic_add_return_release #define atomic_add_return atomic_add_return -#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) -#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) -#define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) -#define atomic_inc_return(v) atomic_add_return(1, (v)) - #define atomic_sub_return_relaxed atomic_sub_return_relaxed #define atomic_sub_return_acquire atomic_sub_return_acquire #define atomic_sub_return_release atomic_sub_return_release #define atomic_sub_return atomic_sub_return -#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) -#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) -#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) -#define atomic_dec_return(v) atomic_sub_return(1, (v)) - #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #define atomic_fetch_add_acquire atomic_fetch_add_acquire #define atomic_fetch_add_release atomic_fetch_add_release @@ -119,13 +98,6 @@ cmpxchg_release(&((v)->counter), (old), (new)) #define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new)) -#define atomic_inc(v) atomic_add(1, (v)) -#define atomic_dec(v) atomic_sub(1, (v)) -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) -#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) -#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) -#define atomic_add_negative(i, v) 
(atomic_add_return((i), (v)) < 0) -#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,) #define atomic_andnot atomic_andnot /* @@ -140,21 +112,11 @@ #define atomic64_add_return_release atomic64_add_return_release #define atomic64_add_return atomic64_add_return -#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) -#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) -#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) -#define atomic64_inc_return(v) atomic64_add_return(1, (v)) - #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #define atomic64_sub_return_acquire atomic64_sub_return_acquire #define atomic64_sub_return_release atomic64_sub_return_release #define atomic64_sub_return atomic64_sub_return -#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) -#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) -#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) -#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) - #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire #define atomic64_fetch_add_release atomic64_fetch_add_release @@ -195,16 +157,9 @@ #define atomic64_cmpxchg_release atomic_cmpxchg_release #define atomic64_cmpxchg atomic_cmpxchg -#define atomic64_inc(v) atomic64_add(1, (v)) -#define atomic64_dec(v) atomic64_sub(1, (v)) -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0) -#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0) -#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0) -#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u) #define atomic64_andnot atomic64_andnot -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#define atomic64_dec_if_positive atomic64_dec_if_positive #endif #endif diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h index 9c19594ce7cb..10d536b1af74 100644 --- a/arch/arm64/include/asm/bitops.h +++ b/arch/arm64/include/asm/bitops.h @@ -17,22 +17,11 @@ #define __ASM_BITOPS_H #include <linux/compiler.h> -#include <asm/barrier.h> #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly #endif -/* - * Little endian assembly atomic bitops. - */ -extern void set_bit(int nr, volatile unsigned long *p); -extern void clear_bit(int nr, volatile unsigned long *p); -extern void change_bit(int nr, volatile unsigned long *p); -extern int test_and_set_bit(int nr, volatile unsigned long *p); -extern int test_and_clear_bit(int nr, volatile unsigned long *p); -extern int test_and_change_bit(int nr, volatile unsigned long *p); - #include <asm-generic/bitops/builtin-__ffs.h> #include <asm-generic/bitops/builtin-ffs.h> #include <asm-generic/bitops/builtin-__fls.h> @@ -44,15 +33,11 @@ extern int test_and_change_bit(int nr, volatile unsigned long *p); #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/hweight.h> -#include <asm-generic/bitops/lock.h> +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/le.h> - -/* - * Ext2 is defined to use little-endian byte ordering. 
- */ -#define ext2_set_bit_atomic(lock, nr, p) test_and_set_bit_le(nr, p) -#define ext2_clear_bit_atomic(lock, nr, p) test_and_clear_bit_le(nr, p) +#include <asm-generic/bitops/ext2-atomic-setbit.h> #endif /* __ASM_BITOPS_H */ diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 137710f4dac3..68755fd70dcf 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -lib-y := bitops.o clear_user.o delay.o copy_from_user.o \ +lib-y := clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o copy_page.o \ clear_page.o memchr.o memcpy.o memmove.o memset.o \ memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \ diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S deleted file mode 100644 index 43ac736baa5b..000000000000 --- a/arch/arm64/lib/bitops.S +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Based on arch/arm/lib/bitops.h - * - * Copyright (C) 2013 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/linkage.h> -#include <asm/assembler.h> -#include <asm/lse.h> - -/* - * x0: bits 5:0 bit offset - * bits 31:6 word offset - * x1: address - */ - .macro bitop, name, llsc, lse -ENTRY( \name ) - and w3, w0, #63 // Get bit offset - eor w0, w0, w3 // Clear low bits - mov x2, #1 - add x1, x1, x0, lsr #3 // Get word offset -alt_lse " prfm pstl1strm, [x1]", "nop" - lsl x3, x2, x3 // Create mask - -alt_lse "1: ldxr x2, [x1]", "\lse x3, [x1]" -alt_lse " \llsc x2, x2, x3", "nop" -alt_lse " stxr w0, x2, [x1]", "nop" -alt_lse " cbnz w0, 1b", "nop" - - ret -ENDPROC(\name ) - .endm - - .macro testop, name, llsc, lse -ENTRY( \name ) - and w3, w0, #63 // Get bit offset - eor w0, w0, w3 // Clear low bits - mov x2, #1 - add x1, x1, x0, lsr #3 // Get word offset -alt_lse " prfm pstl1strm, [x1]", "nop" - lsl x4, x2, x3 // Create mask - -alt_lse "1: ldxr x2, [x1]", "\lse x4, x2, [x1]" - lsr x0, x2, x3 -alt_lse " \llsc x2, x2, x4", "nop" -alt_lse " stlxr w5, x2, [x1]", "nop" -alt_lse " cbnz w5, 1b", "nop" -alt_lse " dmb ish", "nop" - - and x0, x0, #1 - ret -ENDPROC(\name ) - .endm - -/* - * Atomic bit operations. - */ - bitop change_bit, eor, steor - bitop clear_bit, bic, stclr - bitop set_bit, orr, stset - - testop test_and_change_bit, eor, ldeoral - testop test_and_clear_bit, bic, ldclral - testop test_and_set_bit, orr, ldsetal diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index 941e7554e886..c6b6a06231b2 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -2,8 +2,10 @@ #ifndef __ARCH_H8300_ATOMIC__ #define __ARCH_H8300_ATOMIC__ +#include <linux/compiler.h> #include <linux/types.h> #include <asm/cmpxchg.h> +#include <asm/irqflags.h> /* * Atomic operations that C can't guarantee us. 
Useful for @@ -15,8 +17,6 @@ #define atomic_read(v) READ_ONCE((v)->counter) #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) -#include <linux/kernel.h> - #define ATOMIC_OP_RETURN(op, c_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ @@ -69,18 +69,6 @@ ATOMIC_OPS(sub, -=) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) - -#define atomic_inc_return(v) atomic_add_return(1, v) -#define atomic_dec_return(v) atomic_sub_return(1, v) - -#define atomic_inc(v) (void)atomic_inc_return(v) -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - -#define atomic_dec(v) (void)atomic_dec_return(v) -#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) - static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -94,7 +82,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } -static inline int __atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int ret; h8300flags flags; @@ -106,5 +94,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) arch_local_irq_restore(flags); return ret; } +#define atomic_fetch_add_unless atomic_fetch_add_unless #endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index fb3dfb2a667e..311b9894ccc8 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -164,7 +164,7 @@ ATOMIC_OPS(xor) #undef ATOMIC_OP /** - * __atomic_add_unless - add unless the number is a given value + * atomic_fetch_add_unless - add unless the number is a given value * @v: pointer to value * @a: amount to add * @u: unless value is equal to u @@ -173,7 +173,7 @@ ATOMIC_OPS(xor) * */ -static inline int __atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int __oldval; register int tmp; @@ -196,18 +196,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ); return __oldval; } - -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -#define atomic_inc(v) atomic_add(1, (v)) -#define atomic_dec(v) atomic_sub(1, (v)) - -#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0) -#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) - -#define atomic_inc_return(v) (atomic_add_return(1, v)) -#define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define atomic_fetch_add_unless atomic_fetch_add_unless #endif diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 2524fb60fbc2..206530d0751b 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -215,91 +215,10 @@ ATOMIC64_FETCH_OP(xor, ^) (cmpxchg(&((v)->counter), old, new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c; -} - - -static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u) -{ - long c, old; - c = atomic64_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic64_cmpxchg((v), c, 
c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c != (u); -} - -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) - -static __inline__ long atomic64_dec_if_positive(atomic64_t *v) -{ - long c, old, dec; - c = atomic64_read(v); - for (;;) { - dec = c - 1; - if (unlikely(dec < 0)) - break; - old = atomic64_cmpxchg((v), c, dec); - if (likely(old == c)) - break; - c = old; - } - return dec; -} - -/* - * Atomically add I to V and return TRUE if the resulting value is - * negative. - */ -static __inline__ int -atomic_add_negative (int i, atomic_t *v) -{ - return atomic_add_return(i, v) < 0; -} - -static __inline__ long -atomic64_add_negative (__s64 i, atomic64_t *v) -{ - return atomic64_add_return(i, v) < 0; -} - -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_inc_return(v) atomic_add_return(1, (v)) -#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) -#define atomic64_inc_return(v) atomic64_add_return(1, (v)) - -#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) -#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) -#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) -#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) -#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0) - #define atomic_add(i,v) (void)atomic_add_return((i), (v)) #define atomic_sub(i,v) (void)atomic_sub_return((i), (v)) -#define atomic_inc(v) atomic_add(1, (v)) -#define atomic_dec(v) atomic_sub(1, (v)) #define atomic64_add(i,v) (void)atomic64_add_return((i), (v)) #define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v)) -#define atomic64_inc(v) atomic64_add(1, (v)) -#define atomic64_dec(v) atomic64_sub(1, (v)) #endif /* _ASM_IA64_ATOMIC_H */ diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index e993e2860ee1..47228b0d4163 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -126,11 +126,13 @@ static inline void atomic_inc(atomic_t *v) { __asm__ __volatile__("addql #1,%0" : "+m" (*v)); } +#define atomic_inc atomic_inc static inline void atomic_dec(atomic_t *v) { __asm__ __volatile__("subql #1,%0" : "+m" (*v)); } +#define atomic_dec atomic_dec static inline int atomic_dec_and_test(atomic_t *v) { @@ -138,6 +140,7 @@ static inline int atomic_dec_and_test(atomic_t *v) __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v)); return c != 0; } +#define atomic_dec_and_test atomic_dec_and_test static inline int atomic_dec_and_test_lt(atomic_t *v) { @@ -155,6 +158,7 @@ static inline int atomic_inc_and_test(atomic_t *v) __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v)); return c != 0; } +#define atomic_inc_and_test atomic_inc_and_test #ifdef CONFIG_RMW_INSNS @@ -190,9 +194,6 @@ static inline int atomic_xchg(atomic_t *v, int new) #endif /* !CONFIG_RMW_INSNS */ -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_inc_return(v) atomic_add_return(1, (v)) - static inline int atomic_sub_and_test(int i, atomic_t *v) { char c; @@ -201,6 +202,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) : ASM_DI (i)); return c != 0; } +#define atomic_sub_and_test atomic_sub_and_test static inline int atomic_add_negative(int i, atomic_t *v) { @@ -210,20 +212,6 @@ static inline int atomic_add_negative(int i, atomic_t *v) : ASM_DI (i)); return c != 0; } - -static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, 
old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c; -} +#define atomic_add_negative atomic_add_negative #endif /* __ARCH_M68K_ATOMIC __ */ diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 93b47b1f6fb4..18193419f97d 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -515,12 +515,16 @@ static inline int __fls(int x) #endif +/* Simple test-and-set bit locks */ +#define test_and_set_bit_lock test_and_set_bit +#define clear_bit_unlock clear_bit +#define __clear_bit_unlock clear_bit_unlock + #include <asm-generic/bitops/ext2-atomic.h> #include <asm-generic/bitops/le.h> #include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/hweight.h> -#include <asm-generic/bitops/lock.h> #endif /* __KERNEL__ */ #endif /* _M68K_BITOPS_H */ diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 0ab176bdb8e8..79be687de4ab 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -274,97 +274,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) -/** - * __atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns the old value of @v. - */ -static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c; -} - -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_inc_return(v) atomic_add_return(1, (v)) - -/* - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) - -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - -/* - * atomic_dec_and_test - decrement by 1 and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) - /* * atomic_dec_if_positive - decrement by 1 if old value positive * @v: pointer of type atomic_t */ #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) -/* - * atomic_inc - increment atomic variable - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1. - */ -#define atomic_inc(v) atomic_add(1, (v)) - -/* - * atomic_dec - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1. 
- */ -#define atomic_dec(v) atomic_sub(1, (v)) - -/* - * atomic_add_negative - add and test if negative - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) - #ifdef CONFIG_64BIT #define ATOMIC64_INIT(i) { (i) } @@ -620,99 +535,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), (new))) -/** - * atomic64_add_unless - add unless the number is a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns true iff @v was not @u. - */ -static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) -{ - long c, old; - c = atomic64_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic64_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c != (u); -} - -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) - -#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) -#define atomic64_inc_return(v) atomic64_add_return(1, (v)) - -/* - * atomic64_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic64_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0) - -/* - * atomic64_inc_and_test - increment and test - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) - -/* - * atomic64_dec_and_test - decrement by 1 and test - * @v: pointer of type atomic64_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) - /* * atomic64_dec_if_positive - decrement by 1 if old value positive * @v: pointer of type atomic64_t */ #define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v) -/* - * atomic64_inc - increment atomic variable - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1. - */ -#define atomic64_inc(v) atomic64_add(1, (v)) - -/* - * atomic64_dec - decrement and test - * @v: pointer of type atomic64_t - * - * Atomically decrements @v by 1. - */ -#define atomic64_dec(v) atomic64_sub(1, (v)) - -/* - * atomic64_add_negative - add and test if negative - * @v: pointer of type atomic64_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. 
- */ -#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) - #endif /* CONFIG_64BIT */ #endif /* _ASM_ATOMIC_H */ diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h index 146e1660f00e..b589fac39b92 100644 --- a/arch/openrisc/include/asm/atomic.h +++ b/arch/openrisc/include/asm/atomic.h @@ -100,7 +100,7 @@ ATOMIC_OP(xor) * * This is often used through atomic_inc_not_zero() */ -static inline int __atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int old, tmp; @@ -119,7 +119,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return old; } -#define __atomic_add_unless __atomic_add_unless +#define atomic_fetch_add_unless atomic_fetch_add_unless #include <asm-generic/atomic.h> diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h index d29f7db53906..f9cd43a39d72 100644 --- a/arch/openrisc/include/asm/cmpxchg.h +++ b/arch/openrisc/include/asm/cmpxchg.h @@ -16,8 +16,9 @@ #ifndef __ASM_OPENRISC_CMPXCHG_H #define __ASM_OPENRISC_CMPXCHG_H +#include <linux/bits.h> +#include <linux/compiler.h> #include <linux/types.h> -#include <linux/bitops.h> #define __HAVE_ARCH_CMPXCHG 1 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 88bae6676c9b..118953d41763 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -77,30 +77,6 @@ static __inline__ int atomic_read(const atomic_t *v) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -/** - * __atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns the old value of @v. - */ -static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c; -} - #define ATOMIC_OP(op, c_op) \ static __inline__ void atomic_##op(int i, atomic_t *v) \ { \ @@ -160,28 +136,6 @@ ATOMIC_OPS(xor, ^=) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define atomic_inc(v) (atomic_add( 1,(v))) -#define atomic_dec(v) (atomic_add( -1,(v))) - -#define atomic_inc_return(v) (atomic_add_return( 1,(v))) -#define atomic_dec_return(v) (atomic_add_return( -1,(v))) - -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) - -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - -#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) - -#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) - #define ATOMIC_INIT(i) { (i) } #ifdef CONFIG_64BIT @@ -264,72 +218,11 @@ atomic64_read(const atomic64_t *v) return READ_ONCE((v)->counter); } -#define atomic64_inc(v) (atomic64_add( 1,(v))) -#define atomic64_dec(v) (atomic64_add( -1,(v))) - -#define atomic64_inc_return(v) (atomic64_add_return( 1,(v))) -#define atomic64_dec_return(v) (atomic64_add_return( -1,(v))) - -#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) - -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0) -#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0) - /* exported interface */ #define atomic64_cmpxchg(v, o, n) \ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -/** - * atomic64_add_unless - add unless the number is a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns the old value of @v. - */ -static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) -{ - long c, old; - c = atomic64_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic64_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c != (u); -} - -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) - -/* - * atomic64_dec_if_positive - decrement by 1 if old value positive - * @v: pointer of type atomic_t - * - * The function returns the old value of *v minus 1, even if - * the atomic variable, v, was not decremented. - */ -static inline long atomic64_dec_if_positive(atomic64_t *v) -{ - long c, old, dec; - c = atomic64_read(v); - for (;;) { - dec = c - 1; - if (unlikely(dec < 0)) - break; - old = atomic64_cmpxchg((v), c, dec); - if (likely(old == c)) - break; - c = old; - } - return dec; -} - #endif /* !CONFIG_64BIT */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 682b3e6a1e21..a0156cb43d1f 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -129,8 +129,6 @@ ATOMIC_OPS(xor, xor) #undef ATOMIC_OP_RETURN_RELAXED #undef ATOMIC_OP -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) - static __inline__ void atomic_inc(atomic_t *v) { int t; @@ -145,6 +143,7 @@ static __inline__ void atomic_inc(atomic_t *v) : "r" (&v->counter) : "cc", "xer"); } +#define atomic_inc atomic_inc static __inline__ int atomic_inc_return_relaxed(atomic_t *v) { @@ -163,16 +162,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v) return t; } -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - static __inline__ void atomic_dec(atomic_t *v) { int t; @@ -187,6 +176,7 @@ static __inline__ void atomic_dec(atomic_t *v) : "r" (&v->counter) : "cc", "xer"); } +#define atomic_dec atomic_dec static __inline__ int atomic_dec_return_relaxed(atomic_t *v) { @@ -218,7 +208,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v) #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) /** - * __atomic_add_unless - add unless the number is a given value + * atomic_fetch_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. @@ -226,13 +216,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns the old value of @v. */ -static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int t; __asm__ __volatile__ ( PPC_ATOMIC_ENTRY_BARRIER -"1: lwarx %0,0,%1 # __atomic_add_unless\n\ +"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\ cmpw 0,%0,%3 \n\ beq 2f \n\ add %0,%2,%0 \n" @@ -248,6 +238,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) return t; } +#define atomic_fetch_add_unless atomic_fetch_add_unless /** * atomic_inc_not_zero - increment unless the number is zero @@ -280,9 +271,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v) } #define atomic_inc_not_zero(v) atomic_inc_not_zero((v)) -#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) -#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) - /* * Atomically test *v and decrement if it is greater than 0. * The function returns the old value of *v minus 1, even if @@ -412,8 +400,6 @@ ATOMIC64_OPS(xor, xor) #undef ATOMIC64_OP_RETURN_RELAXED #undef ATOMIC64_OP -#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) - static __inline__ void atomic64_inc(atomic64_t *v) { long t; @@ -427,6 +413,7 @@ static __inline__ void atomic64_inc(atomic64_t *v) : "r" (&v->counter) : "cc", "xer"); } +#define atomic64_inc atomic64_inc static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v) { @@ -444,16 +431,6 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v) return t; } -/* - * atomic64_inc_and_test - increment and test - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) - static __inline__ void atomic64_dec(atomic64_t *v) { long t; @@ -467,6 +444,7 @@ static __inline__ void atomic64_dec(atomic64_t *v) : "r" (&v->counter) : "cc", "xer"); } +#define atomic64_dec atomic64_dec static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v) { @@ -487,9 +465,6 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v) #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed -#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) -#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) - /* * Atomically test *v and decrement if it is greater than 0. * The function returns the old value of *v minus 1. 
@@ -513,6 +488,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) return t; } +#define atomic64_dec_if_positive atomic64_dec_if_positive #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic64_cmpxchg_relaxed(v, o, n) \ @@ -524,7 +500,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) /** - * atomic64_add_unless - add unless the number is a given value + * atomic64_fetch_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t * @a: the amount to add to v... * @u: ...unless v is equal to u. @@ -532,13 +508,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns the old value of @v. */ -static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u) { long t; __asm__ __volatile__ ( PPC_ATOMIC_ENTRY_BARRIER -"1: ldarx %0,0,%1 # __atomic_add_unless\n\ +"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\ cmpd 0,%0,%3 \n\ beq 2f \n\ add %0,%2,%0 \n" @@ -551,8 +527,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) : "r" (&v->counter), "r" (a), "r" (u) : "cc", "memory"); - return t != u; + return t; } +#define atomic64_fetch_add_unless atomic64_fetch_add_unless /** * atomic_inc64_not_zero - increment unless the number is zero @@ -582,6 +559,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v) return t1 != 0; } +#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v)) #endif /* __powerpc64__ */ diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index 855115ace98c..512b89485790 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -209,130 +209,8 @@ ATOMIC_OPS(xor, xor, i) #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN -/* - * The extra atomic operations that are constructed from one of the core - * AMO-based operations above (aside from sub, which is easier to fit above). - * These are required to perform a full barrier, but they're OK this way - * because atomic_*_return is also required to perform a full barrier. 
- * - */ -#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \ -static __always_inline \ -bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \ -{ \ - return atomic##prefix##_##func_op##_return(i, v) comp_op I; \ -} - -#ifdef CONFIG_GENERIC_ATOMIC64 -#define ATOMIC_OPS(op, func_op, comp_op, I) \ - ATOMIC_OP(op, func_op, comp_op, I, int, ) -#else -#define ATOMIC_OPS(op, func_op, comp_op, I) \ - ATOMIC_OP(op, func_op, comp_op, I, int, ) \ - ATOMIC_OP(op, func_op, comp_op, I, long, 64) -#endif - -ATOMIC_OPS(add_and_test, add, ==, 0) -ATOMIC_OPS(sub_and_test, sub, ==, 0) -ATOMIC_OPS(add_negative, add, <, 0) - -#undef ATOMIC_OP -#undef ATOMIC_OPS - -#define ATOMIC_OP(op, func_op, I, c_type, prefix) \ -static __always_inline \ -void atomic##prefix##_##op(atomic##prefix##_t *v) \ -{ \ - atomic##prefix##_##func_op(I, v); \ -} - -#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \ -static __always_inline \ -c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v) \ -{ \ - return atomic##prefix##_fetch_##func_op##_relaxed(I, v); \ -} \ -static __always_inline \ -c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \ -{ \ - return atomic##prefix##_fetch_##func_op(I, v); \ -} - -#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \ -static __always_inline \ -c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v) \ -{ \ - return atomic##prefix##_fetch_##op##_relaxed(v) c_op I; \ -} \ -static __always_inline \ -c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \ -{ \ - return atomic##prefix##_fetch_##op(v) c_op I; \ -} - -#ifdef CONFIG_GENERIC_ATOMIC64 -#define ATOMIC_OPS(op, asm_op, c_op, I) \ - ATOMIC_OP( op, asm_op, I, int, ) \ - ATOMIC_FETCH_OP( op, asm_op, I, int, ) \ - ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) -#else -#define ATOMIC_OPS(op, asm_op, c_op, I) \ - ATOMIC_OP( op, asm_op, I, int, ) \ - ATOMIC_FETCH_OP( op, asm_op, I, int, ) \ - ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \ - ATOMIC_OP( op, asm_op, I, long, 64) \ - ATOMIC_FETCH_OP( op, asm_op, I, long, 64) \ - ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64) -#endif - -ATOMIC_OPS(inc, add, +, 1) -ATOMIC_OPS(dec, add, +, -1) - -#define atomic_inc_return_relaxed atomic_inc_return_relaxed -#define atomic_dec_return_relaxed atomic_dec_return_relaxed -#define atomic_inc_return atomic_inc_return -#define atomic_dec_return atomic_dec_return - -#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed -#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed -#define atomic_fetch_inc atomic_fetch_inc -#define atomic_fetch_dec atomic_fetch_dec - -#ifndef CONFIG_GENERIC_ATOMIC64 -#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed -#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed -#define atomic64_inc_return atomic64_inc_return -#define atomic64_dec_return atomic64_dec_return - -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed -#define atomic64_fetch_inc atomic64_fetch_inc -#define atomic64_fetch_dec atomic64_fetch_dec -#endif - -#undef ATOMIC_OPS -#undef ATOMIC_OP -#undef ATOMIC_FETCH_OP -#undef ATOMIC_OP_RETURN - -#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \ -static __always_inline \ -bool atomic##prefix##_##op(atomic##prefix##_t *v) \ -{ \ - return atomic##prefix##_##func_op##_return(v) comp_op I; \ -} - -ATOMIC_OP(inc_and_test, inc, ==, 0, ) -ATOMIC_OP(dec_and_test, dec, ==, 0, ) -#ifndef CONFIG_GENERIC_ATOMIC64 -ATOMIC_OP(inc_and_test, inc, ==, 0, 64) 
-ATOMIC_OP(dec_and_test, dec, ==, 0, 64) -#endif - -#undef ATOMIC_OP - /* This is required to provide a full barrier on success. */ -static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int prev, rc; @@ -349,9 +227,10 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) : "memory"); return prev; } +#define atomic_fetch_add_unless atomic_fetch_add_unless #ifndef CONFIG_GENERIC_ATOMIC64 -static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u) +static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u) { long prev, rc; @@ -368,27 +247,7 @@ static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u) : "memory"); return prev; } - -static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u) -{ - return __atomic64_add_unless(v, a, u) != u; -} -#endif - -/* - * The extra atomic operations that are constructed from one of the core - * LR/SC-based operations above. - */ -static __always_inline int atomic_inc_not_zero(atomic_t *v) -{ - return __atomic_add_unless(v, 1, 0); -} - -#ifndef CONFIG_GENERIC_ATOMIC64 -static __always_inline long atomic64_inc_not_zero(atomic64_t *v) -{ - return atomic64_add_unless(v, 1, 0); -} +#define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif /* diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 4b55532f15c4..fd20ab5d4cf7 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -55,17 +55,9 @@ static inline void atomic_add(int i, atomic_t *v) __atomic_add(i, &v->counter); } -#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0) -#define atomic_inc(_v) atomic_add(1, _v) -#define atomic_inc_return(_v) atomic_add_return(1, _v) -#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) #define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v) #define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v) #define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v) -#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0) -#define atomic_dec(_v) atomic_sub(1, _v) -#define atomic_dec_return(_v) atomic_sub_return(1, _v) -#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) #define ATOMIC_OPS(op) \ static inline void atomic_##op(int i, atomic_t *v) \ @@ -90,21 +82,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return __atomic_cmpxchg(&v->counter, old, new); } -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == u)) - break; - old = atomic_cmpxchg(v, c, c + a); - if (likely(old == c)) - break; - c = old; - } - return c; -} - #define ATOMIC64_INIT(i) { (i) } static inline long atomic64_read(const atomic64_t *v) @@ -168,50 +145,8 @@ ATOMIC64_OPS(xor) #undef ATOMIC64_OPS -static inline int atomic64_add_unless(atomic64_t *v, long i, long u) -{ - long c, old; - - c = atomic64_read(v); - for (;;) { - if (unlikely(c == u)) - break; - old = atomic64_cmpxchg(v, c, c + i); - if (likely(old == c)) - break; - c = old; - } - return c != u; -} - -static inline long atomic64_dec_if_positive(atomic64_t *v) -{ - long c, old, dec; - - c = atomic64_read(v); - for (;;) { - dec = c - 1; - if (unlikely(dec < 0)) - break; - old = atomic64_cmpxchg((v), c, dec); - if (likely(old == c)) - break; - c = old; - } - return dec; -} - -#define atomic64_add_negative(_i, _v) 
(atomic64_add_return(_i, _v) < 0) -#define atomic64_inc(_v) atomic64_add(1, _v) -#define atomic64_inc_return(_v) atomic64_add_return(1, _v) -#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) #define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v) #define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v) #define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v) -#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) -#define atomic64_dec(_v) atomic64_sub(1, _v) -#define atomic64_dec_return(_v) atomic64_sub_return(1, _v) -#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #endif /* __ARCH_S390_ATOMIC__ */ diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 0fd0099f43cc..f37b95a80232 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -32,44 +32,9 @@ #include <asm/atomic-irq.h> #endif -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_inc_return(v) atomic_add_return(1, (v)) -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) -#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) - -#define atomic_inc(v) atomic_add(1, (v)) -#define atomic_dec(v) atomic_sub(1, (v)) - #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) -/** - * __atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns the old value of @v. - */ -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - - return c; -} - #endif /* CONFIG_CPU_J2 */ #endif /* __ASM_SH_ATOMIC_H */ diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h index 1e881f5db659..593a9704782b 100644 --- a/arch/sh/include/asm/cmpxchg-xchg.h +++ b/arch/sh/include/asm/cmpxchg-xchg.h @@ -8,7 +8,8 @@ * This work is licensed under the terms of the GNU GPL, version 2. See the * file "COPYING" in the main directory of this archive for more details. 
*/ -#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/compiler.h> #include <asm/byteorder.h> /* diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index d13ce517f4b9..94c930f0bc62 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -27,17 +27,17 @@ int atomic_fetch_or(int, atomic_t *); int atomic_fetch_xor(int, atomic_t *); int atomic_cmpxchg(atomic_t *, int, int); int atomic_xchg(atomic_t *, int); -int __atomic_add_unless(atomic_t *, int, int); +int atomic_fetch_add_unless(atomic_t *, int, int); void atomic_set(atomic_t *, int); +#define atomic_fetch_add_unless atomic_fetch_add_unless + #define atomic_set_release(v, i) atomic_set((v), (i)) #define atomic_read(v) READ_ONCE((v)->counter) #define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v))) #define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v))) -#define atomic_inc(v) ((void)atomic_add_return( 1, (v))) -#define atomic_dec(v) ((void)atomic_add_return( -1, (v))) #define atomic_and(i, v) ((void)atomic_fetch_and((i), (v))) #define atomic_or(i, v) ((void)atomic_fetch_or((i), (v))) @@ -46,22 +46,4 @@ void atomic_set(atomic_t *, int); #define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v))) #define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v))) -#define atomic_inc_return(v) (atomic_add_return( 1, (v))) -#define atomic_dec_return(v) (atomic_add_return( -1, (v))) - -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) - -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - -#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) - #endif /* !(__ARCH_SPARC_ATOMIC__) */ diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 28db058d471b..6963482c81d8 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -50,38 +50,6 @@ ATOMIC_OPS(xor) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define atomic_dec_return(v) atomic_sub_return(1, v) -#define atomic64_dec_return(v) atomic64_sub_return(1, v) - -#define atomic_inc_return(v) atomic_add_return(1, v) -#define atomic64_inc_return(v) atomic64_add_return(1, v) - -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) - -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) -#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0) - -#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) -#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0) - -#define atomic_inc(v) atomic_add(1, v) -#define atomic64_inc(v) atomic64_add(1, v) - -#define atomic_dec(v) atomic_sub(1, v) -#define atomic64_dec(v) atomic64_sub(1, v) - -#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0) -#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0) - #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) static inline int atomic_xchg(atomic_t *v, int new) @@ -89,42 +57,11 @@ static inline int atomic_xchg(atomic_t *v, int new) return xchg(&v->counter, new); } -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c; -} - #define atomic64_cmpxchg(v, o, n) \ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -static inline long atomic64_add_unless(atomic64_t *v, long a, long u) -{ - long c, old; - c = atomic64_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic64_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c != (u); -} - -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) - long atomic64_dec_if_positive(atomic64_t *v); +#define atomic64_dec_if_positive atomic64_dec_if_positive #endif /* !(__ARCH_SPARC64_ATOMIC__) */ diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index 465a901a0ada..281fa634bb1a 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -95,7 +95,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new) } EXPORT_SYMBOL(atomic_cmpxchg); -int __atomic_add_unless(atomic_t *v, int a, int u) +int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int ret; unsigned long flags; @@ -107,7 +107,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u) spin_unlock_irqrestore(ATOMIC_HASH(v), flags); return ret; } -EXPORT_SYMBOL(__atomic_add_unless); +EXPORT_SYMBOL(atomic_fetch_add_unless); /* Atomic operations are already serializing */ void atomic_set(atomic_t *v, int i) diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 0db6bec95489..823fd2f320cf 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -80,6 +80,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v) * true if the result is zero, or false for all * other cases. */ +#define arch_atomic_sub_and_test arch_atomic_sub_and_test static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); @@ -91,6 +92,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) * * Atomically increments @v by 1. */ +#define arch_atomic_inc arch_atomic_inc static __always_inline void arch_atomic_inc(atomic_t *v) { asm volatile(LOCK_PREFIX "incl %0" @@ -103,6 +105,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v) * * Atomically decrements @v by 1. 
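The arch_atomic_sub_and_test-style self-defines that x86 gains here are what let the generic layers discover which operations an architecture actually provides. A condensed sketch of the three-level arrangement used throughout this patch (not a literal copy of any one header):

/* arch header: advertise an op by defining its name to itself */
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
static inline bool arch_atomic_dec_and_test(atomic_t *v);

/* instrumented wrapper: only built when the arch op is advertised */
#ifdef arch_atomic_dec_and_test
#define atomic_dec_and_test atomic_dec_and_test
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_dec_and_test(v);
}
#endif

/* <linux/atomic.h>: supply a generic fallback only if nothing was defined */
#ifndef atomic_dec_and_test
static inline bool atomic_dec_and_test(atomic_t *v)
{
	return atomic_dec_return(v) == 0;
}
#endif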
*/ +#define arch_atomic_dec arch_atomic_dec static __always_inline void arch_atomic_dec(atomic_t *v) { asm volatile(LOCK_PREFIX "decl %0" @@ -117,6 +120,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v) * returns true if the result is 0, or false for all other * cases. */ +#define arch_atomic_dec_and_test arch_atomic_dec_and_test static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); @@ -130,6 +134,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) * and returns true if the result is zero, or false for all * other cases. */ +#define arch_atomic_inc_and_test arch_atomic_inc_and_test static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); @@ -144,6 +149,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ +#define arch_atomic_add_negative arch_atomic_add_negative static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); @@ -173,9 +179,6 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v) return arch_atomic_add_return(-i, v); } -#define arch_atomic_inc_return(v) (arch_atomic_add_return(1, v)) -#define arch_atomic_dec_return(v) (arch_atomic_sub_return(1, v)) - static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v) { return xadd(&v->counter, i); @@ -253,27 +256,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v) return val; } -/** - * __arch_atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns the old value of @v. - */ -static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u) -{ - int c = arch_atomic_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); - - return c; -} - #ifdef CONFIG_X86_32 # include <asm/atomic64_32.h> #else diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index 92212bf0484f..ef959f02d070 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -158,6 +158,7 @@ static inline long long arch_atomic64_inc_return(atomic64_t *v) "S" (v) : "memory", "ecx"); return a; } +#define arch_atomic64_inc_return arch_atomic64_inc_return static inline long long arch_atomic64_dec_return(atomic64_t *v) { @@ -166,6 +167,7 @@ static inline long long arch_atomic64_dec_return(atomic64_t *v) "S" (v) : "memory", "ecx"); return a; } +#define arch_atomic64_dec_return arch_atomic64_dec_return /** * arch_atomic64_add - add integer to atomic64 variable @@ -198,25 +200,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v) } /** - * arch_atomic64_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer to type atomic64_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -static inline int arch_atomic64_sub_and_test(long long i, atomic64_t *v) -{ - return arch_atomic64_sub_return(i, v) == 0; -} - -/** * arch_atomic64_inc - increment atomic64 variable * @v: pointer to type atomic64_t * * Atomically increments @v by 1. 
*/ +#define arch_atomic64_inc arch_atomic64_inc static inline void arch_atomic64_inc(atomic64_t *v) { __alternative_atomic64(inc, inc_return, /* no output */, @@ -229,6 +218,7 @@ static inline void arch_atomic64_inc(atomic64_t *v) * * Atomically decrements @v by 1. */ +#define arch_atomic64_dec arch_atomic64_dec static inline void arch_atomic64_dec(atomic64_t *v) { __alternative_atomic64(dec, dec_return, /* no output */, @@ -236,46 +226,6 @@ static inline void arch_atomic64_dec(atomic64_t *v) } /** - * arch_atomic64_dec_and_test - decrement and test - * @v: pointer to type atomic64_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static inline int arch_atomic64_dec_and_test(atomic64_t *v) -{ - return arch_atomic64_dec_return(v) == 0; -} - -/** - * atomic64_inc_and_test - increment and test - * @v: pointer to type atomic64_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static inline int arch_atomic64_inc_and_test(atomic64_t *v) -{ - return arch_atomic64_inc_return(v) == 0; -} - -/** - * arch_atomic64_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer to type atomic64_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static inline int arch_atomic64_add_negative(long long i, atomic64_t *v) -{ - return arch_atomic64_add_return(i, v) < 0; -} - -/** * arch_atomic64_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t * @a: the amount to add to v... @@ -295,7 +245,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a, return (int)a; } - +#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero static inline int arch_atomic64_inc_not_zero(atomic64_t *v) { int r; @@ -304,6 +254,7 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v) return r; } +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) { long long r; diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 6106b59d3260..849f1c566a11 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -71,6 +71,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v) * true if the result is zero, or false for all * other cases. */ +#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); @@ -82,6 +83,7 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) * * Atomically increments @v by 1. */ +#define arch_atomic64_inc arch_atomic64_inc static __always_inline void arch_atomic64_inc(atomic64_t *v) { asm volatile(LOCK_PREFIX "incq %0" @@ -95,6 +97,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v) * * Atomically decrements @v by 1. */ +#define arch_atomic64_dec arch_atomic64_dec static __always_inline void arch_atomic64_dec(atomic64_t *v) { asm volatile(LOCK_PREFIX "decq %0" @@ -110,6 +113,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v) * returns true if the result is 0, or false for all other * cases. 
*/ +#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test static inline bool arch_atomic64_dec_and_test(atomic64_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); @@ -123,6 +127,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ +#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test static inline bool arch_atomic64_inc_and_test(atomic64_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); @@ -137,6 +142,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ +#define arch_atomic64_add_negative arch_atomic64_add_negative static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); @@ -169,9 +175,6 @@ static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v) return xadd(&v->counter, -i); } -#define arch_atomic64_inc_return(v) (arch_atomic64_add_return(1, (v))) -#define arch_atomic64_dec_return(v) (arch_atomic64_sub_return(1, (v))) - static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new) { return arch_cmpxchg(&v->counter, old, new); @@ -188,45 +191,6 @@ static inline long arch_atomic64_xchg(atomic64_t *v, long new) return xchg(&v->counter, new); } -/** - * arch_atomic64_add_unless - add unless the number is a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns the old value of @v. - */ -static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u) -{ - s64 c = arch_atomic64_read(v); - do { - if (unlikely(c == u)) - return false; - } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); - return true; -} - -#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0) - -/* - * arch_atomic64_dec_if_positive - decrement by 1 if old value positive - * @v: pointer of type atomic_t - * - * The function returns the old value of *v minus 1, even if - * the atomic variable, v, was not decremented. - */ -static inline long arch_atomic64_dec_if_positive(atomic64_t *v) -{ - s64 dec, c = arch_atomic64_read(v); - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); - return dec; -} - static inline void arch_atomic64_and(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "andq %1,%0" diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index 4cf11d88d3b3..19b90521954c 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -5,6 +5,7 @@ * PaX/grsecurity. */ #include <linux/refcount.h> +#include <asm/bug.h> /* * This is the first portion of the refcount error handling, which lives in diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index e7a23f2a519a..7de0149e1cf7 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -197,107 +197,9 @@ ATOMIC_OPS(xor) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. 
- */ -#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) - -/** - * atomic_inc - increment atomic variable - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1. - */ -#define atomic_inc(v) atomic_add(1,(v)) - -/** - * atomic_inc - increment atomic variable - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1. - */ -#define atomic_inc_return(v) atomic_add_return(1,(v)) - -/** - * atomic_dec - decrement atomic variable - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1. - */ -#define atomic_dec(v) atomic_sub(1,(v)) - -/** - * atomic_dec_return - decrement atomic variable - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1. - */ -#define atomic_dec_return(v) atomic_sub_return(1,(v)) - -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0) - -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0) - -/** - * atomic_add_negative - add and test if negative - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0) - #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -/** - * __atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns the old value of @v. - */ -static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c; -} - #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index fa0729c1e776..d81c653b9bf6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -61,7 +61,7 @@ static int atomic_inc_return_safe(atomic_t *v) { unsigned int counter; - counter = (unsigned int)__atomic_add_unless(v, 1, 0); + counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0); if (counter <= (unsigned int)INT_MAX) return (int)counter; diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index a6e904973ba8..475910ffbcb6 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -121,7 +121,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive) * this lock. */ if (!exclusive) - return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ? + return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ? 
-EBUSY : 0; /* lock is either WRITE or DESTROY - should be exclusive */ diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index a1b18082991b..183cc5418722 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -648,7 +648,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, trace_afs_notify_call(rxcall, call); call->need_attention = true; - u = __atomic_add_unless(&call->usage, 1, 0); + u = atomic_fetch_add_unless(&call->usage, 1, 0); if (u != 0) { trace_afs_call(call, afs_call_trace_wake, u, atomic_read(&call->net->nr_outstanding_calls), diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index ec07f23678ea..3c64e95d5ed0 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -84,42 +84,59 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne } #endif -static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) +#ifdef arch_atomic_fetch_add_unless +#define atomic_fetch_add_unless atomic_fetch_add_unless +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { kasan_check_write(v, sizeof(*v)); - return __arch_atomic_add_unless(v, a, u); + return arch_atomic_fetch_add_unless(v, a, u); } +#endif - -static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +#ifdef arch_atomic64_fetch_add_unless +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_add_unless(v, a, u); + return arch_atomic64_fetch_add_unless(v, a, u); } +#endif +#ifdef arch_atomic_inc +#define atomic_inc atomic_inc static __always_inline void atomic_inc(atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_inc(v); } +#endif +#ifdef arch_atomic64_inc +#define atomic64_inc atomic64_inc static __always_inline void atomic64_inc(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic64_inc(v); } +#endif +#ifdef arch_atomic_dec +#define atomic_dec atomic_dec static __always_inline void atomic_dec(atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_dec(v); } +#endif +#ifdef arch_atomic64_dec +#define atomic64_dec atomic64_dec static __always_inline void atomic64_dec(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic64_dec(v); } +#endif static __always_inline void atomic_add(int i, atomic_t *v) { @@ -181,65 +198,95 @@ static __always_inline void atomic64_xor(s64 i, atomic64_t *v) arch_atomic64_xor(i, v); } +#ifdef arch_atomic_inc_return +#define atomic_inc_return atomic_inc_return static __always_inline int atomic_inc_return(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } +#endif +#ifdef arch_atomic64_inc_return +#define atomic64_inc_return atomic64_inc_return static __always_inline s64 atomic64_inc_return(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } +#endif +#ifdef arch_atomic_dec_return +#define atomic_dec_return atomic_dec_return static __always_inline int atomic_dec_return(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } +#endif +#ifdef arch_atomic64_dec_return +#define atomic64_dec_return atomic64_dec_return static __always_inline s64 atomic64_dec_return(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } +#endif -static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v) +#ifdef
arch_atomic64_inc_not_zero +#define atomic64_inc_not_zero atomic64_inc_not_zero +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } +#endif +#ifdef arch_atomic64_dec_if_positive +#define atomic64_dec_if_positive atomic64_dec_if_positive static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } +#endif +#ifdef arch_atomic_dec_and_test +#define atomic_dec_and_test atomic_dec_and_test static __always_inline bool atomic_dec_and_test(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_dec_and_test(v); } +#endif +#ifdef arch_atomic64_dec_and_test +#define atomic64_dec_and_test atomic64_dec_and_test static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } +#endif +#ifdef arch_atomic_inc_and_test +#define atomic_inc_and_test atomic_inc_and_test static __always_inline bool atomic_inc_and_test(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } +#endif +#ifdef arch_atomic64_inc_and_test +#define atomic64_inc_and_test atomic64_inc_and_test static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } +#endif static __always_inline int atomic_add_return(int i, atomic_t *v) { @@ -325,29 +372,41 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) return arch_atomic64_fetch_xor(i, v); } +#ifdef arch_atomic_sub_and_test +#define atomic_sub_and_test atomic_sub_and_test static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } +#endif +#ifdef arch_atomic64_sub_and_test +#define atomic64_sub_and_test atomic64_sub_and_test static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } +#endif +#ifdef arch_atomic_add_negative +#define atomic_add_negative atomic_add_negative static __always_inline bool atomic_add_negative(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } +#endif +#ifdef arch_atomic64_add_negative +#define atomic64_add_negative atomic64_add_negative static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } +#endif static __always_inline unsigned long cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size) diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index abe6dd9ca2a8..13324aa828eb 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^) #include <linux/irqflags.h> -static inline int atomic_add_negative(int i, atomic_t *v) -{ - return atomic_add_return(i, v) < 0; -} - static inline void atomic_add(int i, atomic_t *v) { atomic_add_return(i, v); @@ -201,35 +196,7 @@ static inline void atomic_sub(int i, atomic_t *v) atomic_sub_return(i, v); } -static inline void atomic_inc(atomic_t *v) -{ - atomic_add_return(1, v); -} - -static inline void atomic_dec(atomic_t *v) -{ - atomic_sub_return(1, v); -} - -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_inc_return(v) atomic_add_return(1, (v)) - -#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 
0) -#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) -#ifndef __atomic_add_unless -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) - c = old; - return c; -} -#endif - #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index 8d28eb010d0d..97b28b7f1f29 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -11,6 +11,7 @@ */ #ifndef _ASM_GENERIC_ATOMIC64_H #define _ASM_GENERIC_ATOMIC64_H +#include <linux/types.h> typedef struct { long long counter; @@ -50,18 +51,10 @@ ATOMIC64_OPS(xor) #undef ATOMIC64_OP extern long long atomic64_dec_if_positive(atomic64_t *v); +#define atomic64_dec_if_positive atomic64_dec_if_positive extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); extern long long atomic64_xchg(atomic64_t *v, long long new); -extern int atomic64_add_unless(atomic64_t *v, long long a, long long u); - -#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) -#define atomic64_inc(v) atomic64_add(1LL, (v)) -#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) -#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) -#define atomic64_dec(v) atomic64_sub(1LL, (v)) -#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) -#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) +extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u); +#define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif /* _ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 04deffaf5f7d..dd90c9792909 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -2,189 +2,67 @@ #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_ATOMIC_H_ -#include <asm/types.h> -#include <linux/irqflags.h> - -#ifdef CONFIG_SMP -#include <asm/spinlock.h> -#include <asm/cache.h> /* we use L1_CACHE_BYTES */ - -/* Use an array of spinlocks for our atomic_ts. - * Hash function to index into a different SPINLOCK. - * Since "a" is usually an address, use one spinlock per cacheline. 
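With these definitions gone, asm-generic/atomic.h keeps only the arithmetic primitives plus cmpxchg()/xchg(), and everything else is filled in by the <linux/atomic.h> fallbacks added later in this patch. For example, on such an architecture atomic_inc_and_test() resolves roughly as follows (illustrative expansion trace, following the defines introduced below):

	atomic_inc_and_test(v)
		-> atomic_inc_return(v) == 0		/* <linux/atomic.h> fallback */
		-> atomic_add_return(1, (v)) == 0	/* arch / asm-generic primitive */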
- */ -# define ATOMIC_HASH_SIZE 4 -# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) - -extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; - -/* Can't use raw_spin_lock_irq because of #include problems, so - * this is the substitute */ -#define _atomic_spin_lock_irqsave(l,f) do { \ - arch_spinlock_t *s = ATOMIC_HASH(l); \ - local_irq_save(f); \ - arch_spin_lock(s); \ -} while(0) - -#define _atomic_spin_unlock_irqrestore(l,f) do { \ - arch_spinlock_t *s = ATOMIC_HASH(l); \ - arch_spin_unlock(s); \ - local_irq_restore(f); \ -} while(0) - - -#else -# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) -# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) -#endif +#include <linux/atomic.h> +#include <linux/compiler.h> +#include <asm/barrier.h> /* - * NMI events can occur at any time, including when interrupts have been - * disabled by *_irqsave(). So you can get NMI events occurring while a - * *_bit function is holding a spin lock. If the NMI handler also wants - * to do bit manipulation (and they do) then you can get a deadlock - * between the original caller of *_bit() and the NMI handler. - * - * by Keith Owens + * Implementation of atomic bitops using atomic-fetch ops. + * See Documentation/atomic_bitops.txt for details. */ -/** - * set_bit - Atomically set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * This function is atomic and may not be reordered. See __set_bit() - * if you do not require the atomic guarantees. - * - * Note: there are no guarantees that this function will not be reordered - * on non x86 architectures, so if you are writing portable code, - * make sure not to rely on its reordering guarantees. - * - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void set_bit(int nr, volatile unsigned long *addr) +static inline void set_bit(unsigned int nr, volatile unsigned long *p) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long flags; - - _atomic_spin_lock_irqsave(p, flags); - *p |= mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); } -/** - * clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * clear_bit() is atomic and may not be reordered. However, it does - * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() - * in order to ensure changes are visible on other processors. - */ -static inline void clear_bit(int nr, volatile unsigned long *addr) +static inline void clear_bit(unsigned int nr, volatile unsigned long *p) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long flags; - - _atomic_spin_lock_irqsave(p, flags); - *p &= ~mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); } -/** - * change_bit - Toggle a bit in memory - * @nr: Bit to change - * @addr: Address to start counting from - * - * change_bit() is atomic and may not be reordered. It may be - * reordered on other architectures than x86. 
- * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void change_bit(int nr, volatile unsigned long *addr) +static inline void change_bit(unsigned int nr, volatile unsigned long *p) { - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long flags; - - _atomic_spin_lock_irqsave(p, flags); - *p ^= mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); } -/** - * test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It may be reordered on other architectures than x86. - * It also implies a memory barrier. - */ -static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) { + long old; unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old; - unsigned long flags; - _atomic_spin_lock_irqsave(p, flags); - old = *p; - *p = old | mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + if (READ_ONCE(*p) & mask) + return 1; - return (old & mask) != 0; + old = atomic_long_fetch_or(mask, (atomic_long_t *)p); + return !!(old & mask); } -/** - * test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It can be reorderdered on other architectures other than x86. - * It also implies a memory barrier. - */ -static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) { + long old; unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old; - unsigned long flags; - _atomic_spin_lock_irqsave(p, flags); - old = *p; - *p = old & ~mask; - _atomic_spin_unlock_irqrestore(p, flags); + p += BIT_WORD(nr); + if (!(READ_ONCE(*p) & mask)) + return 0; - return (old & mask) != 0; + old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); + return !!(old & mask); } -/** - * test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. 
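The rewritten generic bitops above now land on atomic_long_fetch_or() / atomic_long_fetch_andnot() on the word containing the bit, instead of taking a hashed spinlock. Callers are unaffected; a small usage sketch of the test_and_set_bit()/clear_bit() pair on a driver-private bitmap (hypothetical names):

#include <linux/bitops.h>

#define NR_SLOTS	128
static unsigned long slot_map[BITS_TO_LONGS(NR_SLOTS)];

/* Claim the first free slot; returns the slot number, or -1 if all are busy. */
static int claim_slot(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		/* An old bit value of 0 means this CPU won the race for slot i. */
		if (!test_and_set_bit(i, slot_map))
			return i;
	}
	return -1;
}

static void release_slot(int i)
{
	clear_bit(i, slot_map);
}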
- */ -static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p) { + long old; unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old; - unsigned long flags; - - _atomic_spin_lock_irqsave(p, flags); - old = *p; - *p = old ^ mask; - _atomic_spin_unlock_irqrestore(p, flags); - return (old & mask) != 0; + p += BIT_WORD(nr); + old = atomic_long_fetch_xor(mask, (atomic_long_t *)p); + return !!(old & mask); } #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 67ab280ad134..3ae021368f48 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -2,6 +2,10 @@ #ifndef _ASM_GENERIC_BITOPS_LOCK_H_ #define _ASM_GENERIC_BITOPS_LOCK_H_ +#include <linux/atomic.h> +#include <linux/compiler.h> +#include <asm/barrier.h> + /** * test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set @@ -11,7 +15,20 @@ * the returned value is 0. * It can be used to implement bit locks. */ -#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr) +static inline int test_and_set_bit_lock(unsigned int nr, + volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + if (READ_ONCE(*p) & mask) + return 1; + + old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); + return !!(old & mask); +} + /** * clear_bit_unlock - Clear a bit in memory, for unlock @@ -20,11 +37,11 @@ * * This operation is atomic and provides release barrier semantics. */ -#define clear_bit_unlock(nr, addr) \ -do { \ - smp_mb__before_atomic(); \ - clear_bit(nr, addr); \ -} while (0) +static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p) +{ + p += BIT_WORD(nr); + atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); +} /** * __clear_bit_unlock - Clear a bit in memory, for unlock @@ -37,11 +54,38 @@ do { \ * * See for example x86's implementation. */ -#define __clear_bit_unlock(nr, addr) \ -do { \ - smp_mb__before_atomic(); \ - clear_bit(nr, addr); \ -} while (0) +static inline void __clear_bit_unlock(unsigned int nr, + volatile unsigned long *p) +{ + unsigned long old; -#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */ + p += BIT_WORD(nr); + old = READ_ONCE(*p); + old &= ~BIT_MASK(nr); + atomic_long_set_release((atomic_long_t *)p, old); +} + +/** + * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom + * byte is negative, for unlock. 
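The lock.h variants above differ from the plain bitops only in ordering: test_and_set_bit_lock() does a fetch-or with acquire semantics behind a cheap READ_ONCE() fast path, and clear_bit_unlock() pairs with it via a release. A sketch of the intended try-lock/unlock usage, with a hypothetical flags word:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_LOCK_BIT	0
static unsigned long my_flags;

static bool my_trylock(void)
{
	/* Old bit value of 0 means the lock was acquired, with acquire ordering. */
	return !test_and_set_bit_lock(MY_LOCK_BIT, &my_flags);
}

static void my_unlock(void)
{
	/* Release ordering: earlier stores are visible before the bit clears. */
	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
}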
+ * @nr: the bit to clear + * @addr: the address to start counting from + * + * This is a bit of a one-trick-pony for the filemap code, which clears + * PG_locked and tests PG_waiters, + */ +#ifndef clear_bit_unlock_is_negative_byte +static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr, + volatile unsigned long *p) +{ + long old; + unsigned long mask = BIT_MASK(nr); + + p += BIT_WORD(nr); + old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); + return !!(old & BIT(7)); +} +#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte +#endif +#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */ diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 01ce3997cb42..8e04f1f69bd9 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -2,6 +2,8 @@ /* Atomic operations usable in machine independent code */ #ifndef _LINUX_ATOMIC_H #define _LINUX_ATOMIC_H +#include <linux/types.h> + #include <asm/atomic.h> #include <asm/barrier.h> @@ -95,11 +97,23 @@ #endif #endif /* atomic_add_return_relaxed */ +#ifndef atomic_inc +#define atomic_inc(v) atomic_add(1, (v)) +#endif + /* atomic_inc_return_relaxed */ #ifndef atomic_inc_return_relaxed + +#ifndef atomic_inc_return +#define atomic_inc_return(v) atomic_add_return(1, (v)) +#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) +#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) +#define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) +#else /* atomic_inc_return */ #define atomic_inc_return_relaxed atomic_inc_return #define atomic_inc_return_acquire atomic_inc_return #define atomic_inc_return_release atomic_inc_return +#endif /* atomic_inc_return */ #else /* atomic_inc_return_relaxed */ @@ -143,11 +157,23 @@ #endif #endif /* atomic_sub_return_relaxed */ +#ifndef atomic_dec +#define atomic_dec(v) atomic_sub(1, (v)) +#endif + /* atomic_dec_return_relaxed */ #ifndef atomic_dec_return_relaxed + +#ifndef atomic_dec_return +#define atomic_dec_return(v) atomic_sub_return(1, (v)) +#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) +#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) +#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) +#else /* atomic_dec_return */ #define atomic_dec_return_relaxed atomic_dec_return #define atomic_dec_return_acquire atomic_dec_return #define atomic_dec_return_release atomic_dec_return +#endif /* atomic_dec_return */ #else /* atomic_dec_return_relaxed */ @@ -328,12 +354,22 @@ #endif #endif /* atomic_fetch_and_relaxed */ -#ifdef atomic_andnot -/* atomic_fetch_andnot_relaxed */ +#ifndef atomic_andnot +#define atomic_andnot(i, v) atomic_and(~(int)(i), (v)) +#endif + #ifndef atomic_fetch_andnot_relaxed -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot -#define atomic_fetch_andnot_acquire atomic_fetch_andnot -#define atomic_fetch_andnot_release atomic_fetch_andnot + +#ifndef atomic_fetch_andnot +#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v)) +#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v)) +#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v)) +#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v)) +#else /* atomic_fetch_andnot */ +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot +#endif /* atomic_fetch_andnot */ #else /* 
atomic_fetch_andnot_relaxed */ @@ -352,7 +388,6 @@ __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) #endif #endif /* atomic_fetch_andnot_relaxed */ -#endif /* atomic_andnot */ /* atomic_fetch_xor_relaxed */ #ifndef atomic_fetch_xor_relaxed @@ -520,112 +555,140 @@ #endif /* xchg_relaxed */ /** + * atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns the original value of @v. + */ +#ifndef atomic_fetch_add_unless +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#endif + +/** * atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. */ -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline bool atomic_add_unless(atomic_t *v, int a, int u) { - return __atomic_add_unless(v, a, u) != u; + return atomic_fetch_add_unless(v, a, u) != u; } /** * atomic_inc_not_zero - increment unless the number is zero * @v: pointer of type atomic_t * - * Atomically increments @v by 1, so long as @v is non-zero. - * Returns non-zero if @v was non-zero, and zero otherwise. + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. */ #ifndef atomic_inc_not_zero #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #endif -#ifndef atomic_andnot -static inline void atomic_andnot(int i, atomic_t *v) -{ - atomic_and(~i, v); -} - -static inline int atomic_fetch_andnot(int i, atomic_t *v) -{ - return atomic_fetch_and(~i, v); -} - -static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#ifndef atomic_inc_and_test +static inline bool atomic_inc_and_test(atomic_t *v) { - return atomic_fetch_and_relaxed(~i, v); + return atomic_inc_return(v) == 0; } +#endif -static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#ifndef atomic_dec_and_test +static inline bool atomic_dec_and_test(atomic_t *v) { - return atomic_fetch_and_acquire(~i, v); + return atomic_dec_return(v) == 0; } +#endif -static inline int atomic_fetch_andnot_release(int i, atomic_t *v) +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. 
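The new fallbacks above all share the same shape: read the counter once, then loop on atomic_try_cmpxchg(), which refreshes the expected value on failure. The idiom extends naturally to variants this patch does not provide; for instance, a hypothetical bounded increment could look like:

#include <linux/atomic.h>
#include <linux/compiler.h>

/* Hypothetical helper: increment v only while it stays below limit. */
static inline bool atomic_inc_below(atomic_t *v, int limit)
{
	int c = atomic_read(v);

	do {
		if (unlikely(c >= limit))
			return false;
	} while (!atomic_try_cmpxchg(v, &c, c + 1));

	return true;
}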
+ */ +#ifndef atomic_sub_and_test +static inline bool atomic_sub_and_test(int i, atomic_t *v) { - return atomic_fetch_and_release(~i, v); + return atomic_sub_return(i, v) == 0; } #endif /** - * atomic_inc_not_zero_hint - increment if not null + * atomic_add_negative - add and test if negative + * @i: integer value to add * @v: pointer of type atomic_t - * @hint: probable value of the atomic before the increment * - * This version of atomic_inc_not_zero() gives a hint of probable - * value of the atomic. This helps processor to not read the memory - * before doing the atomic read/modify/write cycle, lowering - * number of bus transactions on some arches. - * - * Returns: 0 if increment was not done, 1 otherwise. + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. */ -#ifndef atomic_inc_not_zero_hint -static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) +#ifndef atomic_add_negative +static inline bool atomic_add_negative(int i, atomic_t *v) { - int val, c = hint; - - /* sanity test, should be removed by compiler if hint is a constant */ - if (!hint) - return atomic_inc_not_zero(v); - - do { - val = atomic_cmpxchg(v, c, c + 1); - if (val == c) - return 1; - c = val; - } while (c); - - return 0; + return atomic_add_return(i, v) < 0; } #endif #ifndef atomic_inc_unless_negative -static inline int atomic_inc_unless_negative(atomic_t *p) +static inline bool atomic_inc_unless_negative(atomic_t *v) { - int v, v1; - for (v = 0; v >= 0; v = v1) { - v1 = atomic_cmpxchg(p, v, v + 1); - if (likely(v1 == v)) - return 1; - } - return 0; + int c = atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c + 1)); + + return true; } #endif #ifndef atomic_dec_unless_positive -static inline int atomic_dec_unless_positive(atomic_t *p) +static inline bool atomic_dec_unless_positive(atomic_t *v) { - int v, v1; - for (v = 0; v <= 0; v = v1) { - v1 = atomic_cmpxchg(p, v, v - 1); - if (likely(v1 == v)) - return 1; - } - return 0; + int c = atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c - 1)); + + return true; } #endif @@ -639,17 +702,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p) #ifndef atomic_dec_if_positive static inline int atomic_dec_if_positive(atomic_t *v) { - int c, old, dec; - c = atomic_read(v); - for (;;) { + int dec, c = atomic_read(v); + + do { dec = c - 1; if (unlikely(dec < 0)) break; - old = atomic_cmpxchg((v), c, dec); - if (likely(old == c)) - break; - c = old; - } + } while (!atomic_try_cmpxchg(v, &c, dec)); + return dec; } #endif @@ -693,11 +753,23 @@ static inline int atomic_dec_if_positive(atomic_t *v) #endif #endif /* atomic64_add_return_relaxed */ +#ifndef atomic64_inc +#define atomic64_inc(v) atomic64_add(1, (v)) +#endif + /* atomic64_inc_return_relaxed */ #ifndef atomic64_inc_return_relaxed + +#ifndef atomic64_inc_return +#define atomic64_inc_return(v) atomic64_add_return(1, (v)) +#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) +#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) +#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) +#else /* atomic64_inc_return */ #define atomic64_inc_return_relaxed atomic64_inc_return #define atomic64_inc_return_acquire atomic64_inc_return #define atomic64_inc_return_release atomic64_inc_return +#endif /* atomic64_inc_return */ #else /* atomic64_inc_return_relaxed */ @@ 
-742,11 +814,23 @@ static inline int atomic_dec_if_positive(atomic_t *v) #endif #endif /* atomic64_sub_return_relaxed */ +#ifndef atomic64_dec +#define atomic64_dec(v) atomic64_sub(1, (v)) +#endif + /* atomic64_dec_return_relaxed */ #ifndef atomic64_dec_return_relaxed + +#ifndef atomic64_dec_return +#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) +#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) +#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) +#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) +#else /* atomic64_dec_return */ #define atomic64_dec_return_relaxed atomic64_dec_return #define atomic64_dec_return_acquire atomic64_dec_return #define atomic64_dec_return_release atomic64_dec_return +#endif /* atomic64_dec_return */ #else /* atomic64_dec_return_relaxed */ @@ -927,12 +1011,22 @@ static inline int atomic_dec_if_positive(atomic_t *v) #endif #endif /* atomic64_fetch_and_relaxed */ -#ifdef atomic64_andnot -/* atomic64_fetch_andnot_relaxed */ +#ifndef atomic64_andnot +#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v)) +#endif + #ifndef atomic64_fetch_andnot_relaxed -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot -#define atomic64_fetch_andnot_release atomic64_fetch_andnot + +#ifndef atomic64_fetch_andnot +#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v)) +#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v)) +#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v)) +#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v)) +#else /* atomic64_fetch_andnot */ +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot +#endif /* atomic64_fetch_andnot */ #else /* atomic64_fetch_andnot_relaxed */ @@ -951,7 +1045,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) #endif #endif /* atomic64_fetch_andnot_relaxed */ -#endif /* atomic64_andnot */ /* atomic64_fetch_xor_relaxed */ #ifndef atomic64_fetch_xor_relaxed @@ -1049,30 +1142,164 @@ static inline int atomic_dec_if_positive(atomic_t *v) #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg #endif /* atomic64_try_cmpxchg */ -#ifndef atomic64_andnot -static inline void atomic64_andnot(long long i, atomic64_t *v) +/** + * atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns the original value of @v. + */ +#ifndef atomic64_fetch_add_unless +static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, + long long u) { - atomic64_and(~i, v); + long long c = atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic64_try_cmpxchg(v, &c, c + a)); + + return c; } +#endif -static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) +/** + * atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. 
+ */ +static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u) { - return atomic64_fetch_and(~i, v); + return atomic64_fetch_add_unless(v, a, u) != u; } -static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) +/** + * atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +#ifndef atomic64_inc_not_zero +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#endif + +/** + * atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#ifndef atomic64_inc_and_test +static inline bool atomic64_inc_and_test(atomic64_t *v) { - return atomic64_fetch_and_relaxed(~i, v); + return atomic64_inc_return(v) == 0; } +#endif -static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#ifndef atomic64_dec_and_test +static inline bool atomic64_dec_and_test(atomic64_t *v) { - return atomic64_fetch_and_acquire(~i, v); + return atomic64_dec_return(v) == 0; } +#endif -static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +#ifndef atomic64_sub_and_test +static inline bool atomic64_sub_and_test(long long i, atomic64_t *v) { - return atomic64_fetch_and_release(~i, v); + return atomic64_sub_return(i, v) == 0; +} +#endif + +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +#ifndef atomic64_add_negative +static inline bool atomic64_add_negative(long long i, atomic64_t *v) +{ + return atomic64_add_return(i, v) < 0; +} +#endif + +#ifndef atomic64_inc_unless_negative +static inline bool atomic64_inc_unless_negative(atomic64_t *v) +{ + long long c = atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#endif + +#ifndef atomic64_dec_unless_positive +static inline bool atomic64_dec_unless_positive(atomic64_t *v) +{ + long long c = atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#endif + +/* + * atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic64_t + * + * The function returns the old value of *v minus 1, even if + * the atomic64 variable, v, was not decremented. 
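atomic64_dec_if_positive() keeps its slightly unusual contract: the return value is the old value minus one even when no decrement happened, so callers test the sign rather than compare against a sentinel. A usage sketch with a hypothetical credit counter:

#include <linux/atomic.h>
#include <linux/types.h>

/* Consume one credit, refusing to drive the counter negative. */
static bool take_credit(atomic64_t *credits)
{
	/* A non-negative result means the decrement actually happened. */
	return atomic64_dec_if_positive(credits) >= 0;
}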
+ */ +#ifndef atomic64_dec_if_positive +static inline long long atomic64_dec_if_positive(atomic64_t *v) +{ + long long dec, c = atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic64_try_cmpxchg(v, &c, dec)); + + return dec; } #endif diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 4cac4e1a72ff..af419012d77d 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -2,29 +2,9 @@ #ifndef _LINUX_BITOPS_H #define _LINUX_BITOPS_H #include <asm/types.h> +#include <linux/bits.h> -#ifdef __KERNEL__ -#define BIT(nr) (1UL << (nr)) -#define BIT_ULL(nr) (1ULL << (nr)) -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) -#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) -#define BITS_PER_BYTE 8 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) -#endif - -/* - * Create a contiguous bitmask starting at bit position @l and ending at - * position @h. For example - * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. - */ -#define GENMASK(h, l) \ - (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) - -#define GENMASK_ULL(h, l) \ - (((~0ULL) - (1ULL << (l)) + 1) & \ - (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); diff --git a/include/linux/bits.h b/include/linux/bits.h new file mode 100644 index 000000000000..2b7b532c1d51 --- /dev/null +++ b/include/linux/bits.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BITS_H +#define __LINUX_BITS_H +#include <asm/bitsperlong.h> + +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) +#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) +#define BITS_PER_BYTE 8 + +/* + * Create a contiguous bitmask starting at bit position @l and ending at + * position @h. For example + * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
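Splitting BIT()/GENMASK() out into <linux/bits.h> lets low-level headers such as the sh cmpxchg code pull in just the mask macros without the rest of <linux/bitops.h>. A small illustration of the macros themselves, using a made-up register layout:

#include <linux/bits.h>

/* Hypothetical register: bits 7..4 hold a 4-bit channel number. */
#define CHAN_MASK	GENMASK(7, 4)	/* 0xf0 */
#define CHAN_SHIFT	4

static inline unsigned int chan_from_reg(unsigned long reg)
{
	return (reg & CHAN_MASK) >> CHAN_SHIFT;
}

static inline unsigned long reg_with_chan(unsigned long reg, unsigned int chan)
{
	return (reg & ~CHAN_MASK) | ((unsigned long)chan << CHAN_SHIFT);
}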
+ */ +#define GENMASK(h, l) \ + (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#endif /* __LINUX_BITS_H */ diff --git a/include/linux/refcount.h b/include/linux/refcount.h index a685da2c4522..e28cce21bad6 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -3,9 +3,10 @@ #define _LINUX_REFCOUNT_H #include <linux/atomic.h> -#include <linux/mutex.h> -#include <linux/spinlock.h> -#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/spinlock_types.h> + +struct mutex; /** * struct refcount_t - variant of atomic_t specialized for reference counts @@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } +extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r); +extern void refcount_add_checked(unsigned int i, refcount_t *r); + +extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r); +extern void refcount_inc_checked(refcount_t *r); + +extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r); + +extern __must_check bool refcount_dec_and_test_checked(refcount_t *r); +extern void refcount_dec_checked(refcount_t *r); + #ifdef CONFIG_REFCOUNT_FULL -extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); -extern void refcount_add(unsigned int i, refcount_t *r); -extern __must_check bool refcount_inc_not_zero(refcount_t *r); -extern void refcount_inc(refcount_t *r); +#define refcount_add_not_zero refcount_add_not_zero_checked +#define refcount_add refcount_add_checked + +#define refcount_inc_not_zero refcount_inc_not_zero_checked +#define refcount_inc refcount_inc_checked + +#define refcount_sub_and_test refcount_sub_and_test_checked -extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); +#define refcount_dec_and_test refcount_dec_and_test_checked +#define refcount_dec refcount_dec_checked -extern __must_check bool refcount_dec_and_test(refcount_t *r); -extern void refcount_dec(refcount_t *r); #else # ifdef CONFIG_ARCH_HAS_REFCOUNT # include <asm/refcount.h> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index d10ecd78105f..8363fd83d399 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -575,7 +575,7 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, { int refold; - refold = __atomic_add_unless(&map->refcnt, 1, 0); + refold = atomic_fetch_add_unless(&map->refcnt, 1, 0); if (refold >= BPF_MAX_REFCNT) { __bpf_map_put(map, false); @@ -1142,7 +1142,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) { int refold; - refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0); + refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0); if (refold >= BPF_MAX_REFCNT) { __bpf_prog_put(prog, false); diff --git a/lib/atomic64.c b/lib/atomic64.c index 53c2d5edc826..1d91e31eceec 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new) } EXPORT_SYMBOL(atomic64_xchg); -int atomic64_add_unless(atomic64_t *v, long long a, long long u) +long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u) { unsigned long flags; raw_spinlock_t *lock = lock_addr(v); - int ret = 0; + long long val; raw_spin_lock_irqsave(lock, flags); - if (v->counter != u) { + val = v->counter; + if (val != u) v->counter += a; - ret = 1; - } 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 53c2d5edc826..1d91e31eceec 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	int ret = 0;
+	long long val;
 	raw_spin_lock_irqsave(lock, flags);
-	if (v->counter != u) {
+	val = v->counter;
+	if (val != u)
 		v->counter += a;
-		ret = 1;
-	}
 	raw_spin_unlock_irqrestore(lock, flags);
-	return ret;
+
+	return val;
 }
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
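The switch from returning a boolean to returning the old value does not lose anything for existing callers; a boolean add_unless()/inc_not_zero() helper can be layered on top of the fetch form with a single comparison. The sketch below uses invented my_ prefixed names and long long arguments matching the lib/atomic64.c variant above; it is illustrative only.

#include <linux/atomic.h>
#include <linux/types.h>

static inline bool my_atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	/* The old value differs from @u exactly when the add was performed. */
	return atomic64_fetch_add_unless(v, a, u) != u;
}

static inline bool my_atomic64_inc_not_zero(atomic64_t *v)
{
	return atomic64_fetch_add_unless(v, 1, 0) != 0;
}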
diff --git a/lib/refcount.c b/lib/refcount.c
index d3b81cefce91..ebcf8cd49e05 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -35,13 +35,13 @@
 *
 */
+#include <linux/mutex.h>
 #include <linux/refcount.h>
+#include <linux/spinlock.h>
 #include <linux/bug.h>
-#ifdef CONFIG_REFCOUNT_FULL
-
 /**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
@@ -58,7 +58,7 @@
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
 {
 	unsigned int new, val = atomic_read(&r->refs);
@@ -79,10 +79,10 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 	return true;
 }
-EXPORT_SYMBOL(refcount_add_not_zero);
+EXPORT_SYMBOL(refcount_add_not_zero_checked);
 /**
- * refcount_add - add a value to a refcount
+ * refcount_add_checked - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(refcount_add_not_zero);
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
-void refcount_add(unsigned int i, refcount_t *r)
+void refcount_add_checked(unsigned int i, refcount_t *r)
 {
-	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+	WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
-EXPORT_SYMBOL(refcount_add);
+EXPORT_SYMBOL(refcount_add_checked);
 /**
- * refcount_inc_not_zero - increment a refcount unless it is 0
+ * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(refcount_add);
 *
 * Return: true if the increment was successful, false otherwise
 */
-bool refcount_inc_not_zero(refcount_t *r)
+bool refcount_inc_not_zero_checked(refcount_t *r)
 {
 	unsigned int new, val = atomic_read(&r->refs);
@@ -134,10 +134,10 @@ bool refcount_inc_not_zero(refcount_t *r)
 	return true;
 }
-EXPORT_SYMBOL(refcount_inc_not_zero);
+EXPORT_SYMBOL(refcount_inc_not_zero_checked);
 /**
- * refcount_inc - increment a refcount
+ * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
@@ -148,14 +148,14 @@ EXPORT_SYMBOL(refcount_inc_not_zero);
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
-void refcount_inc(refcount_t *r)
+void refcount_inc_checked(refcount_t *r)
 {
-	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+	WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
 }
-EXPORT_SYMBOL(refcount_inc);
+EXPORT_SYMBOL(refcount_inc_checked);
 /**
- * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(refcount_inc);
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
 {
 	unsigned int new, val = atomic_read(&r->refs);
@@ -192,10 +192,10 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 	return !new;
 }
-EXPORT_SYMBOL(refcount_sub_and_test);
+EXPORT_SYMBOL(refcount_sub_and_test_checked);
 /**
- * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
@@ -207,14 +207,14 @@ EXPORT_SYMBOL(refcount_sub_and_test);
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
-bool refcount_dec_and_test(refcount_t *r)
+bool refcount_dec_and_test_checked(refcount_t *r)
 {
-	return refcount_sub_and_test(1, r);
+	return refcount_sub_and_test_checked(1, r);
 }
-EXPORT_SYMBOL(refcount_dec_and_test);
+EXPORT_SYMBOL(refcount_dec_and_test_checked);
 /**
- * refcount_dec - decrement a refcount
+ * refcount_dec_checked - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 *
@@ -223,12 +223,11 @@ EXPORT_SYMBOL(refcount_dec_and_test);
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
-void refcount_dec(refcount_t *r)
+void refcount_dec_checked(refcount_t *r)
 {
-	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+	WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
-EXPORT_SYMBOL(refcount_dec);
-#endif /* CONFIG_REFCOUNT_FULL */
+EXPORT_SYMBOL(refcount_dec_checked);
 /**
 * refcount_dec_if_one - decrement a refcount if it is 1
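A short usage sketch of the renamed API: with CONFIG_REFCOUNT_FULL=y the generic refcount_* names now resolve to the *_checked implementations above, so a typical lookup/put pair is unchanged for callers. The struct my_conn and helper names below are invented for this example and are not part of the patch.

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_conn {
	refcount_t ref;
};

static struct my_conn *my_conn_get_maybe(struct my_conn *conn)
{
	/* Resolves to refcount_inc_not_zero_checked() under REFCOUNT_FULL. */
	if (conn && refcount_inc_not_zero(&conn->ref))
		return conn;
	return NULL;
}

static void my_conn_put(struct my_conn *conn)
{
	/* Resolves to refcount_dec_and_test_checked() under REFCOUNT_FULL. */
	if (refcount_dec_and_test(&conn->ref))
		kfree(conn);
}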
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index af8c4b38b746..d84227d75717 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -244,7 +244,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
 	 * the packet count limit, so...
 	 */
 	if (atm_may_send(pvcc->atmvcc, size) &&
-	    atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+	    atomic_inc_not_zero(&pvcc->inflight))
 		return 1;
 	/*
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index f6734d8cb01a..9486293fef5c 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -415,7 +415,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 bool rxrpc_queue_call(struct rxrpc_call *call)
 {
 	const void *here = __builtin_return_address(0);
-	int n = __atomic_add_unless(&call->usage, 1, 0);
+	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
 	if (n == 0)
 		return false;
 	if (rxrpc_queue_work(&call->processor))
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 4c77a78a252a..77440a356b14 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -266,7 +266,7 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
 bool rxrpc_queue_conn(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
-	int n = __atomic_add_unless(&conn->usage, 1, 0);
+	int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
 	if (n == 0)
 		return false;
 	if (rxrpc_queue_work(&conn->processor))
@@ -309,7 +309,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
 	const void *here = __builtin_return_address(0);
 	if (conn) {
-		int n = __atomic_add_unless(&conn->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
 		else
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b493e6b62740..777c3ed4cfc0 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -305,7 +305,7 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 	const void *here = __builtin_return_address(0);
 	if (local) {
-		int n = __atomic_add_unless(&local->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
 		else
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1b7e8107b3ae..1cf3b408017a 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -406,7 +406,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
 	const void *here = __builtin_return_address(0);
 	if (peer) {
-		int n = __atomic_add_unless(&peer->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
 		else