Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/atomic.h             | 453
-rw-r--r--  include/linux/bitops.h             |  22
-rw-r--r--  include/linux/bits.h               |  26
-rw-r--r--  include/linux/cpu.h                |   2
-rw-r--r--  include/linux/cpuhotplug.h         |   1
-rw-r--r--  include/linux/efi.h                |  15
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h |   3
-rw-r--r--  include/linux/mm.h                 |   3
-rw-r--r--  include/linux/nmi.h                |  10
-rw-r--r--  include/linux/pci.h                |   1
-rw-r--r--  include/linux/rculist.h            |  19
-rw-r--r--  include/linux/rcupdate.h           |  20
-rw-r--r--  include/linux/rcutiny.h            |   2
-rw-r--r--  include/linux/refcount.h           |  34
-rw-r--r--  include/linux/sched.h              |   5
-rw-r--r--  include/linux/sched/sysctl.h       |   1
-rw-r--r--  include/linux/smpboot.h            |  15
-rw-r--r--  include/linux/spinlock.h           |  53
-rw-r--r--  include/linux/srcu.h               |  17
-rw-r--r--  include/linux/swait.h              |  36
-rw-r--r--  include/linux/torture.h            |   4
21 files changed, 527 insertions, 215 deletions
diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 01ce3997cb42..1e8e88bdaf09 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -2,6 +2,8 @@ /* Atomic operations usable in machine independent code */ #ifndef _LINUX_ATOMIC_H #define _LINUX_ATOMIC_H +#include <linux/types.h> + #include <asm/atomic.h> #include <asm/barrier.h> @@ -36,40 +38,46 @@ * barriers on top of the relaxed variant. In the case where the relaxed * variant is already fully ordered, no additional barriers are needed. * - * Besides, if an arch has a special barrier for acquire/release, it could - * implement its own __atomic_op_* and use the same framework for building - * variants - * - * If an architecture overrides __atomic_op_acquire() it will probably want - * to define smp_mb__after_spinlock(). + * If an architecture overrides __atomic_acquire_fence() it will probably + * want to define smp_mb__after_spinlock(). */ -#ifndef __atomic_op_acquire +#ifndef __atomic_acquire_fence +#define __atomic_acquire_fence smp_mb__after_atomic +#endif + +#ifndef __atomic_release_fence +#define __atomic_release_fence smp_mb__before_atomic +#endif + +#ifndef __atomic_pre_full_fence +#define __atomic_pre_full_fence smp_mb__before_atomic +#endif + +#ifndef __atomic_post_full_fence +#define __atomic_post_full_fence smp_mb__after_atomic +#endif + #define __atomic_op_acquire(op, args...) \ ({ \ typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ - smp_mb__after_atomic(); \ + __atomic_acquire_fence(); \ __ret; \ }) -#endif -#ifndef __atomic_op_release #define __atomic_op_release(op, args...) \ ({ \ - smp_mb__before_atomic(); \ + __atomic_release_fence(); \ op##_relaxed(args); \ }) -#endif -#ifndef __atomic_op_fence #define __atomic_op_fence(op, args...) \ ({ \ typeof(op##_relaxed(args)) __ret; \ - smp_mb__before_atomic(); \ + __atomic_pre_full_fence(); \ __ret = op##_relaxed(args); \ - smp_mb__after_atomic(); \ + __atomic_post_full_fence(); \ __ret; \ }) -#endif /* atomic_add_return_relaxed */ #ifndef atomic_add_return_relaxed @@ -95,11 +103,23 @@ #endif #endif /* atomic_add_return_relaxed */ +#ifndef atomic_inc +#define atomic_inc(v) atomic_add(1, (v)) +#endif + /* atomic_inc_return_relaxed */ #ifndef atomic_inc_return_relaxed + +#ifndef atomic_inc_return +#define atomic_inc_return(v) atomic_add_return(1, (v)) +#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) +#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) +#define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) +#else /* atomic_inc_return */ #define atomic_inc_return_relaxed atomic_inc_return #define atomic_inc_return_acquire atomic_inc_return #define atomic_inc_return_release atomic_inc_return +#endif /* atomic_inc_return */ #else /* atomic_inc_return_relaxed */ @@ -143,11 +163,23 @@ #endif #endif /* atomic_sub_return_relaxed */ +#ifndef atomic_dec +#define atomic_dec(v) atomic_sub(1, (v)) +#endif + /* atomic_dec_return_relaxed */ #ifndef atomic_dec_return_relaxed + +#ifndef atomic_dec_return +#define atomic_dec_return(v) atomic_sub_return(1, (v)) +#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) +#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) +#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) +#else /* atomic_dec_return */ #define atomic_dec_return_relaxed atomic_dec_return #define atomic_dec_return_acquire atomic_dec_return #define atomic_dec_return_release atomic_dec_return +#endif /* atomic_dec_return */ #else /* 
atomic_dec_return_relaxed */ @@ -328,12 +360,22 @@ #endif #endif /* atomic_fetch_and_relaxed */ -#ifdef atomic_andnot -/* atomic_fetch_andnot_relaxed */ +#ifndef atomic_andnot +#define atomic_andnot(i, v) atomic_and(~(int)(i), (v)) +#endif + #ifndef atomic_fetch_andnot_relaxed -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot -#define atomic_fetch_andnot_acquire atomic_fetch_andnot -#define atomic_fetch_andnot_release atomic_fetch_andnot + +#ifndef atomic_fetch_andnot +#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v)) +#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v)) +#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v)) +#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v)) +#else /* atomic_fetch_andnot */ +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot +#endif /* atomic_fetch_andnot */ #else /* atomic_fetch_andnot_relaxed */ @@ -352,7 +394,6 @@ __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) #endif #endif /* atomic_fetch_andnot_relaxed */ -#endif /* atomic_andnot */ /* atomic_fetch_xor_relaxed */ #ifndef atomic_fetch_xor_relaxed @@ -520,112 +561,140 @@ #endif /* xchg_relaxed */ /** + * atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns the original value of @v. + */ +#ifndef atomic_fetch_add_unless +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#endif + +/** * atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. */ -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline bool atomic_add_unless(atomic_t *v, int a, int u) { - return __atomic_add_unless(v, a, u) != u; + return atomic_fetch_add_unless(v, a, u) != u; } /** * atomic_inc_not_zero - increment unless the number is zero * @v: pointer of type atomic_t * - * Atomically increments @v by 1, so long as @v is non-zero. - * Returns non-zero if @v was non-zero, and zero otherwise. + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. */ #ifndef atomic_inc_not_zero #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #endif -#ifndef atomic_andnot -static inline void atomic_andnot(int i, atomic_t *v) -{ - atomic_and(~i, v); -} - -static inline int atomic_fetch_andnot(int i, atomic_t *v) -{ - return atomic_fetch_and(~i, v); -} - -static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. 
+ */ +#ifndef atomic_inc_and_test +static inline bool atomic_inc_and_test(atomic_t *v) { - return atomic_fetch_and_relaxed(~i, v); + return atomic_inc_return(v) == 0; } +#endif -static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#ifndef atomic_dec_and_test +static inline bool atomic_dec_and_test(atomic_t *v) { - return atomic_fetch_and_acquire(~i, v); + return atomic_dec_return(v) == 0; } +#endif -static inline int atomic_fetch_andnot_release(int i, atomic_t *v) +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +#ifndef atomic_sub_and_test +static inline bool atomic_sub_and_test(int i, atomic_t *v) { - return atomic_fetch_and_release(~i, v); + return atomic_sub_return(i, v) == 0; } #endif /** - * atomic_inc_not_zero_hint - increment if not null + * atomic_add_negative - add and test if negative + * @i: integer value to add * @v: pointer of type atomic_t - * @hint: probable value of the atomic before the increment - * - * This version of atomic_inc_not_zero() gives a hint of probable - * value of the atomic. This helps processor to not read the memory - * before doing the atomic read/modify/write cycle, lowering - * number of bus transactions on some arches. * - * Returns: 0 if increment was not done, 1 otherwise. + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. 
*/ -#ifndef atomic_inc_not_zero_hint -static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) +#ifndef atomic_add_negative +static inline bool atomic_add_negative(int i, atomic_t *v) { - int val, c = hint; - - /* sanity test, should be removed by compiler if hint is a constant */ - if (!hint) - return atomic_inc_not_zero(v); - - do { - val = atomic_cmpxchg(v, c, c + 1); - if (val == c) - return 1; - c = val; - } while (c); - - return 0; + return atomic_add_return(i, v) < 0; } #endif #ifndef atomic_inc_unless_negative -static inline int atomic_inc_unless_negative(atomic_t *p) +static inline bool atomic_inc_unless_negative(atomic_t *v) { - int v, v1; - for (v = 0; v >= 0; v = v1) { - v1 = atomic_cmpxchg(p, v, v + 1); - if (likely(v1 == v)) - return 1; - } - return 0; + int c = atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c + 1)); + + return true; } #endif #ifndef atomic_dec_unless_positive -static inline int atomic_dec_unless_positive(atomic_t *p) +static inline bool atomic_dec_unless_positive(atomic_t *v) { - int v, v1; - for (v = 0; v <= 0; v = v1) { - v1 = atomic_cmpxchg(p, v, v - 1); - if (likely(v1 == v)) - return 1; - } - return 0; + int c = atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c - 1)); + + return true; } #endif @@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p) #ifndef atomic_dec_if_positive static inline int atomic_dec_if_positive(atomic_t *v) { - int c, old, dec; - c = atomic_read(v); - for (;;) { + int dec, c = atomic_read(v); + + do { dec = c - 1; if (unlikely(dec < 0)) break; - old = atomic_cmpxchg((v), c, dec); - if (likely(old == c)) - break; - c = old; - } + } while (!atomic_try_cmpxchg(v, &c, dec)); + return dec; } #endif @@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v) #endif #endif /* atomic64_add_return_relaxed */ +#ifndef atomic64_inc +#define atomic64_inc(v) atomic64_add(1, (v)) +#endif + /* atomic64_inc_return_relaxed */ #ifndef atomic64_inc_return_relaxed + +#ifndef atomic64_inc_return +#define atomic64_inc_return(v) atomic64_add_return(1, (v)) +#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) +#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) +#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) +#else /* atomic64_inc_return */ #define atomic64_inc_return_relaxed atomic64_inc_return #define atomic64_inc_return_acquire atomic64_inc_return #define atomic64_inc_return_release atomic64_inc_return +#endif /* atomic64_inc_return */ #else /* atomic64_inc_return_relaxed */ @@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v) #endif #endif /* atomic64_sub_return_relaxed */ +#ifndef atomic64_dec +#define atomic64_dec(v) atomic64_sub(1, (v)) +#endif + /* atomic64_dec_return_relaxed */ #ifndef atomic64_dec_return_relaxed + +#ifndef atomic64_dec_return +#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) +#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) +#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) +#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) +#else /* atomic64_dec_return */ #define atomic64_dec_return_relaxed atomic64_dec_return #define atomic64_dec_return_acquire atomic64_dec_return #define atomic64_dec_return_release atomic64_dec_return +#endif /* atomic64_dec_return */ #else /* atomic64_dec_return_relaxed 
*/ @@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v) #endif #endif /* atomic64_fetch_and_relaxed */ -#ifdef atomic64_andnot -/* atomic64_fetch_andnot_relaxed */ +#ifndef atomic64_andnot +#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v)) +#endif + #ifndef atomic64_fetch_andnot_relaxed -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot -#define atomic64_fetch_andnot_release atomic64_fetch_andnot + +#ifndef atomic64_fetch_andnot +#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v)) +#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v)) +#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v)) +#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v)) +#else /* atomic64_fetch_andnot */ +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot +#endif /* atomic64_fetch_andnot */ #else /* atomic64_fetch_andnot_relaxed */ @@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) #endif #endif /* atomic64_fetch_andnot_relaxed */ -#endif /* atomic64_andnot */ /* atomic64_fetch_xor_relaxed */ #ifndef atomic64_fetch_xor_relaxed @@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v) #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg #endif /* atomic64_try_cmpxchg */ -#ifndef atomic64_andnot -static inline void atomic64_andnot(long long i, atomic64_t *v) +/** + * atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns the original value of @v. + */ +#ifndef atomic64_fetch_add_unless +static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, + long long u) { - atomic64_and(~i, v); + long long c = atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic64_try_cmpxchg(v, &c, c + a)); + + return c; } +#endif -static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) +/** + * atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u) { - return atomic64_fetch_and(~i, v); + return atomic64_fetch_add_unless(v, a, u) != u; } -static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) +/** + * atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +#ifndef atomic64_inc_not_zero +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#endif + +/** + * atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. 
+ */ +#ifndef atomic64_inc_and_test +static inline bool atomic64_inc_and_test(atomic64_t *v) { - return atomic64_fetch_and_relaxed(~i, v); + return atomic64_inc_return(v) == 0; } +#endif -static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +#ifndef atomic64_dec_and_test +static inline bool atomic64_dec_and_test(atomic64_t *v) { - return atomic64_fetch_and_acquire(~i, v); + return atomic64_dec_return(v) == 0; } +#endif -static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +#ifndef atomic64_sub_and_test +static inline bool atomic64_sub_and_test(long long i, atomic64_t *v) +{ + return atomic64_sub_return(i, v) == 0; +} +#endif + +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +#ifndef atomic64_add_negative +static inline bool atomic64_add_negative(long long i, atomic64_t *v) { - return atomic64_fetch_and_release(~i, v); + return atomic64_add_return(i, v) < 0; +} +#endif + +#ifndef atomic64_inc_unless_negative +static inline bool atomic64_inc_unless_negative(atomic64_t *v) +{ + long long c = atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#endif + +#ifndef atomic64_dec_unless_positive +static inline bool atomic64_dec_unless_positive(atomic64_t *v) +{ + long long c = atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#endif + +/* + * atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic64_t + * + * The function returns the old value of *v minus 1, even if + * the atomic64 variable, v, was not decremented. + */ +#ifndef atomic64_dec_if_positive +static inline long long atomic64_dec_if_positive(atomic64_t *v) +{ + long long dec, c = atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic64_try_cmpxchg(v, &c, dec)); + + return dec; } #endif diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 4cac4e1a72ff..af419012d77d 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -2,29 +2,9 @@ #ifndef _LINUX_BITOPS_H #define _LINUX_BITOPS_H #include <asm/types.h> +#include <linux/bits.h> -#ifdef __KERNEL__ -#define BIT(nr) (1UL << (nr)) -#define BIT_ULL(nr) (1ULL << (nr)) -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) -#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) -#define BITS_PER_BYTE 8 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) -#endif - -/* - * Create a contiguous bitmask starting at bit position @l and ending at - * position @h. For example - * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
- */ -#define GENMASK(h, l) \ - (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) - -#define GENMASK_ULL(h, l) \ - (((~0ULL) - (1ULL << (l)) + 1) & \ - (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); diff --git a/include/linux/bits.h b/include/linux/bits.h new file mode 100644 index 000000000000..2b7b532c1d51 --- /dev/null +++ b/include/linux/bits.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BITS_H +#define __LINUX_BITS_H +#include <asm/bitsperlong.h> + +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) +#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) +#define BITS_PER_BYTE 8 + +/* + * Create a contiguous bitmask starting at bit position @l and ending at + * position @h. For example + * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. + */ +#define GENMASK(h, l) \ + (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) - (1ULL << (l)) + 1) & \ + (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#endif /* __LINUX_BITS_H */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index a97a63eef59f..3233fbe23594 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -30,7 +30,7 @@ struct cpu { }; extern void boot_cpu_init(void); -extern void boot_cpu_state_init(void); +extern void boot_cpu_hotplug_init(void); extern void cpu_init(void); extern void trap_init(void); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 8796ba387152..4cf06a64bc02 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -164,6 +164,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, + CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_ONLINE_DYN, diff --git a/include/linux/efi.h b/include/linux/efi.h index 56add823f190..401e4b254e30 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -894,6 +894,16 @@ typedef struct _efi_file_handle { void *flush; } efi_file_handle_t; +typedef struct { + u64 revision; + u32 open_volume; +} efi_file_io_interface_32_t; + +typedef struct { + u64 revision; + u64 open_volume; +} efi_file_io_interface_64_t; + typedef struct _efi_file_io_interface { u64 revision; int (*open_volume)(struct _efi_file_io_interface *, @@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ #ifdef CONFIG_X86 -extern void efi_late_init(void); extern void efi_free_boot_services(void); extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, bool nonblocking); extern void efi_find_mirror(void); #else -static inline void efi_late_init(void) {} static inline void efi_free_boot_services(void) {} static inline efi_status_t efi_query_variable_store(u32 attributes, @@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog { extern int efi_tpm_eventlog_init(void); +/* Workqueue to queue EFI Runtime Services */ +extern struct workqueue_struct *efi_rts_wq; + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h 
b/include/linux/irqchip/arm-gic-v3.h index cbb872c1b607..9d2ea3e907d0 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -73,6 +73,7 @@ #define GICD_TYPER_MBIS (1U << 16) #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) #define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) @@ -576,8 +577,8 @@ struct rdists { phys_addr_t phys_base; } __percpu *rdist; struct page *prop_page; - int id_bits; u64 flags; + u32 gicd_typer; bool has_vlpis; bool has_direct_lpi; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 7ba6d356d18f..68a5121694ef 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -466,6 +466,9 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma) vma->vm_ops = NULL; } +/* flush_tlb_range() takes a vma, not a mm, and can care about flags */ +#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } + struct mmu_gather; struct inode; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index b8d868d23e79..08f9247e9827 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void); extern void touch_softlockup_watchdog_sync(void); extern void touch_all_softlockup_watchdogs(void); extern unsigned int softlockup_panic; -#else + +extern int lockup_detector_online_cpu(unsigned int cpu); +extern int lockup_detector_offline_cpu(unsigned int cpu); +#else /* CONFIG_SOFTLOCKUP_DETECTOR */ static inline void touch_softlockup_watchdog_sched(void) { } static inline void touch_softlockup_watchdog(void) { } static inline void touch_softlockup_watchdog_sync(void) { } static inline void touch_all_softlockup_watchdogs(void) { } -#endif + +#define lockup_detector_online_cpu NULL +#define lockup_detector_offline_cpu NULL +#endif /* CONFIG_SOFTLOCKUP_DETECTOR */ #ifdef CONFIG_DETECT_HUNG_TASK void reset_hung_task_detector(void); diff --git a/include/linux/pci.h b/include/linux/pci.h index abd5d5e17aee..c133ccfa002e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -368,7 +368,6 @@ struct pci_dev { unsigned int transparent:1; /* Subtractive decode bridge */ unsigned int multifunction:1; /* Multi-function device */ - unsigned int is_added:1; unsigned int is_busmaster:1; /* Is busmaster */ unsigned int no_msi:1; /* May not use MSI */ unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 36df6ccbc874..4786c2235b98 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after - * the current position. + * the current position which must have been in the list when the RCU read + * lock was taken. + * This would typically require either that you obtained the node from a + * previous walk of the list in the same RCU read-side critical section, or + * that you held some sort of non-RCU reference (such as a reference count) + * to keep the node alive *and* in the list. + * + * This iterator is similar to list_for_each_entry_from_rcu() except + * this starts after the given position and that one starts at the given + * position. 
*/ #define list_for_each_entry_continue_rcu(pos, head, member) \ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ @@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * * Iterate over the tail of a list starting from a given position, * which must have been in the list when the RCU read lock was taken. + * This would typically require either that you obtained the node from a + * previous walk of the list in the same RCU read-side critical section, or + * that you held some sort of non-RCU reference (such as a reference count) + * to keep the node alive *and* in the list. + * + * This iterator is similar to list_for_each_entry_continue_rcu() except + * this starts from the given position and that one starts from the position + * after the given position. */ #define list_for_each_entry_from_rcu(pos, head, member) \ for (; &(pos)->member != (head); \ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 65163aa0bb04..75e5b393cf44 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -64,7 +64,6 @@ void rcu_barrier_tasks(void); void __rcu_read_lock(void); void __rcu_read_unlock(void); -void rcu_read_unlock_special(struct task_struct *t); void synchronize_rcu(void); /* @@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { } } while (0) /* - * Note a voluntary context switch for RCU-tasks benefit. This is a - * macro rather than an inline function to avoid #include hell. + * Note a quasi-voluntary context switch for RCU-tasks's benefit. + * This is a macro rather than an inline function to avoid #include hell. */ #ifdef CONFIG_TASKS_RCU -#define rcu_note_voluntary_context_switch_lite(t) \ +#define rcu_tasks_qs(t) \ do { \ if (READ_ONCE((t)->rcu_tasks_holdout)) \ WRITE_ONCE((t)->rcu_tasks_holdout, false); \ @@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { } #define rcu_note_voluntary_context_switch(t) \ do { \ rcu_all_qs(); \ - rcu_note_voluntary_context_switch_lite(t); \ + rcu_tasks_qs(t); \ } while (0) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); void exit_tasks_rcu_start(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU */ -#define rcu_note_voluntary_context_switch_lite(t) do { } while (0) +#define rcu_tasks_qs(t) do { } while (0) #define rcu_note_voluntary_context_switch(t) rcu_all_qs() #define call_rcu_tasks call_rcu_sched #define synchronize_rcu_tasks synchronize_sched @@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { } */ #define cond_resched_tasks_rcu_qs() \ do { \ - if (!cond_resched()) \ - rcu_note_voluntary_context_switch_lite(current); \ + rcu_tasks_qs(current); \ + cond_resched(); \ } while (0) /* @@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { } * This is simply an identity function, but it documents where a pointer * is handed off from RCU to some other synchronization mechanism, for * example, reference counting or locking. In C11, it would map to - * kill_dependency(). It could be used as follows: - * `` + * kill_dependency(). 
It could be used as follows:: + * * rcu_read_lock(); * p = rcu_dereference(gp); * long_lived = is_long_lived(p); @@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { } * p = rcu_pointer_handoff(p); * } * rcu_read_unlock(); - *`` */ #define rcu_pointer_handoff(p) (p) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 7b3c82e8a625..8d9a0ea8f0b5 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head, #define rcu_note_context_switch(preempt) \ do { \ rcu_sched_qs(); \ - rcu_note_voluntary_context_switch_lite(current); \ + rcu_tasks_qs(current); \ } while (0) static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) diff --git a/include/linux/refcount.h b/include/linux/refcount.h index a685da2c4522..e28cce21bad6 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -3,9 +3,10 @@ #define _LINUX_REFCOUNT_H #include <linux/atomic.h> -#include <linux/mutex.h> -#include <linux/spinlock.h> -#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/spinlock_types.h> + +struct mutex; /** * struct refcount_t - variant of atomic_t specialized for reference counts @@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } +extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r); +extern void refcount_add_checked(unsigned int i, refcount_t *r); + +extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r); +extern void refcount_inc_checked(refcount_t *r); + +extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r); + +extern __must_check bool refcount_dec_and_test_checked(refcount_t *r); +extern void refcount_dec_checked(refcount_t *r); + #ifdef CONFIG_REFCOUNT_FULL -extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); -extern void refcount_add(unsigned int i, refcount_t *r); -extern __must_check bool refcount_inc_not_zero(refcount_t *r); -extern void refcount_inc(refcount_t *r); +#define refcount_add_not_zero refcount_add_not_zero_checked +#define refcount_add refcount_add_checked + +#define refcount_inc_not_zero refcount_inc_not_zero_checked +#define refcount_inc refcount_inc_checked + +#define refcount_sub_and_test refcount_sub_and_test_checked -extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); +#define refcount_dec_and_test refcount_dec_and_test_checked +#define refcount_dec refcount_dec_checked -extern __must_check bool refcount_dec_and_test(refcount_t *r); -extern void refcount_dec(refcount_t *r); #else # ifdef CONFIG_ARCH_HAS_REFCOUNT # include <asm/refcount.h> diff --git a/include/linux/sched.h b/include/linux/sched.h index 43731fe51c97..dac5086e3815 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -167,8 +167,8 @@ struct task_group; * need_sleep = false; * wake_up_state(p, TASK_UNINTERRUPTIBLE); * - * Where wake_up_state() (and all other wakeup primitives) imply enough - * barriers to order the store of the variable against wakeup. + * where wake_up_state() executes a full memory barrier before accessing the + * task state. 
* * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a @@ -1017,7 +1017,6 @@ struct task_struct { u64 last_sum_exec_runtime; struct callback_head numa_work; - struct list_head numa_entry; struct numa_group *numa_group; /* diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 1c1a1512ec55..913488d828cb 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size; #ifdef CONFIG_SCHED_DEBUG extern __read_mostly unsigned int sysctl_sched_migration_cost; extern __read_mostly unsigned int sysctl_sched_nr_migrate; -extern __read_mostly unsigned int sysctl_sched_time_avg; int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index c174844cf663..d0884b525001 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h @@ -25,8 +25,6 @@ struct smpboot_thread_data; * parked (cpu offline) * @unpark: Optional unpark function, called when the thread is * unparked (cpu online) - * @cpumask: Internal state. To update which threads are unparked, - * call smpboot_update_cpumask_percpu_thread(). * @selfparking: Thread is not parked by the park function. * @thread_comm: The base name of the thread */ @@ -40,23 +38,12 @@ struct smp_hotplug_thread { void (*cleanup)(unsigned int cpu, bool online); void (*park)(unsigned int cpu); void (*unpark)(unsigned int cpu); - cpumask_var_t cpumask; bool selfparking; const char *thread_comm; }; -int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread, - const struct cpumask *cpumask); - -static inline int -smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) -{ - return smpboot_register_percpu_thread_cpumask(plug_thread, - cpu_possible_mask); -} +int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); -void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, - const struct cpumask *); #endif diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index fd57888d4942..3190997df9ca 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -114,29 +114,48 @@ do { \ #endif /*arch_spin_is_contended*/ /* - * This barrier must provide two things: + * smp_mb__after_spinlock() provides the equivalent of a full memory barrier + * between program-order earlier lock acquisitions and program-order later + * memory accesses. * - * - it must guarantee a STORE before the spin_lock() is ordered against a - * LOAD after it, see the comments at its two usage sites. + * This guarantees that the following two properties hold: * - * - it must ensure the critical section is RCsc. + * 1) Given the snippet: * - * The latter is important for cases where we observe values written by other - * CPUs in spin-loops, without barriers, while being subject to scheduling. 
+ * { X = 0; Y = 0; } * - * CPU0 CPU1 CPU2 + * CPU0 CPU1 * - * for (;;) { - * if (READ_ONCE(X)) - * break; - * } - * X=1 - * <sched-out> - * <sched-in> - * r = X; + * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1); + * spin_lock(S); smp_mb(); + * smp_mb__after_spinlock(); r1 = READ_ONCE(X); + * r0 = READ_ONCE(Y); + * spin_unlock(S); * - * without transitivity it could be that CPU1 observes X!=0 breaks the loop, - * we get migrated and CPU2 sees X==0. + * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0) + * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments + * preceding the call to smp_mb__after_spinlock() in __schedule() and in + * try_to_wake_up(). + * + * 2) Given the snippet: + * + * { X = 0; Y = 0; } + * + * CPU0 CPU1 CPU2 + * + * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y); + * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb(); + * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X); + * WRITE_ONCE(Y, 1); + * spin_unlock(S); + * + * it is forbidden that CPU0's critical section executes before CPU1's + * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1) + * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments + * preceding the calls to smp_rmb() in try_to_wake_up() for similar + * snippets but "projected" onto two CPUs. + * + * Property (2) upgrades the lock to an RCsc lock. * * Since most load-store architectures implement ACQUIRE with an smp_mb() after * the LL/SC loop, they need no further barriers. Similarly all our TSO diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 91494d7e8e41..3e72a291c401 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) return retval; } +/* Used by tracing, cannot be traced and cannot invoke lockdep. */ +static inline notrace int +srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp) +{ + int retval; + + retval = __srcu_read_lock(sp); + return retval; +} + /** * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. * @sp: srcu_struct in which to unregister the old reader. @@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) __srcu_read_unlock(sp, idx); } +/* Used by tracing, cannot be traced and cannot call lockdep. */ +static inline notrace void +srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp) +{ + __srcu_read_unlock(sp, idx); +} + /** * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock * diff --git a/include/linux/swait.h b/include/linux/swait.h index bf8cb0dee23c..73e06e9986d4 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -16,7 +16,7 @@ * wait-queues, but the semantics are actually completely different, and * every single user we have ever had has been buggy (or pointless). * - * A "swake_up()" only wakes up _one_ waiter, which is not at all what + * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what * "wake_up()" does, and has led to problems. In other cases, it has * been fine, because there's only ever one waiter (kvm), but in that * case gthe whole "simple" wait-queue is just pointless to begin with, @@ -38,8 +38,8 @@ * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right * sleeper state. * - * - the exclusive mode; because this requires preserving the list order - * and this is hard. + * - the !exclusive mode; because that leads to O(n) wakeups, everything is + * exclusive. 
* * - custom wake callback functions; because you cannot give any guarantees * about random code. This also allows swait to be used in RT, such that @@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name * CPU0 - waker CPU1 - waiter * * for (;;) { - * @cond = true; prepare_to_swait(&wq_head, &wait, state); + * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state); * smp_mb(); // smp_mb() from set_current_state() * if (swait_active(wq_head)) if (@cond) * wake_up(wq_head); break; @@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq) return swait_active(wq); } -extern void swake_up(struct swait_queue_head *q); +extern void swake_up_one(struct swait_queue_head *q); extern void swake_up_all(struct swait_queue_head *q); extern void swake_up_locked(struct swait_queue_head *q); -extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); -extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); +extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); -/* as per ___wait_event() but for swait, therefore "exclusive == 0" */ +/* as per ___wait_event() but for swait, therefore "exclusive == 1" */ #define ___swait_event(wq, condition, state, ret, cmd) \ ({ \ + __label__ __out; \ struct swait_queue __wait; \ long __ret = ret; \ \ @@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ - break; \ + goto __out; \ } \ \ cmd; \ } \ finish_swait(&wq, &__wait); \ - __ret; \ +__out: __ret; \ }) #define __swait_event(wq, condition) \ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ schedule()) -#define swait_event(wq, condition) \ +#define swait_event_exclusive(wq, condition) \ do { \ if (condition) \ break; \ @@ -208,7 +208,7 @@ do { \ TASK_UNINTERRUPTIBLE, timeout, \ __ret = schedule_timeout(__ret)) -#define swait_event_timeout(wq, condition, timeout) \ +#define swait_event_timeout_exclusive(wq, condition, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ @@ -220,7 +220,7 @@ do { \ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ schedule()) -#define swait_event_interruptible(wq, condition) \ +#define swait_event_interruptible_exclusive(wq, condition) \ ({ \ int __ret = 0; \ if (!(condition)) \ @@ -233,7 +233,7 @@ do { \ TASK_INTERRUPTIBLE, timeout, \ __ret = schedule_timeout(__ret)) -#define swait_event_interruptible_timeout(wq, condition, timeout) \ +#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ @@ -246,7 +246,7 @@ do { \ (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) /** - * swait_event_idle - wait without system load contribution + * swait_event_idle_exclusive - wait without system load contribution * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * @@ -257,7 +257,7 @@ do { \ * condition and doesn't want to contribute to system load. Signals are * ignored. 
*/ -#define swait_event_idle(wq, condition) \ +#define swait_event_idle_exclusive(wq, condition) \ do { \ if (condition) \ break; \ @@ -270,7 +270,7 @@ do { \ __ret = schedule_timeout(__ret)) /** - * swait_event_idle_timeout - wait up to timeout without load contribution + * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout at which we'll give up in jiffies @@ -288,7 +288,7 @@ do { \ * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ -#define swait_event_idle_timeout(wq, condition, timeout) \ +#define swait_event_idle_timeout_exclusive(wq, condition, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ diff --git a/include/linux/torture.h b/include/linux/torture.h index 66272862070b..61dfd93b6ee4 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -64,6 +64,8 @@ struct torture_random_state { long trs_count; }; #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 } +#define DEFINE_TORTURE_RANDOM_PERCPU(name) \ + DEFINE_PER_CPU(struct torture_random_state, name) unsigned long torture_random(struct torture_random_state *trsp); /* Task shuffler, which causes CPUs to occasionally go idle. */ @@ -79,7 +81,7 @@ void stutter_wait(const char *title); int torture_stutter_init(int s); /* Initialization and cleanup. */ -bool torture_init_begin(char *ttype, bool v); +bool torture_init_begin(char *ttype, int v); void torture_init_end(void); bool torture_cleanup_begin(void); void torture_cleanup_end(void); |
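Editor's note, not part of the patch: the new generic helpers added to atomic.h above (atomic_fetch_add_unless(), atomic_inc_unless_negative(), atomic_dec_unless_positive(), atomic_dec_if_positive(), and their atomic64_ counterparts) all follow one pattern: read the counter once, then retry with atomic_try_cmpxchg() until either the guard condition fails or the compare-and-exchange succeeds; on failure atomic_try_cmpxchg() refreshes the expected value, so the loop never re-reads by hand. The sketch below illustrates that pattern only, as a self-contained userspace analogue using C11 <stdatomic.h> in place of the kernel API; atomic_compare_exchange_weak() plays the role of atomic_try_cmpxchg(), and the function and variable names are illustrative.

/* Userspace sketch of the fetch_add_unless pattern behind the new fallbacks. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	do {
		if (c == u)	/* guard: leave the value alone if it is already u */
			return c;
		/* on failure, atomic_compare_exchange_weak() reloads c for us */
	} while (!atomic_compare_exchange_weak(v, &c, c + a));

	return c;	/* old value, matching what atomic_fetch_add_unless() returns */
}

int main(void)
{
	atomic_int refs = 1;

	/* the inc_not_zero idiom: take a reference only while the count is non-zero */
	bool got = fetch_add_unless(&refs, 1, 0) != 0;

	printf("got=%d refs=%d\n", got, atomic_load(&refs));	/* got=1 refs=2 */
	return 0;
}

The kernel versions differ only in spelling (atomic_try_cmpxchg(), unlikely()) and in the memory-ordering variants layered on top by the __atomic_op_acquire/release/fence() wrappers earlier in the patch.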