author    | Joel Stanley <joel@jms.id.au> | 2016-08-04 13:24:41 +0930
committer | Joel Stanley <joel@jms.id.au> | 2016-08-04 13:24:41 +0930
commit    | b916113d820adc856aad6b8ebc1a25403ff7f293 (patch)
tree      | abddf239d912c4868faf9fbc69c67d980f74c7df /include/asm-generic/qspinlock.h
parent    | 2e4db6c6be91ade074c1a150a6db2aeb4920e25c (diff)
parent    | b05965f284db3e086022f4e318e46cb5bffb1376 (diff)
Merge tag 'v4.4.16' into dev-4.4 (tag: openbmc-4.4-20160804-1)
This is the 4.4.16 stable release
Signed-off-by: Joel Stanley <joel@jms.id.au>
Diffstat (limited to 'include/asm-generic/qspinlock.h')
-rw-r--r-- | include/asm-generic/qspinlock.h | 53
1 file changed, 17 insertions(+), 36 deletions(-)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 7d633f19e38a..1885fc44b1bc 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -21,37 +21,33 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
        /*
-        * queued_spin_lock_slowpath() can ACQUIRE the lock before
-        * issuing the unordered store that sets _Q_LOCKED_VAL.
-        *
-        * See both smp_cond_acquire() sites for more detail.
-        *
-        * This however means that in code like:
-        *
-        *   spin_lock(A)                spin_lock(B)
-        *   spin_unlock_wait(B)         spin_is_locked(A)
-        *   do_something()              do_something()
-        *
-        * Both CPUs can end up running do_something() because the store
-        * setting _Q_LOCKED_VAL will pass through the loads in
-        * spin_unlock_wait() and/or spin_is_locked().
+        * See queued_spin_unlock_wait().
         *
-        * Avoid this by issuing a full memory barrier between the spin_lock()
-        * and the loads in spin_unlock_wait() and spin_is_locked().
-        *
-        * Note that regular mutual exclusion doesn't care about this
-        * delayed store.
+        * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+        * isn't immediately observable.
         */
-       smp_mb();
-       return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+       return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -121,21 +117,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-       /* See queued_spin_is_locked() */
-       smp_mb();
-       while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-               cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
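
The structural point of this hunk is that both helpers are now wrapped in #ifndef guards, and queued_spin_unlock_wait() is reduced to an extern declaration, so the generic definitions can be overridden and the heavyweight wait logic can live out of line (the diffstat above is filtered to this one header, so that out-of-line body is not shown here). As a rough sketch of how the guard pattern is meant to be consumed, the following hypothetical arch header is illustrative only: the arch name "foo", the include guard, and the choice to restore the pre-patch smp_mb()-based behaviour are assumptions for the example, not part of this patch.

/*
 * Hypothetical arch/foo/include/asm/qspinlock.h -- illustrative sketch only.
 * Defining the macro to its own name before including the generic header
 * makes the #ifndef guards added by this patch skip the generic versions.
 */
#ifndef _ASM_FOO_QSPINLOCK_H
#define _ASM_FOO_QSPINLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm-generic/qspinlock_types.h>

#define queued_spin_is_locked queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        /*
         * Assumption for this sketch: this architecture wants a full
         * barrier before the load and checks only the locked byte,
         * i.e. the behaviour the generic helper had before this patch.
         */
        smp_mb();
        return atomic_read(&lock->val) & _Q_LOCKED_MASK;
}

/*
 * queued_spin_unlock_wait() is only declared by the generic header now;
 * its definition would live in C code elsewhere (core qspinlock code or
 * an arch source file), outside the scope of this sketch.
 */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_FOO_QSPINLOCK_H */

The #define-to-self convention is what lets the generic header's #ifndef see that an override exists while still letting callers use the same function name on every architecture.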