author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-05-16 15:11:12 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-05-20 08:58:55 +0200
commit     470ada6b1a1d80a173586c036f84e2c3a486ebf9 (patch)
tree       44fd6a61db1666e1c5239392b17b5c6f21ec40b0 /arch/s390
parent     939c5ae4029e1679bb93f7d09afb8c831db985bd (diff)
s390/spinlock: refactor arch_spin_lock_wait[_flags]
Reorder the spinlock wait code to make it more readable.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/lib/spinlock.c  81
1 file changed, 47 insertions, 34 deletions
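
As a reading aid for the diff below, here is the new control flow of arch_spin_lock_wait() in condensed form, taken from the patch itself; the _flags variant differs only in the interrupt enable/disable steps around the compare-and-swap. The "Case" comments are editorial, not part of the kernel source:

	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Case 1: the lock is free -- try to take it atomically. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;	/* lost the race, start over */
		}
		/* Case 2: the owner's virtual CPU is not running -- yield to it. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Case 3: the owner is running -- spin on the lock value for a
		 * while, then yield unless running directly on LPAR. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}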
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 1dd282c742b5..5b0e445bc3f3 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -31,23 +31,31 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	int count;
 
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -60,27 +68,32 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 	local_irq_restore(flags);
 
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-				local_irq_restore(flags);
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
-		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
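
A note on the ACCESS_ONCE() wrappers the patch introduces: they force a fresh load of lp->lock on every pass, so the compiler cannot hoist the read out of the spin loop and busy-wait on a stale register value. A minimal stand-alone sketch of the pattern (spin_until_free() is a hypothetical helper for illustration; the ACCESS_ONCE() definition matches the kernel's of that era):

/* Kernel-era definition: the volatile cast forces a real memory load. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Hypothetical helper, not from the patch: without the volatile cast
 * the compiler may load the lock word once and spin forever on the
 * cached value. */
static void spin_until_free(unsigned int *lock)
{
	while (ACCESS_ONCE(*lock))
		;	/* the lock word is re-loaded on each pass */
}

The MACHINE_IS_LPAR test mirrors the comment in the patch: with stacked hypervisors (e.g. z/VM on LPAR) the lock owner can appear scheduled at one layer yet be preempted at another, so the CPU is yielded anyway; when running directly on LPAR the extra yield is skipped.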