author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2017-03-24 17:25:02 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2017-09-28 07:29:44 +0200
commit	b96f7d881ad94203e997cd2aa7112d4a06d121ef (patch)
tree	7c942d7f08610684a3cbce30a3052bde6549b92e /arch/s390/lib
parent	8153380379ecc8381f6d55f6497de31a36c75aa5 (diff)
download	blackbird-op-linux-b96f7d881ad94203e997cd2aa7112d4a06d121ef.tar.gz
	blackbird-op-linux-b96f7d881ad94203e997cd2aa7112d4a06d121ef.zip
s390/spinlock: introduce spinlock wait queueing
The queued spinlock code for s390 follows the principles of the common
code qspinlock implementation but with a few notable differences. The
format of the spinlock_t locking word differs; s390 needs to store the
logical CPU number of the lock holder in the spinlock_t to be able to
use the diagnose 9c directed yield hypervisor call.

The inline code sequences for spin_lock and spin_unlock are nice and
short. The inline portion of a spin_lock now typically looks like this:

	lhi	%r0,0			# 0 indicates an empty lock
	l	%r1,0x3a0		# CPU number + 1 from lowcore
	cs	%r0,%r1,<some_lock>	# lock operation
	jnz	call_wait		# on failure call wait function
locked:
	...
call_wait:
	la	%r2,<some_lock>
	brasl	%r14,arch_spin_lock_wait
	j	locked

A spin_unlock is as simple as before:

	lhi	%r0,0
	sth	%r0,2(%r2)		# unlock operation

After a CPU has queued itself it may not enable interrupts again for
the arch_spin_lock_flags() variant. The arch_spin_lock_wait_flags wait
function is removed.

To improve performance the code implements opportunistic lock stealing.
If the wait function finds a spinlock_t that indicates that the lock is
free but there are queued waiters, the CPU may steal the lock up to
three times without queueing itself. The lock stealing updates the
steal counter in the lock word to prevent more than 3 steals. The
counter is reset at the time the CPU next in the queue successfully
takes the lock.

While the queued spinlocks improve performance in a system with
dedicated CPUs, in a virtualized environment with continuously
overcommitted CPUs the queued spinlocks can have a negative effect on
performance. This is due to the fact that a queued CPU that is
preempted by the hypervisor will block the queue at some point even
without holding the lock. With the classic spinlock it does not matter
if a CPU that waits for the lock is preempted. Therefore use the queued
spinlock code only if the system runs with dedicated CPUs and fall back
to classic spinlocks when running with shared CPUs.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
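For readers who want to check the bit layout, a minimal user-space sketch of the lock word described above follows. It reuses the mask and offset values introduced by the patch; the decode helpers (lock_holder_cpu() and friends) are hypothetical illustrations for this sketch, not kernel API.

	#include <assert.h>
	#include <stdio.h>

	/*
	 * Bit layout of the 32-bit lock word, as defined by the patch:
	 *   bits  0-15: CPU number + 1 of the lock holder (0 = lock free)
	 *   bits 16-17: steal counter, saturates at 3
	 *   bits 18-19: wait-queue node index of the tail CPU
	 *   bits 20-31: CPU number + 1 of the tail CPU (0 = empty queue)
	 */
	#define _Q_LOCK_CPU_OFFSET	0
	#define _Q_LOCK_STEAL_OFFSET	16
	#define _Q_TAIL_IDX_OFFSET	18
	#define _Q_TAIL_CPU_OFFSET	20

	#define _Q_LOCK_CPU_MASK	0x0000ffff
	#define _Q_LOCK_STEAL_ADD	0x00010000
	#define _Q_LOCK_STEAL_MASK	0x00030000
	#define _Q_TAIL_IDX_MASK	0x000c0000
	#define _Q_TAIL_CPU_MASK	0xfff00000

	/* Hypothetical decode helpers mirroring the masks above. */
	static int lock_holder_cpu(int lock)
	{
		return (lock & _Q_LOCK_CPU_MASK) - 1;	/* -1 if free */
	}
	static int steal_count(int lock)
	{
		return (lock & _Q_LOCK_STEAL_MASK) >> _Q_LOCK_STEAL_OFFSET;
	}
	static int tail_node_idx(int lock)
	{
		return (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	}
	static int tail_cpu(int lock)
	{
		return ((lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET) - 1;
	}

	int main(void)
	{
		/* Invented scenario: CPU 5 holds the lock, CPU 9 is queued
		 * with node index 2, and the lock has been stolen once. */
		int lock = (5 + 1) | _Q_LOCK_STEAL_ADD |
			   (2 << _Q_TAIL_IDX_OFFSET) |
			   ((9 + 1) << _Q_TAIL_CPU_OFFSET);

		assert(lock_holder_cpu(lock) == 5);
		assert(steal_count(lock) == 1);
		assert(tail_node_idx(lock) == 2);
		assert(tail_cpu(lock) == 9);
		printf("lock word = 0x%08x\n", lock);
		return 0;
	}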
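The stealing rule can be modelled the same way. The sketch below is a single-threaded illustration under the same assumptions, not the kernel code: try_steal_or_acquire() is a made-up name, and GCC's __sync_bool_compare_and_swap() stands in for the kernel's __atomic_cmpxchg_bool().

	#include <assert.h>

	/* Masks as defined by the patch (see the previous sketch). */
	#define _Q_LOCK_CPU_MASK	0x0000ffff
	#define _Q_LOCK_STEAL_ADD	0x00010000
	#define _Q_LOCK_STEAL_MASK	0x00030000

	/* Hypothetical model: returns 1 on success, 0 if the caller must
	 * queue. lockval is the acquiring CPU's number + 1. */
	static int try_steal_or_acquire(int *lock, int lockval)
	{
		int old = *lock;

		if (old & _Q_LOCK_CPU_MASK)
			return 0;	/* lock is held: caller must queue */
		if ((old & _Q_LOCK_STEAL_MASK) == _Q_LOCK_STEAL_MASK)
			return 0;	/* stolen 3 times: caller must queue */
		/* Keep the tail bits; bump the steal counter only if the
		 * lock word is non-zero, i.e. waiters are queued. */
		int new = (old ? old + _Q_LOCK_STEAL_ADD : 0) | lockval;
		return __sync_bool_compare_and_swap(lock, old, new);
	}

	int main(void)
	{
		int lock = 3 << 20;	/* free lock, but CPU 2 is queued */

		/* CPU 0 steals the lock; the steal counter becomes 1. */
		assert(try_steal_or_acquire(&lock, 0 + 1));
		return 0;
	}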
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--	arch/s390/lib/spinlock.c	194
1 file changed, 164 insertions(+), 30 deletions(-)
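Before the diff itself, one more simplified model: the MCS-style hand-over between queued waiters that the patch implements with per-CPU spin_wait nodes. The struct fields follow the patch; the two-waiter scenario and the single-threaded sequencing are invented for illustration.

	#include <assert.h>
	#include <stddef.h>

	#define _Q_TAIL_CPU_OFFSET	20

	struct spin_wait {
		struct spin_wait *next, *prev;
		int node_id;
	};

	int main(void)
	{
		/* node_id encodes CPU number + 1, as in the patch. */
		struct spin_wait head = { .node_id = (0 + 1) << _Q_TAIL_CPU_OFFSET };
		struct spin_wait node = { .node_id = (1 + 1) << _Q_TAIL_CPU_OFFSET };

		/* Enqueue: after publishing itself as the new tail in the
		 * lock word, the waiter for CPU 1 links itself behind the
		 * old tail and would now spin on its own prev pointer. */
		node.prev = &head;
		head.next = &node;

		/* Hand-over: once the waiter ahead has taken the lock, it
		 * clears exactly that pointer (the "next->prev = NULL" step
		 * in arch_spin_lock_queued()). */
		head.next->prev = NULL;

		assert(node.prev == NULL);	/* node is now the queue head */
		return 0;
	}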
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index ee73bcca7e6f..6747134227cd 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -8,8 +8,10 @@
#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
+#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
+#include <linux/percpu.h>
#include <asm/io.h>
int spin_retry = -1;
@@ -32,6 +34,40 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+struct spin_wait {
+ struct spin_wait *next, *prev;
+ int node_id;
+} __aligned(32);
+
+static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);
+
+#define _Q_LOCK_CPU_OFFSET 0
+#define _Q_LOCK_STEAL_OFFSET 16
+#define _Q_TAIL_IDX_OFFSET 18
+#define _Q_TAIL_CPU_OFFSET 20
+
+#define _Q_LOCK_CPU_MASK 0x0000ffff
+#define _Q_LOCK_STEAL_ADD 0x00010000
+#define _Q_LOCK_STEAL_MASK 0x00030000
+#define _Q_TAIL_IDX_MASK 0x000c0000
+#define _Q_TAIL_CPU_MASK 0xfff00000
+
+#define _Q_LOCK_MASK (_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+void arch_spin_lock_setup(int cpu)
+{
+ struct spin_wait *node;
+ int ix;
+
+ node = per_cpu_ptr(&spin_wait[0], cpu);
+ for (ix = 0; ix < 4; ix++, node++) {
+ memset(node, 0, sizeof(*node));
+ node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
+ (ix << _Q_TAIL_IDX_OFFSET);
+ }
+}
+
static inline int arch_load_niai4(int *lock)
{
int owner;
@@ -60,75 +96,160 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
return expected == old;
}
-void arch_spin_lock_wait(arch_spinlock_t *lp)
+static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
- int cpu = SPINLOCK_LOCKVAL;
- int owner, count;
+ int ix, cpu;
+
+ ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
+ cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
+ return per_cpu_ptr(&spin_wait[ix], cpu - 1);
+}
+
+static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
+{
+ if (lock & _Q_LOCK_CPU_MASK)
+ return lock & _Q_LOCK_CPU_MASK;
+ if (node == NULL || node->prev == NULL)
+ return 0; /* 0 -> no target cpu */
+ while (node->prev)
+ node = node->prev;
+ return node->node_id >> _Q_TAIL_CPU_OFFSET;
+}
+
+static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
+{
+ struct spin_wait *node, *next;
+ int lockval, ix, node_id, tail_id, old, new, owner, count;
+
+ ix = S390_lowcore.spinlock_index++;
+ barrier();
+ lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
+ node = this_cpu_ptr(&spin_wait[ix]);
+ node->prev = node->next = NULL;
+ node_id = node->node_id;
+
+ /* Enqueue the node for this CPU in the spinlock wait queue */
+ while (1) {
+ old = READ_ONCE(lp->lock);
+ if ((old & _Q_LOCK_CPU_MASK) == 0 &&
+ (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
+ /*
+ * The lock is free but there may be waiters.
+ * With no waiters simply take the lock, if there
+ * are waiters try to steal the lock. The lock may
+ * be stolen three times before the next queued
+ * waiter will get the lock.
+ */
+ new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ /* Got the lock */
+ goto out;
+ /* lock passing in progress */
+ continue;
+ }
+ /* Make the node of this CPU the new tail. */
+ new = node_id | (old & _Q_LOCK_MASK);
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ break;
+ }
+ /* Set the 'next' pointer of the tail node in the queue */
+ tail_id = old & _Q_TAIL_MASK;
+ if (tail_id != 0) {
+ node->prev = arch_spin_decode_tail(tail_id);
+ WRITE_ONCE(node->prev->next, node);
+ }
/* Pass the virtual CPU to the lock holder if it is not running */
- owner = arch_load_niai4(&lp->lock);
+ owner = arch_spin_yield_target(old, node);
if (owner && arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
+ /* Spin on the CPU local node->prev pointer */
+ if (tail_id != 0) {
+ count = spin_retry;
+ while (READ_ONCE(node->prev) != NULL) {
+ if (count-- >= 0)
+ continue;
+ count = spin_retry;
+ /* Query running state of lock holder again. */
+ owner = arch_spin_yield_target(old, node);
+ if (owner && arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
+ }
+ }
+
+ /* Spin on the lock value in the spinlock_t */
count = spin_retry;
while (1) {
- owner = arch_load_niai4(&lp->lock);
- /* Try to get the lock if it is free. */
+ old = READ_ONCE(lp->lock);
+ owner = old & _Q_LOCK_CPU_MASK;
if (!owner) {
- if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
- return;
+ tail_id = old & _Q_TAIL_MASK;
+ new = ((tail_id != node_id) ? tail_id : 0) | lockval;
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ /* Got the lock */
+ break;
continue;
}
if (count-- >= 0)
continue;
count = spin_retry;
- /*
- * For multiple layers of hypervisors, e.g. z/VM + LPAR
- * yield the CPU unconditionally. For LPAR rely on the
- * sense running status.
- */
if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
+
+ /* Pass lock_spin job to next CPU in the queue */
+ if (node_id && tail_id != node_id) {
+ /* Wait until the next CPU has set up the 'next' pointer */
+ while ((next = READ_ONCE(node->next)) == NULL)
+ ;
+ next->prev = NULL;
+ }
+
+ out:
+ S390_lowcore.spinlock_index--;
}
-EXPORT_SYMBOL(arch_spin_lock_wait);
-void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
- int cpu = SPINLOCK_LOCKVAL;
- int owner, count;
+ int lockval, old, new, owner, count;
- local_irq_restore(flags);
+ lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
/* Pass the virtual CPU to the lock holder if it is not running */
- owner = arch_load_niai4(&lp->lock);
+ owner = arch_spin_yield_target(ACCESS_ONCE(lp->lock), NULL);
if (owner && arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
count = spin_retry;
while (1) {
- owner = arch_load_niai4(&lp->lock);
+ old = arch_load_niai4(&lp->lock);
+ owner = old & _Q_LOCK_CPU_MASK;
/* Try to get the lock if it is free. */
if (!owner) {
- local_irq_disable();
- if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
- return;
- local_irq_restore(flags);
+ new = (old & _Q_TAIL_MASK) | lockval;
+ if (arch_cmpxchg_niai8(&lp->lock, old, new))
+ /* Got the lock */
+ return;
continue;
}
if (count-- >= 0)
continue;
count = spin_retry;
- /*
- * For multiple layers of hypervisors, e.g. z/VM + LPAR
- * yield the CPU unconditionally. For LPAR rely on the
- * sense running status.
- */
if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
}
-EXPORT_SYMBOL(arch_spin_lock_wait_flags);
+
+void arch_spin_lock_wait(arch_spinlock_t *lp)
+{
+ /* Use classic spinlocks + niai if the steal time is >= 10% */
+ if (test_cpu_flag(CIF_DEDICATED_CPU))
+ arch_spin_lock_queued(lp);
+ else
+ arch_spin_lock_classic(lp);
+}
+EXPORT_SYMBOL(arch_spin_lock_wait);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
@@ -270,3 +391,16 @@ void arch_lock_relax(int cpu)
smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_lock_relax);
+
+void arch_spin_relax(arch_spinlock_t *lp)
+{
+ int cpu;
+
+ cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
+ if (!cpu)
+ return;
+ if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
+ return;
+ smp_yield_cpu(cpu - 1);
+}
+EXPORT_SYMBOL(arch_spin_relax);