Diffstat (limited to 'arch/x86/xen/spinlock.c')
-rw-r--r-- | arch/x86/xen/spinlock.c | 46
1 file changed, 20 insertions(+), 26 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 973f10e05211..1c8a8816a402 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -9,6 +9,7 @@
 #include <linux/log2.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -40,33 +42,24 @@ static void xen_qlock_kick(int cpu)
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
+	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
 	/* If kicker interrupts not initialized yet, just spin */
-	if (irq == -1)
+	if (irq == -1 || in_nmi())
 		return;
 
-	/* clear pending */
-	xen_clear_irq_pending(irq);
-	barrier();
-
-	/*
-	 * We check the byte value after clearing pending IRQ to make sure
-	 * that we won't miss a wakeup event because of the clearing.
-	 *
-	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
-	 * So it is effectively a memory barrier for x86.
-	 */
-	if (READ_ONCE(*byte) != val)
-		return;
+	/* Detect reentry. */
+	atomic_inc(nest_cnt);
 
-	/*
-	 * If an interrupt happens here, it will leave the wakeup irq
-	 * pending, which will cause xen_poll_irq() to return
-	 * immediately.
-	 */
+	/* If irq pending already and no nested call clear it. */
+	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
+		xen_clear_irq_pending(irq);
+	} else if (READ_ONCE(*byte) == val) {
+		/* Block until irq becomes pending (or a spurious wakeup) */
+		xen_poll_irq(irq);
+	}
 
-	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
-	xen_poll_irq(irq);
+	atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
@@ -141,11 +134,12 @@ void __init xen_init_spinlocks(void)
 
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
 	__pv_init_lock_hash();
-	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_lock_ops.wait = xen_qlock_wait;
-	pv_lock_ops.kick = xen_qlock_kick;
-	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops.lock.queued_spin_unlock =
+		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_ops.lock.wait = xen_qlock_wait;
+	pv_ops.lock.kick = xen_qlock_kick;
+	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }
 
 static __init int xen_parse_nopvspin(char *arg)
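
For context, a minimal userspace sketch of the nesting-count pattern the
patch introduces in xen_qlock_wait(): only the outermost caller may consume
a stale pending event, so a nested entry (in the kernel, an NMI arriving
mid-wait) cannot swallow a wakeup the outer waiter depends on. This is an
illustration only, not kernel code; sim_qlock_wait, pending and the driver
in main() are hypothetical stand-ins for the Xen event-channel machinery.

/*
 * Build with: cc -std=c11 sketch.c
 * nest_cnt is per-CPU in the kernel; a single global suffices here.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nest_cnt;    /* mirrors xen_qlock_wait_nest (assumed)  */
static atomic_bool pending;    /* stands in for the event-channel bit   */

static void sim_qlock_wait(const char *who)
{
	/* Detect reentry, as atomic_inc(nest_cnt) does in the patch. */
	atomic_fetch_add(&nest_cnt, 1);

	if (atomic_load(&nest_cnt) == 1 && atomic_load(&pending)) {
		/* Outermost call: safe to clear a stale pending event. */
		atomic_store(&pending, false);
		printf("%s: cleared stale pending event\n", who);
	} else {
		/* Nested call: leave `pending` untouched and just wait. */
		printf("%s: polls; pending stays %s\n", who,
		       atomic_load(&pending) ? "set" : "clear");
	}

	atomic_fetch_sub(&nest_cnt, 1);
}

int main(void)
{
	atomic_store(&pending, true);      /* wakeup already latched      */
	atomic_fetch_add(&nest_cnt, 1);    /* outer waiter is mid-wait... */
	sim_qlock_wait("nested");          /* ...when a nested wait runs  */
	atomic_fetch_sub(&nest_cnt, 1);
	sim_qlock_wait("outer");           /* outer still sees its event  */
	return 0;
}

Run as written, the nested call leaves the pending flag set and only the
outer call consumes it, which is the property the nesting count buys; the
in_nmi() bail-out in the patch additionally keeps NMI context from waiting
at all, since the kicker IRQ path is not NMI-safe.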