Diffstat (limited to 'virt/kvm/arm/vgic/vgic.c')
-rw-r--r-- | virt/kvm/arm/vgic/vgic.c | 20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e83b7fe4baae..6440b56ec90e 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -29,7 +29,7 @@
 #define DEBUG_SPINLOCK_BUG_ON(p)
 #endif
 
-struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
+struct vgic_global __section(.hyp.text) kvm_vgic_global_state = {.gicv3_cpuif = STATIC_KEY_FALSE_INIT,};
 
 /*
  * Locking order is always:
@@ -273,6 +273,18 @@ retry:
                 * no more work for us to do.
                 */
                spin_unlock(&irq->irq_lock);
+
+               /*
+                * We have to kick the VCPU here, because we could be
+                * queueing an edge-triggered interrupt for which we
+                * get no EOI maintenance interrupt. In that case,
+                * while the IRQ is already on the VCPU's AP list, the
+                * VCPU could have EOI'ed the original interrupt and
+                * won't see this one until it exits for some other
+                * reason.
+                */
+               if (vcpu)
+                       kvm_vcpu_kick(vcpu);
                return false;
        }
 
@@ -645,6 +657,9 @@ next:
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
+       if (unlikely(!vgic_initialized(vcpu->kvm)))
+               return;
+
        vgic_process_maintenance_interrupt(vcpu);
        vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
@@ -653,6 +668,9 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 /* Flush our emulation state into the GIC hardware before entering the guest. */
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
+       if (unlikely(!vgic_initialized(vcpu->kvm)))
+               return;
+
        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
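
Context for the two new early returns (not part of the commit): kvm_vgic_flush_hwstate() and kvm_vgic_sync_hwstate() bracket every guest run, so they can be reached before userspace has initialized the vgic, which is why both now bail out on !vgic_initialized(). A minimal sketch of that ordering is below, assuming a caller along the lines of the arm/arm64 vcpu run loop; run_vcpu_once() and enter_guest() are hypothetical names used only for illustration.

/* Sketch only: hypothetical caller showing where the hooks sit. */
static int run_vcpu_once(struct kvm_vcpu *vcpu)
{
        int ret;

        /* Push pending virtual IRQs into list registers before entry. */
        kvm_vgic_flush_hwstate(vcpu);

        ret = enter_guest(vcpu);        /* hypothetical stand-in for the world switch */

        /* Fold LR state back into the emulation and prune the ap_list after exit. */
        kvm_vgic_sync_hwstate(vcpu);

        return ret;
}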