Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/arm/arch_timer.c     | 35
-rw-r--r--   virt/kvm/arm/hyp/vgic-v2-sr.c |  7
-rw-r--r--   virt/kvm/arm/vgic.c           | 39
-rw-r--r--   virt/kvm/arm/vgic/vgic-init.c | 31
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio.c |  4
-rw-r--r--   virt/kvm/arm/vgic/vgic-v2.c   | 14
-rw-r--r--   virt/kvm/arm/vgic/vgic-v3.c   | 14
-rw-r--r--   virt/kvm/irqchip.c            |  2
-rw-r--r--   virt/kvm/kvm_main.c           | 71
9 files changed, 85 insertions, 132 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index e2d5b6f988fb..4fde8c7dfcfe 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -405,26 +405,17 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
         return (u64)-1;
 }
 
-static int kvm_timer_cpu_notify(struct notifier_block *self,
-                                unsigned long action, void *cpu)
+static int kvm_timer_starting_cpu(unsigned int cpu)
 {
-        switch (action) {
-        case CPU_STARTING:
-        case CPU_STARTING_FROZEN:
-                kvm_timer_init_interrupt(NULL);
-                break;
-        case CPU_DYING:
-        case CPU_DYING_FROZEN:
-                disable_percpu_irq(host_vtimer_irq);
-                break;
-        }
-
-        return NOTIFY_OK;
+        kvm_timer_init_interrupt(NULL);
+        return 0;
 }
 
-static struct notifier_block kvm_timer_cpu_nb = {
-        .notifier_call = kvm_timer_cpu_notify,
-};
+static int kvm_timer_dying_cpu(unsigned int cpu)
+{
+        disable_percpu_irq(host_vtimer_irq);
+        return 0;
+}
 
 int kvm_timer_hyp_init(void)
 {
@@ -449,12 +440,6 @@ int kvm_timer_hyp_init(void)
                 goto out;
         }
 
-        err = __register_cpu_notifier(&kvm_timer_cpu_nb);
-        if (err) {
-                kvm_err("Cannot register timer CPU notifier\n");
-                goto out_free;
-        }
-
         wqueue = create_singlethread_workqueue("kvm_arch_timer");
         if (!wqueue) {
                 err = -ENOMEM;
@@ -462,8 +447,10 @@ int kvm_timer_hyp_init(void)
         }
 
         kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
-        on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
 
+        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
+                          "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
+                          kvm_timer_dying_cpu);
         goto out;
 out_free:
         free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f12b3b277b..3a3a699b7489 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
                 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                         continue;
 
-                if (cpu_if->vgic_elrsr & (1UL << i)) {
+                if (cpu_if->vgic_elrsr & (1UL << i))
                         cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
-                        continue;
-                }
+                else
+                        cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 
-                cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
                 writel_relaxed(0, base + GICH_LR0 + (i * 4));
         }
 }
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index c3bfbb981e73..67cb5e948be2 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -2326,32 +2326,18 @@ int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
         return -ENXIO;
 }
 
-static void vgic_init_maintenance_interrupt(void *info)
+static int vgic_starting_cpu(unsigned int cpu)
 {
         enable_percpu_irq(vgic->maint_irq, 0);
+        return 0;
 }
 
-static int vgic_cpu_notify(struct notifier_block *self,
-                           unsigned long action, void *cpu)
+static int vgic_dying_cpu(unsigned int cpu)
 {
-        switch (action) {
-        case CPU_STARTING:
-        case CPU_STARTING_FROZEN:
-                vgic_init_maintenance_interrupt(NULL);
-                break;
-        case CPU_DYING:
-        case CPU_DYING_FROZEN:
-                disable_percpu_irq(vgic->maint_irq);
-                break;
-        }
-
-        return NOTIFY_OK;
+        disable_percpu_irq(vgic->maint_irq);
+        return 0;
 }
 
-static struct notifier_block vgic_cpu_nb = {
-        .notifier_call = vgic_cpu_notify,
-};
-
 static int kvm_vgic_probe(void)
 {
         const struct gic_kvm_info *gic_kvm_info;
@@ -2392,19 +2378,10 @@ int kvm_vgic_hyp_init(void)
                 return ret;
         }
 
-        ret = __register_cpu_notifier(&vgic_cpu_nb);
-        if (ret) {
-                kvm_err("Cannot register vgic CPU notifier\n");
-                goto out_free_irq;
-        }
-
-        on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
+        cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_STARTING,
+                          "AP_KVM_ARM_VGIC_STARTING", vgic_starting_cpu,
+                          vgic_dying_cpu);
         return 0;
-
-out_free_irq:
-        free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
-        return ret;
 }
 
 int kvm_irq_map_gsi(struct kvm *kvm,
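The arch_timer.c and vgic.c conversions above (like the vgic-init.c and kvm_main.c ones further down) all follow the same hotplug-state pattern: a startup/teardown callback pair is registered once with cpuhp_setup_state(), which also runs the startup callback on every CPU that is already online, so the old on_each_cpu() call disappears along with the notifier block. The following is only a minimal sketch of that pattern as a stand-alone module, assuming hypothetical my_starting_cpu()/my_dying_cpu() callbacks and the generic CPUHP_AP_ONLINE_DYN slot rather than the dedicated *_STARTING states this series adds:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

/* Hypothetical per-CPU setup/teardown, standing in for the KVM callbacks. */
static int my_starting_cpu(unsigned int cpu)
{
        pr_info("cpu %u coming up\n", cpu);
        return 0;               /* a non-zero return aborts the hotplug step */
}

static int my_dying_cpu(unsigned int cpu)
{
        pr_info("cpu %u going down\n", cpu);
        return 0;
}

static enum cpuhp_state my_state;

static int __init my_example_init(void)
{
        int ret;

        /*
         * Registers the pair and, unlike the old notifier API, also invokes
         * my_starting_cpu() on all currently online CPUs, which is why the
         * converted code no longer needs a separate on_each_cpu() call.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                my_starting_cpu, my_dying_cpu);
        if (ret < 0)
                return ret;
        my_state = ret;         /* dynamic states return the allocated slot */
        return 0;
}

static void __exit my_example_exit(void)
{
        /* Runs my_dying_cpu() on all online CPUs and releases the state. */
        cpuhp_remove_state(my_state);
}

module_init(my_example_init);
module_exit(my_example_exit);
MODULE_LICENSE("GPL");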
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index a1442f7c9c4d..2c7f0d5a62ea 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -353,32 +353,19 @@ out:
 
 /* GENERIC PROBE */
 
-static void vgic_init_maintenance_interrupt(void *info)
+static int vgic_init_cpu_starting(unsigned int cpu)
 {
         enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
+        return 0;
 }
 
-static int vgic_cpu_notify(struct notifier_block *self,
-                           unsigned long action, void *cpu)
-{
-        switch (action) {
-        case CPU_STARTING:
-        case CPU_STARTING_FROZEN:
-                vgic_init_maintenance_interrupt(NULL);
-                break;
-        case CPU_DYING:
-        case CPU_DYING_FROZEN:
-                disable_percpu_irq(kvm_vgic_global_state.maint_irq);
-                break;
-        }
-        return NOTIFY_OK;
+static int vgic_init_cpu_dying(unsigned int cpu)
+{
+        disable_percpu_irq(kvm_vgic_global_state.maint_irq);
+        return 0;
 }
 
-static struct notifier_block vgic_cpu_nb = {
-        .notifier_call = vgic_cpu_notify,
-};
-
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 {
         /*
@@ -434,14 +421,14 @@ int kvm_vgic_hyp_init(void)
                 return ret;
         }
 
-        ret = __register_cpu_notifier(&vgic_cpu_nb);
+        ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+                                "AP_KVM_ARM_VGIC_INIT_STARTING",
+                                vgic_init_cpu_starting, vgic_init_cpu_dying);
         if (ret) {
                 kvm_err("Cannot register vgic CPU notifier\n");
                 goto out_free_irq;
         }
 
-        on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
         kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
         return 0;
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 059595ec3da0..9f6fab74dce7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
          * other thread sync back the IRQ.
          */
         while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-               irq->vcpu->cpu != -1) { /* VCPU thread is running */
-                BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
+               irq->vcpu->cpu != -1) /* VCPU thread is running */
                 cond_resched_lock(&irq->irq_lock);
-        }
 
         irq->active = new_active_state;
         if (new_active_state)
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 8ad42c217770..e31405ee5515 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                         }
                 }
 
-                /* Clear soft pending state when level IRQs have been acked */
-                if (irq->config == VGIC_CONFIG_LEVEL &&
-                    !(val & GICH_LR_PENDING_BIT)) {
-                        irq->soft_pending = false;
-                        irq->pending = irq->line_level;
+                /*
+                 * Clear soft pending state when level irqs have been acked.
+                 * Always regenerate the pending state.
+                 */
+                if (irq->config == VGIC_CONFIG_LEVEL) {
+                        if (!(val & GICH_LR_PENDING_BIT))
+                                irq->soft_pending = false;
+
+                        irq->pending = irq->line_level || irq->soft_pending;
                 }
 
                 spin_unlock(&irq->irq_lock);
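The vgic-v2.c hunk above and its vgic-v3.c counterpart below make the fold path recompute the pending state instead of copying line_level alone: a level-triggered interrupt must stay pending while either the physical line is still asserted or a software injection has not yet been acked. Below is a self-contained sketch of that rule only, using a hypothetical struct whose fields mirror the ones touched in the diff:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the relevant per-IRQ state for a level IRQ. */
struct level_irq {
        bool line_level;    /* state of the physical input line */
        bool soft_pending;  /* pending state injected by software */
        bool pending;       /* what the guest should observe */
};

/*
 * Fold a list register back into the model: the soft pending bit is cleared
 * only once the LR no longer shows the IRQ as pending, and the pending state
 * is always regenerated from both sources.
 */
static void fold_level_irq(struct level_irq *irq, bool lr_pending)
{
        if (!lr_pending)
                irq->soft_pending = false;

        irq->pending = irq->line_level || irq->soft_pending;
}

int main(void)
{
        struct level_irq irq = { .line_level = true, .soft_pending = true };

        fold_level_irq(&irq, false);            /* guest acked the LR... */
        printf("pending = %d\n", irq.pending);  /* ...line still high: 1 */

        irq.line_level = false;
        fold_level_irq(&irq, false);
        printf("pending = %d\n", irq.pending);  /* both sources gone: 0 */
        return 0;
}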
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 336a46115937..346b4ad12b49 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                         }
                 }
 
-                /* Clear soft pending state when level irqs have been acked */
-                if (irq->config == VGIC_CONFIG_LEVEL &&
-                    !(val & ICH_LR_PENDING_BIT)) {
-                        irq->soft_pending = false;
-                        irq->pending = irq->line_level;
+                /*
+                 * Clear soft pending state when level irqs have been acked.
+                 * Always regenerate the pending state.
+                 */
+                if (irq->config == VGIC_CONFIG_LEVEL) {
+                        if (!(val & ICH_LR_PENDING_BIT))
+                                irq->soft_pending = false;
+
+                        irq->pending = irq->line_level || irq->soft_pending;
                 }
 
                 spin_unlock(&irq->irq_lock);
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index fe84e1a95dd5..8db197bb6c7a 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
         irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                         lockdep_is_held(&kvm->irq_lock));
 
-        if (gsi < irq_rt->nr_rt_entries) {
+        if (irq_rt && gsi < irq_rt->nr_rt_entries) {
                 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                         entries[n] = *e;
                         ++n;
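The irqchip.c hunk guards against a routing table that was never set up: kvm->irq_routing may still be NULL when kvm_irq_map_gsi() runs, so both the pointer and the GSI range have to be checked before the map is indexed. A small illustrative sketch of that guarded lookup, with a simplified, hypothetical table type rather than the real kvm_irq_routing_table:

#include <stddef.h>

/* Hypothetical, simplified routing table: gsi -> irq, -1 if unset. */
struct routing_table {
        unsigned int nr_rt_entries;
        int map[32];
};

/*
 * Look up a GSI.  The table pointer may legitimately be NULL if userspace
 * has never configured any routing, so both conditions must hold before
 * the map is dereferenced -- the same guard the hunk above adds.
 */
static int lookup_gsi(const struct routing_table *rt, unsigned int gsi)
{
        if (rt && gsi < rt->nr_rt_entries)
                return rt->map[gsi];

        return -1;              /* no route known for this GSI */
}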
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 37af23052470..2e791367c576 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -148,6 +148,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
         put_cpu();
         return 0;
 }
+EXPORT_SYMBOL_GPL(vcpu_load);
 
 void vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -157,6 +158,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
         preempt_enable();
         mutex_unlock(&vcpu->mutex);
 }
+EXPORT_SYMBOL_GPL(vcpu_put);
 
 static void ack_flush(void *_completed)
 {
@@ -2935,25 +2937,27 @@ static long kvm_vm_ioctl(struct file *filp,
         case KVM_SET_GSI_ROUTING: {
                 struct kvm_irq_routing routing;
                 struct kvm_irq_routing __user *urouting;
-                struct kvm_irq_routing_entry *entries;
+                struct kvm_irq_routing_entry *entries = NULL;
 
                 r = -EFAULT;
                 if (copy_from_user(&routing, argp, sizeof(routing)))
                         goto out;
                 r = -EINVAL;
-                if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+                if (routing.nr > KVM_MAX_IRQ_ROUTES)
                         goto out;
                 if (routing.flags)
                         goto out;
-                r = -ENOMEM;
-                entries = vmalloc(routing.nr * sizeof(*entries));
-                if (!entries)
-                        goto out;
-                r = -EFAULT;
-                urouting = argp;
-                if (copy_from_user(entries, urouting->entries,
-                                   routing.nr * sizeof(*entries)))
-                        goto out_free_irq_routing;
+                if (routing.nr) {
+                        r = -ENOMEM;
+                        entries = vmalloc(routing.nr * sizeof(*entries));
+                        if (!entries)
+                                goto out;
+                        r = -EFAULT;
+                        urouting = argp;
+                        if (copy_from_user(entries, urouting->entries,
+                                           routing.nr * sizeof(*entries)))
+                                goto out_free_irq_routing;
+                }
                 r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                         routing.flags);
 out_free_irq_routing:
@@ -3046,6 +3050,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
 {
         int r;
         struct kvm *kvm;
+        struct file *file;
 
         kvm = kvm_create_vm(type);
         if (IS_ERR(kvm))
@@ -3057,17 +3062,25 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
                 return r;
         }
 #endif
-        r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
+        r = get_unused_fd_flags(O_CLOEXEC);
         if (r < 0) {
                 kvm_put_kvm(kvm);
                 return r;
         }
+        file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
+        if (IS_ERR(file)) {
+                put_unused_fd(r);
+                kvm_put_kvm(kvm);
+                return PTR_ERR(file);
+        }
 
         if (kvm_create_vm_debugfs(kvm, r) < 0) {
-                kvm_put_kvm(kvm);
+                put_unused_fd(r);
+                fput(file);
                 return -ENOMEM;
         }
+        fd_install(r, file);
 
         return r;
 }
@@ -3142,12 +3155,13 @@ static void hardware_enable_nolock(void *junk)
         }
 }
 
-static void hardware_enable(void)
+static int kvm_starting_cpu(unsigned int cpu)
 {
         raw_spin_lock(&kvm_count_lock);
         if (kvm_usage_count)
                 hardware_enable_nolock(NULL);
         raw_spin_unlock(&kvm_count_lock);
+        return 0;
 }
 
 static void hardware_disable_nolock(void *junk)
@@ -3160,12 +3174,13 @@ static void hardware_disable_nolock(void *junk)
         kvm_arch_hardware_disable();
 }
 
-static void hardware_disable(void)
+static int kvm_dying_cpu(unsigned int cpu)
 {
         raw_spin_lock(&kvm_count_lock);
         if (kvm_usage_count)
                 hardware_disable_nolock(NULL);
         raw_spin_unlock(&kvm_count_lock);
+        return 0;
 }
 
 static void hardware_disable_all_nolock(void)
@@ -3206,21 +3221,6 @@ static int hardware_enable_all(void)
         return r;
 }
 
-static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
-                           void *v)
-{
-        val &= ~CPU_TASKS_FROZEN;
-        switch (val) {
-        case CPU_DYING:
-                hardware_disable();
-                break;
-        case CPU_STARTING:
-                hardware_enable();
-                break;
-        }
-        return NOTIFY_OK;
-}
-
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                       void *v)
 {
@@ -3487,10 +3487,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
         return r;
 }
 
-static struct notifier_block kvm_cpu_notifier = {
-        .notifier_call = kvm_cpu_hotplug,
-};
-
 static int kvm_debugfs_open(struct inode *inode, struct file *file,
                             int (*get)(void *, u64 *), int (*set)(void *, u64),
                             const char *fmt)
@@ -3741,7 +3737,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                 goto out_free_1;
         }
 
-        r = register_cpu_notifier(&kvm_cpu_notifier);
+        r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING",
+                                      kvm_starting_cpu, kvm_dying_cpu);
         if (r)
                 goto out_free_2;
         register_reboot_notifier(&kvm_reboot_notifier);
@@ -3795,7 +3792,7 @@ out_free:
         kmem_cache_destroy(kvm_vcpu_cache);
 out_free_3:
         unregister_reboot_notifier(&kvm_reboot_notifier);
-        unregister_cpu_notifier(&kvm_cpu_notifier);
+        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 out_free_2:
 out_free_1:
         kvm_arch_hardware_unsetup();
@@ -3818,7 +3815,7 @@ void kvm_exit(void)
         kvm_async_pf_deinit();
         unregister_syscore_ops(&kvm_syscore_ops);
         unregister_reboot_notifier(&kvm_reboot_notifier);
-        unregister_cpu_notifier(&kvm_cpu_notifier);
+        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
         on_each_cpu(hardware_disable_nolock, NULL, 1);
         kvm_arch_hardware_unsetup();
         kvm_arch_exit();
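The kvm_dev_ioctl_create_vm() rework above splits anon_inode_getfd() into its parts so that the new VM is not reachable from userspace until setup has fully succeeded: the descriptor number is only reserved, the struct file is created separately, and fd_install() runs last, after the debugfs directory has been created. The following is a minimal sketch of that ordering, with a hypothetical my_setup() standing in for kvm_create_vm_debugfs() and my_fops/priv for the KVM-specific pieces:

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>

static const struct file_operations my_fops;    /* placeholder fops */

/* Hypothetical setup step standing in for kvm_create_vm_debugfs(). */
static int my_setup(void *priv, int fd)
{
        return 0;
}

static int create_my_fd(void *priv)
{
        struct file *file;
        int fd;

        /* Reserve a descriptor number, but publish nothing yet. */
        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        /* Create the backing file; still invisible to userspace. */
        file = anon_inode_getfile("my-dev", &my_fops, priv, O_RDWR);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                return PTR_ERR(file);
        }

        /* Any setup that may fail happens before the fd becomes live. */
        if (my_setup(priv, fd) < 0) {
                put_unused_fd(fd);
                fput(file);
                return -ENOMEM;
        }

        /*
         * Only now is the file published: after fd_install() another thread
         * can reach (and close) the fd, so no error paths may follow it.
         */
        fd_install(fd, file);
        return fd;
}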