Diffstat (limited to 'virt/kvm/arm/vgic/vgic-its.c')
-rw-r--r-- | virt/kvm/arm/vgic/vgic-its.c | 158
1 file changed, 114 insertions, 44 deletions
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 07411cf967b9..4660a7d04eea 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -51,7 +51,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
 
 	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
 	if (!irq)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&irq->lpi_list);
 	INIT_LIST_HEAD(&irq->ap_list);
@@ -441,39 +441,63 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
  * Find the target VCPU and the LPI number for a given devid/eventid pair
  * and make this IRQ pending, possibly injecting it.
  * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
  */
-static void vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
-				 u32 devid, u32 eventid)
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+				u32 devid, u32 eventid)
 {
+	struct kvm_vcpu *vcpu;
 	struct its_itte *itte;
 
 	if (!its->enabled)
-		return;
+		return -EBUSY;
 
 	itte = find_itte(its, devid, eventid);
-	/* Triggering an unmapped IRQ gets silently dropped. */
-	if (itte && its_is_collection_mapped(itte->collection)) {
-		struct kvm_vcpu *vcpu;
-
-		vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
-		if (vcpu && vcpu->arch.vgic_cpu.lpis_enabled) {
-			spin_lock(&itte->irq->irq_lock);
-			itte->irq->pending = true;
-			vgic_queue_irq_unlock(kvm, itte->irq);
-		}
-	}
+	if (!itte || !its_is_collection_mapped(itte->collection))
+		return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+	vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
+	if (!vcpu)
+		return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+	if (!vcpu->arch.vgic_cpu.lpis_enabled)
+		return -EBUSY;
+
+	spin_lock(&itte->irq->irq_lock);
+	itte->irq->pending = true;
+	vgic_queue_irq_unlock(kvm, itte->irq);
+
+	return 0;
+}
+
+static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+{
+	struct vgic_io_device *iodev;
+
+	if (dev->ops != &kvm_io_gic_ops)
+		return NULL;
+
+	iodev = container_of(dev, struct vgic_io_device, dev);
+
+	if (iodev->iodev_type != IODEV_ITS)
+		return NULL;
+
+	return iodev;
 }
 
 /*
  * Queries the KVM IO bus framework to get the ITS pointer from the given
  * doorbell address.
  * We then call vgic_its_trigger_msi() with the decoded data.
+ * According to the KVM_SIGNAL_MSI API description returns 1 on success.
  */
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
 	u64 address;
 	struct kvm_io_device *kvm_io_dev;
 	struct vgic_io_device *iodev;
+	int ret;
 
 	if (!vgic_has_its(kvm))
 		return -ENODEV;
@@ -485,15 +509,28 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 
 	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
 	if (!kvm_io_dev)
-		return -ENODEV;
+		return -EINVAL;
 
-	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
+	iodev = vgic_get_its_iodev(kvm_io_dev);
+	if (!iodev)
+		return -EINVAL;
 
 	mutex_lock(&iodev->its->its_lock);
-	vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
+	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
 	mutex_unlock(&iodev->its->its_lock);
 
-	return 0;
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
+	 * if the guest has blocked the MSI. So we map any LPI mapping
+	 * related error to that.
+	 */
+	if (ret)
+		return 0;
+	else
+		return 1;
 }
 
 /* Requires the its_lock to be held. */
@@ -502,7 +539,8 @@ static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
 	list_del(&itte->itte_list);
 
 	/* This put matches the get in vgic_add_lpi. */
-	vgic_put_irq(kvm, itte->irq);
+	if (itte->irq)
+		vgic_put_irq(kvm, itte->irq);
 
 	kfree(itte);
 }
@@ -697,6 +735,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 	struct its_device *device;
 	struct its_collection *collection, *new_coll = NULL;
 	int lpi_nr;
+	struct vgic_irq *irq;
 
 	device = find_its_device(its, device_id);
 	if (!device)
@@ -710,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
 		return E_ITS_MAPTI_PHYSICALID_OOR;
 
+	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
+	if (find_itte(its, device_id, event_id))
+		return 0;
+
 	collection = find_collection(its, coll_id);
 	if (!collection) {
 		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
@@ -718,22 +761,28 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 		new_coll = collection;
 	}
 
-	itte = find_itte(its, device_id, event_id);
+	itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
 	if (!itte) {
-		itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
-		if (!itte) {
-			if (new_coll)
-				vgic_its_free_collection(its, coll_id);
-			return -ENOMEM;
-		}
-
-		itte->event_id = event_id;
-		list_add_tail(&itte->itte_list, &device->itt_head);
+		if (new_coll)
+			vgic_its_free_collection(its, coll_id);
+		return -ENOMEM;
 	}
 
+	itte->event_id = event_id;
+	list_add_tail(&itte->itte_list, &device->itt_head);
+
 	itte->collection = collection;
 	itte->lpi = lpi_nr;
-	itte->irq = vgic_add_lpi(kvm, lpi_nr);
+
+	irq = vgic_add_lpi(kvm, lpi_nr);
+	if (IS_ERR(irq)) {
+		if (new_coll)
+			vgic_its_free_collection(its, coll_id);
+		its_free_itte(kvm, itte);
+		return PTR_ERR(irq);
+	}
+	itte->irq = irq;
+
 	update_affinity_itte(kvm, itte);
 
 	/*
@@ -981,9 +1030,7 @@ static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
 	u32 msi_data = its_cmd_get_id(its_cmd);
 	u64 msi_devid = its_cmd_get_deviceid(its_cmd);
 
-	vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
-
-	return 0;
+	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
 }
 
 /*
@@ -1288,13 +1335,13 @@ void vgic_enable_lpis(struct kvm_vcpu *vcpu)
 	its_sync_lpi_pending_table(vcpu);
 }
 
-static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
+static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
 {
 	struct vgic_io_device *iodev = &its->iodev;
 	int ret;
 
-	if (its->initialized)
-		return 0;
+	if (!its->initialized)
+		return -EBUSY;
 
 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
 		return -ENXIO;
@@ -1311,9 +1358,6 @@ static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
 				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
 	mutex_unlock(&kvm->slots_lock);
 
-	if (!ret)
-		its->initialized = true;
-
 	return ret;
 }
 
@@ -1435,9 +1479,6 @@ static int vgic_its_set_attr(struct kvm_device *dev,
 		if (type != KVM_VGIC_ITS_ADDR_TYPE)
 			return -ENODEV;
 
-		if (its->initialized)
-			return -EBUSY;
-
 		if (copy_from_user(&addr, uaddr, sizeof(addr)))
 			return -EFAULT;
 
@@ -1453,7 +1494,9 @@ static int vgic_its_set_attr(struct kvm_device *dev,
 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
 		switch (attr->attr) {
 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
-			return vgic_its_init_its(dev->kvm, its);
+			its->initialized = true;
+
+			return 0;
 		}
 		break;
 	}
@@ -1498,3 +1541,30 @@
 	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
 				       KVM_DEV_TYPE_ARM_VGIC_ITS);
 }
+
+/*
+ * Registers all ITSes with the kvm_io_bus framework.
+ * To follow the existing VGIC initialization sequence, this has to be
+ * done as late as possible, just before the first VCPU runs.
+ */
+int vgic_register_its_iodevs(struct kvm *kvm)
+{
+	struct kvm_device *dev;
+	int ret = 0;
+
+	list_for_each_entry(dev, &kvm->devices, vm_node) {
+		if (dev->ops != &kvm_arm_vgic_its_ops)
+			continue;
+
+		ret = vgic_register_its_iodev(kvm, dev->private);
+		if (ret)
+			return ret;
+		/*
+		 * We don't need to care about tearing down previously
+		 * registered ITSes, as the kvm_io_bus framework removes
+		 * them for us if the VM gets destroyed.
+		 */
+	}
+
+	return ret;
+}
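
For reference, the return-value convention the patch introduces can be shown outside the kernel. The sketch below is a minimal userspace illustration, not the kernel code: it only mirrors how vgic_its_inject_msi() folds the result of vgic_its_trigger_msi() into the KVM_SIGNAL_MSI contract (negative errno propagated, positive ITS mapping error reported as 0 "guest blocked the MSI", 0 reported as 1 "delivered"). The numeric value used for E_ITS_INT_UNMAPPED_INTERRUPT here is a stand-in, not taken from the source.

#include <errno.h>
#include <stdio.h>

#define E_ITS_INT_UNMAPPED_INTERRUPT	0x010307	/* stand-in value, for illustration only */

/* Mirrors the tail of vgic_its_inject_msi() after vgic_its_trigger_msi() returns. */
static int map_trigger_ret(int ret)
{
	if (ret < 0)
		return ret;	/* generic error: propagate the errno */
	if (ret)
		return 0;	/* positive ITS mapping error: guest "blocked" the MSI */
	return 1;		/* success: KVM_SIGNAL_MSI wants a value > 0 */
}

int main(void)
{
	printf("delivered    -> %d\n", map_trigger_ret(0));
	printf("unmapped LPI -> %d\n", map_trigger_ret(E_ITS_INT_UNMAPPED_INTERRUPT));
	printf("ITS disabled -> %d\n", map_trigger_ret(-EBUSY));
	return 0;
}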
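
Similarly, the new vgic_get_its_iodev() helper checks the device's ops pointer and its iodev_type tag before trusting container_of(), so that a foreign device registered at the doorbell address cannot be misinterpreted as an ITS. Below is a self-contained sketch of that lookup pattern; all structure, constant and function names are invented for illustration and are not the kernel's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_device_ops { int dummy; };
struct io_device { const struct io_device_ops *ops; };

enum iodev_type { IODEV_DIST, IODEV_ITS };

struct vgic_io_device_sketch {
	enum iodev_type iodev_type;
	struct io_device dev;		/* embedded generic bus device */
};

static const struct io_device_ops gic_ops;	/* stands in for kvm_io_gic_ops */

static struct vgic_io_device_sketch *get_its_iodev(struct io_device *dev)
{
	struct vgic_io_device_sketch *iodev;

	/* A foreign device on the bus has different ops: bail out early. */
	if (dev->ops != &gic_ops)
		return NULL;

	iodev = container_of(dev, struct vgic_io_device_sketch, dev);

	/* The ops match, but it could still be another VGIC region type. */
	if (iodev->iodev_type != IODEV_ITS)
		return NULL;

	return iodev;
}

int main(void)
{
	struct vgic_io_device_sketch its  = { .iodev_type = IODEV_ITS,  .dev = { .ops = &gic_ops } };
	struct vgic_io_device_sketch dist = { .iodev_type = IODEV_DIST, .dev = { .ops = &gic_ops } };

	printf("its  -> %s\n", get_its_iodev(&its.dev)  ? "found" : "rejected");
	printf("dist -> %s\n", get_its_iodev(&dist.dev) ? "found" : "rejected");
	return 0;
}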