From 7cbc084dc22ca4adb8fd741502e43f29b577abfb Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 13 Apr 2016 09:43:59 +0100 Subject: KVM: arm/arm64: vgic: streamline vgic_update_irq_pending() interface We actually don't use the irq_phys_map parameter in vgic_update_irq_pending(), so let's just remove it. Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 60668a7f319a..f6c61720763d 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1521,7 +1521,6 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) } static int vgic_update_irq_pending(struct kvm *kvm, int cpuid, - struct irq_phys_map *map, unsigned int irq_num, bool level) { struct vgic_dist *dist = &kvm->arch.vgic; @@ -1660,7 +1659,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, if (map) return -EINVAL; - return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level); + return vgic_update_irq_pending(kvm, cpuid, irq_num, level); } /** @@ -1686,7 +1685,7 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, if (ret) return ret; - return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level); + return vgic_update_irq_pending(kvm, cpuid, map->virt_irq, level); } static irqreturn_t vgic_maintenance_handler(int irq, void *data) -- cgit v1.2.1 From 4f551a3d96a2de85a041ee60e806bda1d5b06255 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 13 Apr 2016 09:48:02 +0100 Subject: KVM: arm/arm64: vgic: avoid map in kvm_vgic_inject_mapped_irq() When we want to inject a hardware mapped IRQ into a guest, we actually only need the virtual IRQ number from the irq_phys_map. So let's pass this number directly from the arch timer to the VGIC to avoid using the map as a parameter. Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/arch_timer.c | 2 +- virt/kvm/arm/vgic.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 409db3304471..a9c6c1c59d38 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -177,7 +177,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level) trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq, timer->irq.level); ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id, - timer->map, + timer->map->virt_irq, timer->irq.level); WARN_ON(ret); } diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index f6c61720763d..81c557c0c3fd 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1666,7 +1666,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic * @kvm: The VM structure pointer * @cpuid: The CPU for PPIs - * @map: Pointer to a irq_phys_map structure describing the mapping + * @virt_irq: The virtual IRQ to be injected * @level: Edge-triggered: true: to trigger the interrupt * false: to ignore the call * Level-sensitive true: raise the input signal @@ -1677,7 +1677,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, * being HIGH and 0 being LOW and all devices being active-HIGH. 
*/ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, - struct irq_phys_map *map, bool level) + unsigned int virt_irq, bool level) { int ret; @@ -1685,7 +1685,7 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, if (ret) return ret; - return vgic_update_irq_pending(kvm, cpuid, map->virt_irq, level); + return vgic_update_irq_pending(kvm, cpuid, virt_irq, level); } static irqreturn_t vgic_maintenance_handler(int irq, void *data) -- cgit v1.2.1 From e262f4193638fff2de458f0c70284f0cb50926a7 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 13 Apr 2016 10:03:49 +0100 Subject: KVM: arm/arm64: vgic: avoid map in kvm_vgic_map_is_active() For getting the active state of a mapped IRQ, we actually only need the virtual IRQ number, not the pointer to the mapping entry. Pass the virtual IRQ number from the arch timer to the VGIC directly. Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/arch_timer.c | 6 ++---- virt/kvm/arm/vgic.c | 6 +++--- 2 files changed, 5 insertions(+), 7 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index a9c6c1c59d38..37f82c1c6bb7 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -274,10 +274,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) * to ensure that hardware interrupts from the timer triggers a guest * exit. */ - if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map)) - phys_active = true; - else - phys_active = false; + phys_active = timer->irq.level || + kvm_vgic_map_is_active(vcpu, timer->map->virt_irq); /* * We want to avoid hitting the (re)distributor as much as diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 81c557c0c3fd..2fd43a6146a9 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1102,18 +1102,18 @@ static bool dist_active_irq(struct kvm_vcpu *vcpu) return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); } -bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map) +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq) { int i; for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) { struct vgic_lr vlr = vgic_get_lr(vcpu, i); - if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE) + if (vlr.irq == virt_irq && vlr.state & LR_STATE_ACTIVE) return true; } - return vgic_irq_is_active(vcpu, map->virt_irq); + return vgic_irq_is_active(vcpu, virt_irq); } /* -- cgit v1.2.1 From 63306c28ac92bdf9e41aef367708d762f9f725f2 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 13 Apr 2016 10:04:06 +0100 Subject: KVM: arm/arm64: vgic: avoid map in kvm_vgic_unmap_phys_irq() kvm_vgic_unmap_phys_irq() only needs the virtual IRQ number, so let's just pass that between the arch timer and the VGIC to get rid of the irq_phys_map pointer. 
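For illustration, after this group of changes the arch timer hands the VGIC plain virtual IRQ numbers everywhere; a condensed sketch of the resulting call sites (taken from the timer hunks in this series, with the level argument abbreviated):

    kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
                               timer->map->virt_irq, level);
    kvm_vgic_map_is_active(vcpu, timer->map->virt_irq);
    kvm_vgic_unmap_phys_irq(vcpu, timer->map->virt_irq);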
Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/arch_timer.c | 2 +- virt/kvm/arm/vgic.c | 11 ++++------- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 37f82c1c6bb7..962b442d0d77 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -486,7 +486,7 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) timer_disarm(timer); if (timer->map) - kvm_vgic_unmap_phys_irq(vcpu, timer->map); + kvm_vgic_unmap_phys_irq(vcpu, timer->map->virt_irq); } void kvm_timer_enable(struct kvm *kvm) diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 2fd43a6146a9..06abd59dcbe3 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1812,25 +1812,22 @@ static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu) /** * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping * @vcpu: The VCPU pointer - * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq + * @virt_irq: The virtual IRQ number to be unmapped * * Remove an existing mapping between virtual and physical interrupts. */ -int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map) +int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq) { struct vgic_dist *dist = &vcpu->kvm->arch.vgic; struct irq_phys_map_entry *entry; struct list_head *root; - if (!map) - return -EINVAL; - - root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq); + root = vgic_get_irq_phys_map_list(vcpu, virt_irq); spin_lock(&dist->irq_phys_map_lock); list_for_each_entry(entry, root, entry) { - if (&entry->map == map) { + if (entry->map.virt_irq == virt_irq) { list_del_rcu(&entry->entry); call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu); break; -- cgit v1.2.1 From b452cb52072d21f026e38ac7af36a969bab2ed22 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sat, 4 Jun 2016 15:41:00 +0100 Subject: KVM: arm/arm64: Remove the IRQ field from struct irq_phys_map The communication of a Linux IRQ number from outside the VGIC to the vgic was a leftover from the day when the vgic code cared about how a particular device injects virtual interrupts mapped to a physical interrupt. We can safely remove this notion, leaving all physical IRQ handling to be done in the device driver (the arch timer in this case), which makes room for a saner API for the new VGIC. 
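For illustration, a minimal sketch of the hardware IRQ lookup that now lives in the arch timer; the helper name host_irq_to_hwirq() is purely illustrative (the patch open-codes the same walk in kvm_timer_vcpu_reset()) and it assumes the irq descriptor declarations this patch already pulls into arch_timer.c:

    static int host_irq_to_hwirq(unsigned int host_irq)
    {
            struct irq_desc *desc = irq_to_desc(host_irq);
            struct irq_data *data;

            if (!desc)
                    return -EINVAL;

            /* walk down to the root irq_data to reach the hardware IRQ number */
            data = irq_desc_get_irq_data(desc);
            while (data->parent_data)
                    data = data->parent_data;

            return data->hwirq;
    }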
Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara Reviewed-by: Eric Auger --- virt/kvm/arm/arch_timer.c | 23 +++++++++++++++++++++-- virt/kvm/arm/vgic.c | 28 ++++++---------------------- 2 files changed, 27 insertions(+), 24 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 962b442d0d77..e45895a0153d 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -300,7 +301,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) if (timer->active_cleared_last && !phys_active) return; - ret = irq_set_irqchip_state(timer->map->irq, + ret = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, phys_active); WARN_ON(ret); @@ -333,6 +334,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct irq_phys_map *map; + struct irq_desc *desc; + struct irq_data *data; + int phys_irq; /* * The vcpu timer irq number cannot be determined in @@ -351,11 +355,26 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, timer->cntv_ctl = 0; kvm_timer_update_state(vcpu); + /* + * Find the physical IRQ number corresponding to the host_vtimer_irq + */ + desc = irq_to_desc(host_vtimer_irq); + if (!desc) { + kvm_err("%s: no interrupt descriptor\n", __func__); + return -EINVAL; + } + + data = irq_desc_get_irq_data(desc); + while (data->parent_data) + data = data->parent_data; + + phys_irq = data->hwirq; + /* * Tell the VGIC that the virtual interrupt is tied to a * physical interrupt. We do that once per VCPU. */ - map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq); + map = kvm_vgic_map_phys_irq(vcpu, irq->irq, phys_irq); if (WARN_ON(IS_ERR(map))) return PTR_ERR(map); diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 06abd59dcbe3..a7e496abfca2 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1711,38 +1711,24 @@ static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu, /** * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ * @vcpu: The VCPU pointer - * @virt_irq: The virtual irq number - * @irq: The Linux IRQ number + * @virt_irq: The virtual IRQ number for the guest + * @phys_irq: The hardware IRQ number of the host * * Establish a mapping between a guest visible irq (@virt_irq) and a - * Linux irq (@irq). On injection, @virt_irq will be associated with - * the physical interrupt represented by @irq. This mapping can be + * hardware irq (@phys_irq). On injection, @virt_irq will be associated with + * the physical interrupt represented by @phys_irq. This mapping can be * established multiple times as long as the parameters are the same. 
* * Returns a valid pointer on success, and an error pointer otherwise */ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, - int virt_irq, int irq) + int virt_irq, int phys_irq) { struct vgic_dist *dist = &vcpu->kvm->arch.vgic; struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq); struct irq_phys_map *map; struct irq_phys_map_entry *entry; - struct irq_desc *desc; - struct irq_data *data; - int phys_irq; - desc = irq_to_desc(irq); - if (!desc) { - kvm_err("%s: no interrupt descriptor\n", __func__); - return ERR_PTR(-EINVAL); - } - - data = irq_desc_get_irq_data(desc); - while (data->parent_data) - data = data->parent_data; - - phys_irq = data->hwirq; /* Create a new mapping */ entry = kzalloc(sizeof(*entry), GFP_KERNEL); @@ -1755,8 +1741,7 @@ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, map = vgic_irq_map_search(vcpu, virt_irq); if (map) { /* Make sure this mapping matches */ - if (map->phys_irq != phys_irq || - map->irq != irq) + if (map->phys_irq != phys_irq) map = ERR_PTR(-EINVAL); /* Found an existing, valid mapping */ @@ -1766,7 +1751,6 @@ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, map = &entry->map; map->virt_irq = virt_irq; map->phys_irq = phys_irq; - map->irq = irq; list_add_tail_rcu(&entry->entry, root); -- cgit v1.2.1 From a7e33ad9b25552b75a2523cc598db8bcd218ede5 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 13 Apr 2016 11:03:02 +0100 Subject: KVM: arm/arm64: arch_timer: Remove irq_phys_map Now that the interface between the arch timer and the VGIC does not require passing the irq_phys_map entry pointer anymore, let's remove it from the virtual arch timer and use the virtual IRQ number instead directly. The remaining pointer returned by kvm_vgic_map_phys_irq() will be removed in the following patch. Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/arch_timer.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index e45895a0153d..458d4d8ce795 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -175,10 +175,10 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level) timer->active_cleared_last = false; timer->irq.level = new_level; - trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq, + trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq, timer->irq.level); ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id, - timer->map->virt_irq, + timer->irq.irq, timer->irq.level); WARN_ON(ret); } @@ -276,7 +276,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) * exit. 
*/ phys_active = timer->irq.level || - kvm_vgic_map_is_active(vcpu, timer->map->virt_irq); + kvm_vgic_map_is_active(vcpu, timer->irq.irq); /* * We want to avoid hitting the (re)distributor as much as @@ -378,7 +378,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, if (WARN_ON(IS_ERR(map))) return PTR_ERR(map); - timer->map = map; return 0; } @@ -504,8 +503,7 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; timer_disarm(timer); - if (timer->map) - kvm_vgic_unmap_phys_irq(vcpu, timer->map->virt_irq); + kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq); } void kvm_timer_enable(struct kvm *kvm) -- cgit v1.2.1 From c8eb3f6b9bc31abc0ab3230737fde1639c8b1ea6 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 13 Apr 2016 11:49:07 +0100 Subject: KVM: arm/arm64: vgic: Remove irq_phys_map from interface Now that the virtual arch timer does not care about the irq_phys_map anymore, let's rework kvm_vgic_map_phys_irq() to return an error value instead. Any reference to that mapping can later be done by passing the correct combination of VCPU and virtual IRQ number. This makes the irq_phys_map handling completely private to the VGIC code. Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/arch_timer.c | 7 +------ virt/kvm/arm/vgic.c | 15 +++++++-------- 2 files changed, 8 insertions(+), 14 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 458d4d8ce795..3232105e6afd 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -333,7 +333,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, const struct kvm_irq_level *irq) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - struct irq_phys_map *map; struct irq_desc *desc; struct irq_data *data; int phys_irq; @@ -374,11 +373,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, * Tell the VGIC that the virtual interrupt is tied to a * physical interrupt. We do that once per VCPU. */ - map = kvm_vgic_map_phys_irq(vcpu, irq->irq, phys_irq); - if (WARN_ON(IS_ERR(map))) - return PTR_ERR(map); - - return 0; + return kvm_vgic_map_phys_irq(vcpu, irq->irq, phys_irq); } void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index a7e496abfca2..91d42a8a5f72 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1719,21 +1719,20 @@ static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu, * the physical interrupt represented by @phys_irq. This mapping can be * established multiple times as long as the parameters are the same. * - * Returns a valid pointer on success, and an error pointer otherwise + * Returns 0 on success or an error value otherwise. 
*/ -struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, - int virt_irq, int phys_irq) +int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq) { struct vgic_dist *dist = &vcpu->kvm->arch.vgic; struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq); struct irq_phys_map *map; struct irq_phys_map_entry *entry; - + int ret = 0; /* Create a new mapping */ entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) - return ERR_PTR(-ENOMEM); + return -ENOMEM; spin_lock(&dist->irq_phys_map_lock); @@ -1742,7 +1741,7 @@ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, if (map) { /* Make sure this mapping matches */ if (map->phys_irq != phys_irq) - map = ERR_PTR(-EINVAL); + ret = -EINVAL; /* Found an existing, valid mapping */ goto out; @@ -1758,9 +1757,9 @@ out: spin_unlock(&dist->irq_phys_map_lock); /* If we've found a hit in the existing list, free the useless * entry */ - if (IS_ERR(map) || map != &entry->map) + if (ret || map != &entry->map) kfree(entry); - return map; + return ret; } static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu, -- cgit v1.2.1 From 41a54482c010d8806cf56e1501bb3b61fac14cf9 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 18 May 2016 16:26:00 +0100 Subject: KVM: arm/arm64: Move timer IRQ map to latest possible time We are about to modify the VGIC to allocate all data structures dynamically and store mapped IRQ information on a per-IRQ struct, which is indeed allocated dynamically at init time. Therefore, we cannot record the mapped IRQ info from the timer at timer reset time like it's done now, because VCPU reset happens before timer init. A possible later time to do this is on the first run of a per VCPU, it just requires us to move the enable state to be a per-VCPU state and do the lookup of the physical IRQ number when we are about to run the VCPU. Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara --- virt/kvm/arm/arch_timer.c | 66 ++++++++++++++++++++++++++------------------- virt/kvm/arm/hyp/timer-sr.c | 5 ++-- 2 files changed, 40 insertions(+), 31 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 3232105e6afd..e2d5b6f988fb 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -197,7 +197,7 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu) * because the guest would never see the interrupt. Instead wait * until we call this function from kvm_timer_flush_hwstate. */ - if (!vgic_initialized(vcpu->kvm)) + if (!vgic_initialized(vcpu->kvm) || !timer->enabled) return -ENODEV; if (kvm_timer_should_fire(vcpu) != timer->irq.level) @@ -333,9 +333,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, const struct kvm_irq_level *irq) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - struct irq_desc *desc; - struct irq_data *data; - int phys_irq; /* * The vcpu timer irq number cannot be determined in @@ -354,26 +351,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, timer->cntv_ctl = 0; kvm_timer_update_state(vcpu); - /* - * Find the physical IRQ number corresponding to the host_vtimer_irq - */ - desc = irq_to_desc(host_vtimer_irq); - if (!desc) { - kvm_err("%s: no interrupt descriptor\n", __func__); - return -EINVAL; - } - - data = irq_desc_get_irq_data(desc); - while (data->parent_data) - data = data->parent_data; - - phys_irq = data->hwirq; - - /* - * Tell the VGIC that the virtual interrupt is tied to a - * physical interrupt. We do that once per VCPU. 
- */ - return kvm_vgic_map_phys_irq(vcpu, irq->irq, phys_irq); + return 0; } void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) @@ -501,10 +479,40 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq); } -void kvm_timer_enable(struct kvm *kvm) +int kvm_timer_enable(struct kvm_vcpu *vcpu) { - if (kvm->arch.timer.enabled) - return; + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + struct irq_desc *desc; + struct irq_data *data; + int phys_irq; + int ret; + + if (timer->enabled) + return 0; + + /* + * Find the physical IRQ number corresponding to the host_vtimer_irq + */ + desc = irq_to_desc(host_vtimer_irq); + if (!desc) { + kvm_err("%s: no interrupt descriptor\n", __func__); + return -EINVAL; + } + + data = irq_desc_get_irq_data(desc); + while (data->parent_data) + data = data->parent_data; + + phys_irq = data->hwirq; + + /* + * Tell the VGIC that the virtual interrupt is tied to a + * physical interrupt. We do that once per VCPU. + */ + ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq); + if (ret) + return ret; + /* * There is a potential race here between VCPUs starting for the first @@ -515,7 +523,9 @@ void kvm_timer_enable(struct kvm *kvm) * the arch timers are enabled. */ if (timecounter && wqueue) - kvm->arch.timer.enabled = 1; + timer->enabled = 1; + + return 0; } void kvm_timer_init(struct kvm *kvm) diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c index ea00d69e7078..798866a8d875 100644 --- a/virt/kvm/arm/hyp/timer-sr.c +++ b/virt/kvm/arm/hyp/timer-sr.c @@ -24,11 +24,10 @@ /* vcpu is already in the HYP VA space */ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) { - struct kvm *kvm = kern_hyp_va(vcpu->kvm); struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; u64 val; - if (kvm->arch.timer.enabled) { + if (timer->enabled) { timer->cntv_ctl = read_sysreg_el0(cntv_ctl); timer->cntv_cval = read_sysreg_el0(cntv_cval); } @@ -60,7 +59,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) val |= CNTHCTL_EL1PCTEN; write_sysreg(val, cnthctl_el2); - if (kvm->arch.timer.enabled) { + if (timer->enabled) { write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); write_sysreg_el0(timer->cntv_cval, cntv_cval); isb(); -- cgit v1.2.1 From 2db4c104fa2a9af12c07433642e2e4fee37fe2fd Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 6 Apr 2016 14:48:53 +0200 Subject: KVM: arm/arm64: Get rid of vgic_cpu->nr_lr The number of list registers is a property of the underlying system, not of emulated VGIC CPU interface. As we are about to move this variable to global state in the new vgic for clarity, move it from the legacy implementation as well to make the merge of the new code easier. 
Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara Reviewed-by: Andre Przywara --- virt/kvm/arm/hyp/vgic-v2-sr.c | 12 +++++++----- virt/kvm/arm/vgic-v2.c | 4 +++- virt/kvm/arm/vgic.c | 12 ++---------- 3 files changed, 12 insertions(+), 16 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c index 674bdf8ecf4f..caac41f48815 100644 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c @@ -21,11 +21,13 @@ #include +extern struct vgic_params vgic_v2_params; + static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, void __iomem *base) { struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - int nr_lr = vcpu->arch.vgic_cpu.nr_lr; + int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr; u32 eisr0, eisr1; int i; bool expect_mi; @@ -67,7 +69,7 @@ static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base) { struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - int nr_lr = vcpu->arch.vgic_cpu.nr_lr; + int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr; u32 elrsr0, elrsr1; elrsr0 = readl_relaxed(base + GICH_ELRSR0); @@ -86,7 +88,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base) static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base) { struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - int nr_lr = vcpu->arch.vgic_cpu.nr_lr; + int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr; int i; for (i = 0; i < nr_lr; i++) { @@ -141,13 +143,13 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; struct vgic_dist *vgic = &kvm->arch.vgic; void __iomem *base = kern_hyp_va(vgic->vctrl_base); - int i, nr_lr; + int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr; + int i; u64 live_lrs = 0; if (!base) return; - nr_lr = vcpu->arch.vgic_cpu.nr_lr; for (i = 0; i < nr_lr; i++) if (cpu_if->vgic_lr[i] & GICH_LR_STATE) diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c index 7e826c9b2b0a..334cd7a89106 100644 --- a/virt/kvm/arm/vgic-v2.c +++ b/virt/kvm/arm/vgic-v2.c @@ -171,7 +171,7 @@ static const struct vgic_ops vgic_v2_ops = { .enable = vgic_v2_enable, }; -static struct vgic_params vgic_v2_params; +struct vgic_params __section(.hyp.text) vgic_v2_params; static void vgic_cpu_init_lrs(void *params) { @@ -201,6 +201,8 @@ int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info, const struct resource *vctrl_res = &gic_kvm_info->vctrl; const struct resource *vcpu_res = &gic_kvm_info->vcpu; + memset(vgic, 0, sizeof(*vgic)); + if (!gic_kvm_info->maint_irq) { kvm_err("error getting vgic maintenance irq\n"); ret = -ENXIO; diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 91d42a8a5f72..f76bb6407243 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -690,12 +690,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, */ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) { - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; u64 elrsr = vgic_get_elrsr(vcpu); unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr); int i; - for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) { + for_each_clear_bit(i, elrsr_ptr, vgic->nr_lr) { struct vgic_lr lr = vgic_get_lr(vcpu, i); /* @@ -1106,7 +1105,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq) { int i; - for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) { + for (i = 0; i < vgic->nr_lr; i++) { struct vgic_lr vlr = 
vgic_get_lr(vcpu, i); if (vlr.irq == virt_irq && vlr.state & LR_STATE_ACTIVE) @@ -1866,13 +1865,6 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) return -ENOMEM; } - /* - * Store the number of LRs per vcpu, so we don't have to go - * all the way to the distributor structure to find out. Only - * assembly code should use this one. - */ - vgic_cpu->nr_lr = vgic->nr_lr; - return 0; } -- cgit v1.2.1 From 83091db981e105d97562d3ed3ffe676e21927e3a Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 29 Mar 2016 14:29:28 +0200 Subject: KVM: arm/arm64: Fix MMIO emulation data handling When the kernel was handling a guest MMIO read access internally, we need to copy the emulation result into the run->mmio structure in order for the kvm_handle_mmio_return() function to pick it up and inject the result back into the guest. Currently the only user of kvm_io_bus for ARM is the VGIC, which did this copying itself, so this was not causing issues so far. But with the upcoming new vgic implementation we need this done properly. Update the kvm_handle_mmio_return description and cleanup the code to only perform a single copying when needed. Code and commit message inspired by Andre Przywara. Reported-by: Andre Przywara Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara Reviewed-by: Marc Zyngier Reviewed-by: Andre Przywara --- virt/kvm/arm/vgic.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index f76bb6407243..c3bfbb981e73 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -819,7 +819,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu, struct vgic_dist *dist = &vcpu->kvm->arch.vgic; struct vgic_io_device *iodev = container_of(this, struct vgic_io_device, dev); - struct kvm_run *run = vcpu->run; const struct vgic_io_range *range; struct kvm_exit_mmio mmio; bool updated_state; @@ -848,12 +847,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu, updated_state = false; } spin_unlock(&dist->lock); - run->mmio.is_write = is_write; - run->mmio.len = len; - run->mmio.phys_addr = addr; - memcpy(run->mmio.data, val, len); - - kvm_handle_mmio_return(vcpu, run); if (updated_state) vgic_kick_vcpus(vcpu->kvm); -- cgit v1.2.1 From 2defaff48aaf16072a6eac4cf8234917197dfa72 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Mon, 7 Mar 2016 17:32:29 +0700 Subject: KVM: arm/arm64: pmu: abstract access to number of SPIs Currently the PMU uses a member of the struct vgic_dist directly, which not only breaks abstraction, but will fail with the new VGIC. Abstract this access in the VGIC header file and refactor the validity check in the PMU code. Signed-off-by: Andre Przywara --- virt/kvm/arm/pmu.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 575c7aa30d7e..a027569facfa 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -436,7 +436,14 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) return 0; } -static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi) +#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS) + +/* + * For one VM the interrupt type must be same for each vcpu. + * As a PPI, the interrupt number is the same for all vcpus, + * while as an SPI it must be a separate number per vcpu. 
+ */ +static bool pmu_irq_is_valid(struct kvm *kvm, int irq) { int i; struct kvm_vcpu *vcpu; @@ -445,7 +452,7 @@ static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi) if (!kvm_arm_pmu_irq_initialized(vcpu)) continue; - if (is_ppi) { + if (irq_is_ppi(irq)) { if (vcpu->arch.pmu.irq_num != irq) return false; } else { @@ -457,7 +464,6 @@ static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi) return true; } - int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { switch (attr->attr) { @@ -471,14 +477,11 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) if (get_user(irq, uaddr)) return -EFAULT; - /* - * The PMU overflow interrupt could be a PPI or SPI, but for one - * VM the interrupt type must be same for each vcpu. As a PPI, - * the interrupt number is the same for all vcpus, while as an - * SPI it must be a separate number per vcpu. - */ - if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs || - !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS)) + /* The PMU overflow interrupt can be a PPI or a valid SPI. */ + if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq))) + return -EINVAL; + + if (!pmu_irq_is_valid(vcpu->kvm, irq)) return -EINVAL; if (kvm_arm_pmu_irq_initialized(vcpu)) -- cgit v1.2.1 From 44bfc42e94cd76a0bd44f3fce98d4a7b76f31bc0 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 4 May 2016 14:35:48 +0100 Subject: KVM: arm/arm64: move GICv2 emulation defines into arm-gic-v3.h As (some) GICv3 hosts can emulate a GICv2, some GICv2 specific masks for the list register definition also apply to GICv3 LRs. At the moment we have those definitions in the KVM VGICv3 implementation, so let's move them into the GICv3 header file to have them automatically defined. Signed-off-by: Andre Przywara Acked-by: Marc Zyngier --- virt/kvm/arm/vgic-v3.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index c02a1b1cf855..75b02fa86436 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -29,12 +29,6 @@ #include #include -/* These are for GICv2 emulation only */ -#define GICH_LR_VIRTUALID (0x3ffUL << 0) -#define GICH_LR_PHYSID_CPUID_SHIFT (10) -#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) -#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1) - static u32 ich_vtr_el2; static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) @@ -43,7 +37,7 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr]; if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) - lr_desc.irq = val & ICH_LR_VIRTUALID_MASK; + lr_desc.irq = val & ICH_LR_VIRTUAL_ID_MASK; else lr_desc.irq = val & GICH_LR_VIRTUALID; -- cgit v1.2.1 From 64a959d66e47039833e7f3c8d7e50fd4afa078ca Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 24 Nov 2015 16:51:12 +0100 Subject: KVM: arm/arm64: vgic-new: Add acccessor to new struct vgic_irq instance The new VGIC implementation centers around a struct vgic_irq instance per virtual IRQ. Provide a function to retrieve the right instance for a given IRQ number and (in case of private interrupts) the right VCPU. 
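For illustration, a minimal sketch of how callers are expected to use the accessor; the kvm/vcpu/intid variables are placeholders, and the irq_lock handling mirrors the injection path added in the next patch:

    struct vgic_irq *irq;

    /* vcpu is only consulted for private interrupts (SGIs and PPIs) */
    irq = vgic_get_irq(kvm, vcpu, intid);
    if (!irq)
            return -EINVAL;

    spin_lock(&irq->irq_lock);
    /* ... inspect or modify the per-IRQ state ... */
    spin_unlock(&irq->irq_lock);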
Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Acked-by: Marc Zyngier --- virt/kvm/arm/vgic/vgic.c | 41 +++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 22 ++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 virt/kvm/arm/vgic/vgic.c create mode 100644 virt/kvm/arm/vgic/vgic.h (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c new file mode 100644 index 000000000000..fb45537451b2 --- /dev/null +++ b/virt/kvm/arm/vgic/vgic.c @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + +#include "vgic.h" + +struct vgic_global __section(.hyp.text) kvm_vgic_global_state; + +struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, + u32 intid) +{ + /* SGIs and PPIs */ + if (intid <= VGIC_MAX_PRIVATE) + return &vcpu->arch.vgic_cpu.private_irqs[intid]; + + /* SPIs */ + if (intid <= VGIC_MAX_SPI) + return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS]; + + /* LPIs are not yet covered */ + if (intid >= VGIC_MIN_LPI) + return NULL; + + WARN(1, "Looking up struct vgic_irq for reserved INTID"); + return NULL; +} diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h new file mode 100644 index 000000000000..61b8d226081b --- /dev/null +++ b/virt/kvm/arm/vgic/vgic.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __KVM_ARM_VGIC_NEW_H__ +#define __KVM_ARM_VGIC_NEW_H__ + +struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, + u32 intid); + +#endif -- cgit v1.2.1 From 81eeb95ddbabbb998a6b39f762bc7edaa2a979b4 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 25 Nov 2015 10:02:16 -0800 Subject: KVM: arm/arm64: vgic-new: Implement virtual IRQ injection Provide a vgic_queue_irq_unlock() function which decides whether a given IRQ needs to be queued to a VCPU's ap_list. This should be called whenever an IRQ becomes pending or enabled, either as a result of userspace injection, from in-kernel emulated devices like the architected timer or from MMIO accesses to the distributor emulation. Also provides the necessary functions to allow userland to inject an IRQ to a guest. Since this is the first code that starts using our locking mechanism, we add some (hopefully) clear documentation of our locking strategy and requirements along with this patch. 
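For illustration, a minimal sketch of the documented lock ordering when two VCPUs' ap_lists have to be held at once; the helper name is hypothetical, but the body matches the vcpuA/vcpuB pattern used by vgic_prune_ap_list() later in this series:

    static void vgic_lock_two_ap_lists(struct kvm_vcpu *x, struct kvm_vcpu *y)
    {
            /* always take the lowest-numbered VCPU's ap_list_lock first */
            struct kvm_vcpu *a = x->vcpu_id < y->vcpu_id ? x : y;
            struct kvm_vcpu *b = x->vcpu_id < y->vcpu_id ? y : x;

            spin_lock(&a->arch.vgic_cpu.ap_list_lock);
            spin_lock_nested(&b->arch.vgic_cpu.ap_list_lock,
                             SINGLE_DEPTH_NESTING);
    }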
Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara --- virt/kvm/arm/vgic/vgic.c | 211 +++++++++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 1 + 2 files changed, 212 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index fb45537451b2..ada1d02a02b4 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -19,8 +19,31 @@ #include "vgic.h" +#define CREATE_TRACE_POINTS +#include "../trace.h" + +#ifdef CONFIG_DEBUG_SPINLOCK +#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p) +#else +#define DEBUG_SPINLOCK_BUG_ON(p) +#endif + struct vgic_global __section(.hyp.text) kvm_vgic_global_state; +/* + * Locking order is always: + * vgic_cpu->ap_list_lock + * vgic_irq->irq_lock + * + * (that is, always take the ap_list_lock before the struct vgic_irq lock). + * + * When taking more than one ap_list_lock at the same time, always take the + * lowest numbered VCPU's ap_list_lock first, so: + * vcpuX->vcpu_id < vcpuY->vcpu_id: + * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); + * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); + */ + struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid) { @@ -39,3 +62,191 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, WARN(1, "Looking up struct vgic_irq for reserved INTID"); return NULL; } + +/** + * kvm_vgic_target_oracle - compute the target vcpu for an irq + * + * @irq: The irq to route. Must be already locked. + * + * Based on the current state of the interrupt (enabled, pending, + * active, vcpu and target_vcpu), compute the next vcpu this should be + * given to. Return NULL if this shouldn't be injected at all. + * + * Requires the IRQ lock to be held. + */ +static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) +{ + DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); + + /* If the interrupt is active, it must stay on the current vcpu */ + if (irq->active) + return irq->vcpu ? : irq->target_vcpu; + + /* + * If the IRQ is not active but enabled and pending, we should direct + * it to its configured target VCPU. + * If the distributor is disabled, pending interrupts shouldn't be + * forwarded. + */ + if (irq->enabled && irq->pending) { + if (unlikely(irq->target_vcpu && + !irq->target_vcpu->kvm->arch.vgic.enabled)) + return NULL; + + return irq->target_vcpu; + } + + /* If neither active nor pending and enabled, then this IRQ should not + * be queued to any VCPU. + */ + return NULL; +} + +/* + * Only valid injection if changing level for level-triggered IRQs or for a + * rising edge. + */ +static bool vgic_validate_injection(struct vgic_irq *irq, bool level) +{ + switch (irq->config) { + case VGIC_CONFIG_LEVEL: + return irq->line_level != level; + case VGIC_CONFIG_EDGE: + return level; + } + + return false; +} + +/* + * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list. + * Do the queuing if necessary, taking the right locks in the right order. + * Returns true when the IRQ was queued, false otherwise. + * + * Needs to be entered with the IRQ lock already held, but will return + * with all locks dropped. + */ +bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) +{ + struct kvm_vcpu *vcpu; + + DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); + +retry: + vcpu = vgic_target_oracle(irq); + if (irq->vcpu || !vcpu) { + /* + * If this IRQ is already on a VCPU's ap_list, then it + * cannot be moved or modified and there is no more work for + * us to do. 
+ * + * Otherwise, if the irq is not pending and enabled, it does + * not need to be inserted into an ap_list and there is also + * no more work for us to do. + */ + spin_unlock(&irq->irq_lock); + return false; + } + + /* + * We must unlock the irq lock to take the ap_list_lock where + * we are going to insert this new pending interrupt. + */ + spin_unlock(&irq->irq_lock); + + /* someone can do stuff here, which we re-check below */ + + spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); + spin_lock(&irq->irq_lock); + + /* + * Did something change behind our backs? + * + * There are two cases: + * 1) The irq lost its pending state or was disabled behind our + * backs and/or it was queued to another VCPU's ap_list. + * 2) Someone changed the affinity on this irq behind our + * backs and we are now holding the wrong ap_list_lock. + * + * In both cases, drop the locks and retry. + */ + + if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { + spin_unlock(&irq->irq_lock); + spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); + + spin_lock(&irq->irq_lock); + goto retry; + } + + list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); + irq->vcpu = vcpu; + + spin_unlock(&irq->irq_lock); + spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); + + kvm_vcpu_kick(vcpu); + + return true; +} + +static int vgic_update_irq_pending(struct kvm *kvm, int cpuid, + unsigned int intid, bool level, + bool mapped_irq) +{ + struct kvm_vcpu *vcpu; + struct vgic_irq *irq; + int ret; + + trace_vgic_update_irq_pending(cpuid, intid, level); + + vcpu = kvm_get_vcpu(kvm, cpuid); + if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS) + return -EINVAL; + + irq = vgic_get_irq(kvm, vcpu, intid); + if (!irq) + return -EINVAL; + + if (irq->hw != mapped_irq) + return -EINVAL; + + spin_lock(&irq->irq_lock); + + if (!vgic_validate_injection(irq, level)) { + /* Nothing to see here, move along... */ + spin_unlock(&irq->irq_lock); + return 0; + } + + if (irq->config == VGIC_CONFIG_LEVEL) { + irq->line_level = level; + irq->pending = level || irq->soft_pending; + } else { + irq->pending = true; + } + + vgic_queue_irq_unlock(kvm, irq); + + return 0; +} + +/** + * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic + * @kvm: The VM structure pointer + * @cpuid: The CPU for PPIs + * @intid: The INTID to inject a new state to. + * @level: Edge-triggered: true: to trigger the interrupt + * false: to ignore the call + * Level-sensitive true: raise the input signal + * false: lower the input signal + * + * The VGIC is not concerned with devices being active-LOW or active-HIGH for + * level-sensitive interrupts. You can think of the level parameter as 1 + * being HIGH and 0 being LOW and all devices being active-HIGH. + */ +int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, + bool level) +{ + return vgic_update_irq_pending(kvm, cpuid, intid, level, false); +} diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 61b8d226081b..c6257679e091 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -18,5 +18,6 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid); +bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq); #endif -- cgit v1.2.1 From 8e4447457965a7ddc576d0b9fd34c9905eb0416d Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 25 Nov 2015 10:02:16 -0800 Subject: KVM: arm/arm64: vgic-new: Add IRQ sorting Adds the sorting function to cover the case where you have more IRQs to consider than you have LRs. We now consider priorities. 
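For illustration, a worked example of the ordering the comparator below produces (IRQ numbers and priorities are made up; lower GIC priority values mean higher priority):

    /*
     * With ap_list = { SPI 40 (pending+enabled, prio 0xa0),
     * PPI 27 (active), SPI 35 (pending+enabled, prio 0x20) },
     * sorting yields: PPI 27, SPI 35, SPI 40, i.e. active IRQs
     * first, then pending+enabled IRQs by ascending priority value.
     */
    list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);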
Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Marc Zyngier --- virt/kvm/arm/vgic/vgic.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index ada1d02a02b4..bce17dea4677 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -16,6 +16,7 @@ #include #include +#include #include "vgic.h" @@ -102,6 +103,62 @@ static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) return NULL; } +/* + * The order of items in the ap_lists defines how we'll pack things in LRs as + * well, the first items in the list being the first things populated in the + * LRs. + * + * A hard rule is that active interrupts can never be pushed out of the LRs + * (and therefore take priority) since we cannot reliably trap on deactivation + * of IRQs and therefore they have to be present in the LRs. + * + * Otherwise things should be sorted by the priority field and the GIC + * hardware support will take care of preemption of priority groups etc. + * + * Return negative if "a" sorts before "b", 0 to preserve order, and positive + * to sort "b" before "a". + */ +static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list); + struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list); + bool penda, pendb; + int ret; + + spin_lock(&irqa->irq_lock); + spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); + + if (irqa->active || irqb->active) { + ret = (int)irqb->active - (int)irqa->active; + goto out; + } + + penda = irqa->enabled && irqa->pending; + pendb = irqb->enabled && irqb->pending; + + if (!penda || !pendb) { + ret = (int)pendb - (int)penda; + goto out; + } + + /* Both pending and enabled, sort by priority */ + ret = irqa->priority - irqb->priority; +out: + spin_unlock(&irqb->irq_lock); + spin_unlock(&irqa->irq_lock); + return ret; +} + +/* Must be called with the ap_list_lock held */ +static void vgic_sort_ap_list(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + + DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); + + list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp); +} + /* * Only valid injection if changing level for level-triggered IRQs or for a * rising edge. -- cgit v1.2.1 From 0919e84c0fc1fc73525fdcedefab89ea8460f697 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 26 Nov 2015 17:19:25 +0000 Subject: KVM: arm/arm64: vgic-new: Add IRQ sync/flush framework Implement the framework for syncing IRQs between our emulation and the list registers, which represent the guest's view of IRQs. This is done in kvm_vgic_flush_hwstate and kvm_vgic_sync_hwstate, which gets called on guest entry and exit. The code talking to the actual GICv2/v3 hardware is added in the following patches. 
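For illustration, a minimal sketch of where the two entry points sit in the (heavily simplified) VCPU run loop; the actual caller lives in arch code outside this directory:

    kvm_vgic_flush_hwstate(vcpu);   /* emulated state -> list registers */
    /* ... run the guest ... */
    kvm_vgic_sync_hwstate(vcpu);    /* list registers -> emulated state */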
Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic.c | 192 +++++++++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 2 + 2 files changed, 194 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index bce17dea4677..08a862a98442 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -307,3 +307,195 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, { return vgic_update_irq_pending(kvm, cpuid, intid, level, false); } + +/** + * vgic_prune_ap_list - Remove non-relevant interrupts from the list + * + * @vcpu: The VCPU pointer + * + * Go over the list of "interesting" interrupts, and prune those that we + * won't have to consider in the near future. + */ +static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_irq *irq, *tmp; + +retry: + spin_lock(&vgic_cpu->ap_list_lock); + + list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { + struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; + + spin_lock(&irq->irq_lock); + + BUG_ON(vcpu != irq->vcpu); + + target_vcpu = vgic_target_oracle(irq); + + if (!target_vcpu) { + /* + * We don't need to process this interrupt any + * further, move it off the list. + */ + list_del(&irq->ap_list); + irq->vcpu = NULL; + spin_unlock(&irq->irq_lock); + continue; + } + + if (target_vcpu == vcpu) { + /* We're on the right CPU */ + spin_unlock(&irq->irq_lock); + continue; + } + + /* This interrupt looks like it has to be migrated. */ + + spin_unlock(&irq->irq_lock); + spin_unlock(&vgic_cpu->ap_list_lock); + + /* + * Ensure locking order by always locking the smallest + * ID first. + */ + if (vcpu->vcpu_id < target_vcpu->vcpu_id) { + vcpuA = vcpu; + vcpuB = target_vcpu; + } else { + vcpuA = target_vcpu; + vcpuB = vcpu; + } + + spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); + spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, + SINGLE_DEPTH_NESTING); + spin_lock(&irq->irq_lock); + + /* + * If the affinity has been preserved, move the + * interrupt around. Otherwise, it means things have + * changed while the interrupt was unlocked, and we + * need to replay this. + * + * In all cases, we cannot trust the list not to have + * changed, so we restart from the beginning. + */ + if (target_vcpu == vgic_target_oracle(irq)) { + struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu; + + list_del(&irq->ap_list); + irq->vcpu = target_vcpu; + list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); + } + + spin_unlock(&irq->irq_lock); + spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); + spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); + goto retry; + } + + spin_unlock(&vgic_cpu->ap_list_lock); +} + +static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu) +{ +} + +static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) +{ +} + +/* Requires the irq_lock to be held. */ +static inline void vgic_populate_lr(struct kvm_vcpu *vcpu, + struct vgic_irq *irq, int lr) +{ + DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); +} + +static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr) +{ +} + +static inline void vgic_set_underflow(struct kvm_vcpu *vcpu) +{ +} + +/* Requires the ap_list_lock to be held. 
*/ +static int compute_ap_list_depth(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_irq *irq; + int count = 0; + + DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); + + list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { + spin_lock(&irq->irq_lock); + /* GICv2 SGIs can count for more than one... */ + if (vgic_irq_is_sgi(irq->intid) && irq->source) + count += hweight8(irq->source); + else + count++; + spin_unlock(&irq->irq_lock); + } + return count; +} + +/* Requires the VCPU's ap_list_lock to be held. */ +static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_irq *irq; + int count = 0; + + DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); + + if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) { + vgic_set_underflow(vcpu); + vgic_sort_ap_list(vcpu); + } + + list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { + spin_lock(&irq->irq_lock); + + if (unlikely(vgic_target_oracle(irq) != vcpu)) + goto next; + + /* + * If we get an SGI with multiple sources, try to get + * them in all at once. + */ + do { + vgic_populate_lr(vcpu, irq, count++); + } while (irq->source && count < kvm_vgic_global_state.nr_lr); + +next: + spin_unlock(&irq->irq_lock); + + if (count == kvm_vgic_global_state.nr_lr) + break; + } + + vcpu->arch.vgic_cpu.used_lrs = count; + + /* Nuke remaining LRs */ + for ( ; count < kvm_vgic_global_state.nr_lr; count++) + vgic_clear_lr(vcpu, count); +} + +/* Sync back the hardware VGIC state into our emulation after a guest's run. */ +void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) +{ + vgic_process_maintenance_interrupt(vcpu); + vgic_fold_lr_state(vcpu); + vgic_prune_ap_list(vcpu); +} + +/* Flush our emulation state into the GIC hardware before entering the guest. */ +void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) +{ + spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); + vgic_flush_lr_state(vcpu); + spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); +} diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index c6257679e091..29b96b96a30b 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -16,6 +16,8 @@ #ifndef __KVM_ARM_VGIC_NEW_H__ #define __KVM_ARM_VGIC_NEW_H__ +#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS) + struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid); bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq); -- cgit v1.2.1 From 140b086dd19771410915a924db2e635c2b51a0f4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 26 Nov 2015 17:19:25 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICv2 world switch backend Processing maintenance interrupts and accessing the list registers are dependent on the host's GIC version. Introduce vgic-v2.c to contain GICv2 specific functions. Implement the GICv2 specific code for syncing the emulation state into the VGIC registers. 
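For illustration, a worked example of the encoding performed by vgic_v2_populate_lr() below (the numbers are made up): a pending, level-triggered, non-hardware-mapped SPI 40 with priority 0xa0 ends up packed as

    u32 val = 40 | GICH_LR_PENDING_BIT | GICH_LR_EOI |
              ((0xa0 >> 3) << GICH_LR_PRIORITY_SHIFT);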
Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-v2.c | 176 ++++++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.c | 6 ++ virt/kvm/arm/vgic/vgic.h | 6 ++ 3 files changed, 188 insertions(+) create mode 100644 virt/kvm/arm/vgic/vgic-v2.c (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c new file mode 100644 index 000000000000..fb5e65ceffd0 --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include "vgic.h" + +/* + * Call this function to convert a u64 value to an unsigned long * bitmask + * in a way that works on both 32-bit and 64-bit LE and BE platforms. + * + * Warning: Calling this function may modify *val. + */ +static unsigned long *u64_to_bitmask(u64 *val) +{ +#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32 + *val = (*val >> 32) | (*val << 32); +#endif + return (unsigned long *)val; +} + +void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) +{ + struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; + + if (cpuif->vgic_misr & GICH_MISR_EOI) { + u64 eisr = cpuif->vgic_eisr; + unsigned long *eisr_bmap = u64_to_bitmask(&eisr); + int lr; + + for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) { + u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID; + + WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE); + + kvm_notify_acked_irq(vcpu->kvm, 0, + intid - VGIC_NR_PRIVATE_IRQS); + } + } + + /* check and disable underflow maintenance IRQ */ + cpuif->vgic_hcr &= ~GICH_HCR_UIE; + + /* + * In the next iterations of the vcpu loop, if we sync the + * vgic state after flushing it, but before entering the guest + * (this happens for pending signals and vmid rollovers), then + * make sure we don't pick up any old maintenance interrupts + * here. 
+ */ + cpuif->vgic_eisr = 0; +} + +void vgic_v2_set_underflow(struct kvm_vcpu *vcpu) +{ + struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; + + cpuif->vgic_hcr |= GICH_HCR_UIE; +} + +/* + * transfer the content of the LRs back into the corresponding ap_list: + * - active bit is transferred as is + * - pending bit is + * - transferred as is in case of edge sensitive IRQs + * - set to the line-level (resample time) for level sensitive IRQs + */ +void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) +{ + struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; + int lr; + + for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) { + u32 val = cpuif->vgic_lr[lr]; + u32 intid = val & GICH_LR_VIRTUALID; + struct vgic_irq *irq; + + irq = vgic_get_irq(vcpu->kvm, vcpu, intid); + + spin_lock(&irq->irq_lock); + + /* Always preserve the active bit */ + irq->active = !!(val & GICH_LR_ACTIVE_BIT); + + /* Edge is the only case where we preserve the pending bit */ + if (irq->config == VGIC_CONFIG_EDGE && + (val & GICH_LR_PENDING_BIT)) { + irq->pending = true; + + if (vgic_irq_is_sgi(intid)) { + u32 cpuid = val & GICH_LR_PHYSID_CPUID; + + cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT; + irq->source |= (1 << cpuid); + } + } + + /* Clear soft pending state when level IRQs have been acked */ + if (irq->config == VGIC_CONFIG_LEVEL && + !(val & GICH_LR_PENDING_BIT)) { + irq->soft_pending = false; + irq->pending = irq->line_level; + } + + spin_unlock(&irq->irq_lock); + } +} + +/* + * Populates the particular LR with the state of a given IRQ: + * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq + * - for a level sensitive IRQ the pending state value is unchanged; + * it is dictated directly by the input level + * + * If @irq describes an SGI with multiple sources, we choose the + * lowest-numbered source VCPU and clear that bit in the source bitmap. + * + * The irq_lock must be held by the caller. + */ +void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) +{ + u32 val = irq->intid; + + if (irq->pending) { + val |= GICH_LR_PENDING_BIT; + + if (irq->config == VGIC_CONFIG_EDGE) + irq->pending = false; + + if (vgic_irq_is_sgi(irq->intid)) { + u32 src = ffs(irq->source); + + BUG_ON(!src); + val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; + irq->source &= ~(1 << (src - 1)); + if (irq->source) + irq->pending = true; + } + } + + if (irq->active) + val |= GICH_LR_ACTIVE_BIT; + + if (irq->hw) { + val |= GICH_LR_HW; + val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT; + } else { + if (irq->config == VGIC_CONFIG_LEVEL) + val |= GICH_LR_EOI; + } + + /* The GICv2 LR only holds five bits of priority. */ + val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT; + + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val; +} + +void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr) +{ + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0; +} diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 08a862a98442..44d2533ac84e 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -400,10 +400,12 @@ retry: static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu) { + vgic_v2_process_maintenance(vcpu); } static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) { + vgic_v2_fold_lr_state(vcpu); } /* Requires the irq_lock to be held. 
*/ @@ -411,14 +413,18 @@ static inline void vgic_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) { DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); + + vgic_v2_populate_lr(vcpu, irq, lr); } static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr) { + vgic_v2_clear_lr(vcpu, lr); } static inline void vgic_set_underflow(struct kvm_vcpu *vcpu) { + vgic_v2_set_underflow(vcpu); } /* Requires the ap_list_lock to be held. */ diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 29b96b96a30b..0db490e491ef 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -22,4 +22,10 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid); bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq); +void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu); +void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); +void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); +void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); +void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); + #endif -- cgit v1.2.1 From 59529f69f5048e50dcde3434661981c01f8208b4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 30 Nov 2015 13:09:53 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICv3 world switch backend As the GICv3 virtual interface registers differ from their GICv2 siblings, we need different handlers for processing maintenance interrupts and reading/writing to the LRs. Implement the respective handler functions and connect them to existing code to be called if the host is using a GICv3. Signed-off-by: Marc Zyngier Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-v3.c | 162 ++++++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.c | 25 +++++-- virt/kvm/arm/vgic/vgic.h | 29 ++++++++ 3 files changed, 211 insertions(+), 5 deletions(-) create mode 100644 virt/kvm/arm/vgic/vgic-v3.c (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c new file mode 100644 index 000000000000..fb547da7a43d --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -0,0 +1,162 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include "vgic.h" + +void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; + u32 model = vcpu->kvm->arch.vgic.vgic_model; + + if (cpuif->vgic_misr & ICH_MISR_EOI) { + unsigned long eisr_bmap = cpuif->vgic_eisr; + int lr; + + for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) { + u32 intid; + u64 val = cpuif->vgic_lr[lr]; + + if (model == KVM_DEV_TYPE_ARM_VGIC_V3) + intid = val & ICH_LR_VIRTUAL_ID_MASK; + else + intid = val & GICH_LR_VIRTUALID; + + WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE); + + kvm_notify_acked_irq(vcpu->kvm, 0, + intid - VGIC_NR_PRIVATE_IRQS); + } + + /* + * In the next iterations of the vcpu loop, if we sync + * the vgic state after flushing it, but before + * entering the guest (this happens for pending + * signals and vmid rollovers), then make sure we + * don't pick up any old maintenance interrupts here. + */ + cpuif->vgic_eisr = 0; + } + + cpuif->vgic_hcr &= ~ICH_HCR_UIE; +} + +void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; + + cpuif->vgic_hcr |= ICH_HCR_UIE; +} + +void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; + u32 model = vcpu->kvm->arch.vgic.vgic_model; + int lr; + + for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) { + u64 val = cpuif->vgic_lr[lr]; + u32 intid; + struct vgic_irq *irq; + + if (model == KVM_DEV_TYPE_ARM_VGIC_V3) + intid = val & ICH_LR_VIRTUAL_ID_MASK; + else + intid = val & GICH_LR_VIRTUALID; + irq = vgic_get_irq(vcpu->kvm, vcpu, intid); + + spin_lock(&irq->irq_lock); + + /* Always preserve the active bit */ + irq->active = !!(val & ICH_LR_ACTIVE_BIT); + + /* Edge is the only case where we preserve the pending bit */ + if (irq->config == VGIC_CONFIG_EDGE && + (val & ICH_LR_PENDING_BIT)) { + irq->pending = true; + + if (vgic_irq_is_sgi(intid) && + model == KVM_DEV_TYPE_ARM_VGIC_V2) { + u32 cpuid = val & GICH_LR_PHYSID_CPUID; + + cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT; + irq->source |= (1 << cpuid); + } + } + + /* Clear soft pending state when level irqs have been acked */ + if (irq->config == VGIC_CONFIG_LEVEL && + !(val & ICH_LR_PENDING_BIT)) { + irq->soft_pending = false; + irq->pending = irq->line_level; + } + + spin_unlock(&irq->irq_lock); + } +} + +/* Requires the irq to be locked already */ +void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) +{ + u32 model = vcpu->kvm->arch.vgic.vgic_model; + u64 val = irq->intid; + + if (irq->pending) { + val |= ICH_LR_PENDING_BIT; + + if (irq->config == VGIC_CONFIG_EDGE) + irq->pending = false; + + if (vgic_irq_is_sgi(irq->intid) && + model == KVM_DEV_TYPE_ARM_VGIC_V2) { + u32 src = ffs(irq->source); + + BUG_ON(!src); + val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; + irq->source &= ~(1 << (src - 1)); + if (irq->source) + irq->pending = true; + } + } + + if (irq->active) + val |= ICH_LR_ACTIVE_BIT; + + if (irq->hw) { + val |= ICH_LR_HW; + val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT; + } else { + if (irq->config == VGIC_CONFIG_LEVEL) + val |= ICH_LR_EOI; + } + + /* + * We currently only support Group1 interrupts, which is a + * known defect. This needs to be addressed at some point. 
+ */ + if (model == KVM_DEV_TYPE_ARM_VGIC_V3) + val |= ICH_LR_GROUP; + + val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT; + + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val; +} + +void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr) +{ + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0; +} diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 44d2533ac84e..0bf0d2060053 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -400,12 +400,18 @@ retry: static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu) { - vgic_v2_process_maintenance(vcpu); + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_process_maintenance(vcpu); + else + vgic_v3_process_maintenance(vcpu); } static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) { - vgic_v2_fold_lr_state(vcpu); + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_fold_lr_state(vcpu); + else + vgic_v3_fold_lr_state(vcpu); } /* Requires the irq_lock to be held. */ @@ -414,17 +420,26 @@ static inline void vgic_populate_lr(struct kvm_vcpu *vcpu, { DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); - vgic_v2_populate_lr(vcpu, irq, lr); + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_populate_lr(vcpu, irq, lr); + else + vgic_v3_populate_lr(vcpu, irq, lr); } static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr) { - vgic_v2_clear_lr(vcpu, lr); + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_clear_lr(vcpu, lr); + else + vgic_v3_clear_lr(vcpu, lr); } static inline void vgic_set_underflow(struct kvm_vcpu *vcpu) { - vgic_v2_set_underflow(vcpu); + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_set_underflow(vcpu); + else + vgic_v3_set_underflow(vcpu); } /* Requires the ap_list_lock to be held. */ diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 0db490e491ef..81b1a20dfbc5 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -28,4 +28,33 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); +#ifdef CONFIG_KVM_ARM_VGIC_V3 +void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu); +void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); +void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); +void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); +void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); +#else +static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu) +{ +} + +static inline void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) +{ +} + +static inline void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, + struct vgic_irq *irq, int lr) +{ +} + +static inline void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr) +{ +} + +static inline void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) +{ +} +#endif + #endif -- cgit v1.2.1 From 90eee56c5f9081686035972dfcddfd6c85cafdf9 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 7 Dec 2015 15:30:38 +0000 Subject: KVM: arm/arm64: vgic-new: Implement kvm_vgic_vcpu_pending_irq Tell KVM whether a particular VCPU has an IRQ that needs handling in the guest. This is used to decide whether a VCPU is runnable. 
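For illustration only: the value returned by kvm_vgic_vcpu_pending_irq() typically feeds the arch-level "is this VCPU runnable" decision. The sketch below shows what such a caller could look like; the surrounding field names (irq_lines, power_off, pause) are assumptions for the example and are not defined by this series.

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	/*
	 * Runnable if either a directly asserted IRQ/FIQ line or a VGIC
	 * interrupt is pending, and the VCPU is neither powered off nor
	 * paused (field names assumed, not taken from this patch).
	 */
	return (!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v)) &&
	       !v->arch.power_off && !v->arch.pause;
}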
Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall Reviewed-by: Marc Zyngier --- virt/kvm/arm/vgic/vgic.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 0bf0d2060053..7e010087224c 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -520,3 +520,28 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) vgic_flush_lr_state(vcpu); spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); } + +int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_irq *irq; + bool pending = false; + + if (!vcpu->kvm->arch.vgic.enabled) + return false; + + spin_lock(&vgic_cpu->ap_list_lock); + + list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { + spin_lock(&irq->irq_lock); + pending = irq->pending && irq->enabled; + spin_unlock(&irq->irq_lock); + + if (pending) + break; + } + + spin_unlock(&vgic_cpu->ap_list_lock); + + return pending; +} -- cgit v1.2.1 From 4493b1c4866a03963a35be7d157c911a617a3694 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 26 Apr 2016 11:06:12 +0100 Subject: KVM: arm/arm64: vgic-new: Add MMIO handling framework Add an MMIO handling framework to the VGIC emulation: Each register is described by its offset, size (or number of bits per IRQ, if applicable) and the read/write handler functions. We provide initialization macros to describe each GIC register later easily. Separate dispatch functions for read and write accesses are connected to the kvm_io_bus framework and binary-search for the responsible register handler based on the offset address within the region. We convert the incoming data (referenced by a pointer) to the host's endianess and use pass-by-value to hand the data over to the actual handler functions. The register handler prototype and the endianess conversion are courtesy of Christoffer Dall. Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio.c | 184 ++++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.h | 99 +++++++++++++++++++++++ 2 files changed, 283 insertions(+) create mode 100644 virt/kvm/arm/vgic/vgic-mmio.c create mode 100644 virt/kvm/arm/vgic/vgic-mmio.h (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c new file mode 100644 index 000000000000..012b82bec918 --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -0,0 +1,184 @@ +/* + * VGIC MMIO handling functions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "vgic.h" +#include "vgic-mmio.h" + +unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return 0; +} + +unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return -1UL; +} + +void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr, + unsigned int len, unsigned long val) +{ + /* Ignore */ +} + +static int match_region(const void *key, const void *elt) +{ + const unsigned int offset = (unsigned long)key; + const struct vgic_register_region *region = elt; + + if (offset < region->reg_offset) + return -1; + + if (offset >= region->reg_offset + region->len) + return 1; + + return 0; +} + +/* Find the proper register handler entry given a certain address offset. */ +static const struct vgic_register_region * +vgic_find_mmio_region(const struct vgic_register_region *region, int nr_regions, + unsigned int offset) +{ + return bsearch((void *)(uintptr_t)offset, region, nr_regions, + sizeof(region[0]), match_region); +} + +/* + * kvm_mmio_read_buf() returns a value in a format where it can be converted + * to a byte array and be directly observed as the guest wanted it to appear + * in memory if it had done the store itself, which is LE for the GIC, as the + * guest knows the GIC is always LE. + * + * We convert this value to the CPUs native format to deal with it as a data + * value. + */ +unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len) +{ + unsigned long data = kvm_mmio_read_buf(val, len); + + switch (len) { + case 1: + return data; + case 2: + return le16_to_cpu(data); + case 4: + return le32_to_cpu(data); + default: + return le64_to_cpu(data); + } +} + +/* + * kvm_mmio_write_buf() expects a value in a format such that if converted to + * a byte array it is observed as the guest would see it if it could perform + * the load directly. Since the GIC is LE, and the guest knows this, the + * guest expects a value in little endian format. + * + * We convert the data value from the CPUs native format to LE so that the + * value is returned in the proper format. + */ +void vgic_data_host_to_mmio_bus(void *buf, unsigned int len, + unsigned long data) +{ + switch (len) { + case 1: + break; + case 2: + data = cpu_to_le16(data); + break; + case 4: + data = cpu_to_le32(data); + break; + default: + data = cpu_to_le64(data); + } + + kvm_mmio_write_buf(buf, len, data); +} + +static +struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev) +{ + return container_of(dev, struct vgic_io_device, dev); +} + +static bool check_region(const struct vgic_register_region *region, + gpa_t addr, int len) +{ + if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1) + return true; + if ((region->access_flags & VGIC_ACCESS_32bit) && + len == sizeof(u32) && !(addr & 3)) + return true; + if ((region->access_flags & VGIC_ACCESS_64bit) && + len == sizeof(u64) && !(addr & 7)) + return true; + + return false; +} + +static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev); + const struct vgic_register_region *region; + struct kvm_vcpu *r_vcpu; + unsigned long data; + + region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, + addr - iodev->base_addr); + if (!region || !check_region(region, addr, len)) { + memset(val, 0, len); + return 0; + } + + r_vcpu = iodev->redist_vcpu ? 
iodev->redist_vcpu : vcpu; + data = region->read(r_vcpu, addr, len); + vgic_data_host_to_mmio_bus(val, len, data); + return 0; +} + +static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev); + const struct vgic_register_region *region; + struct kvm_vcpu *r_vcpu; + unsigned long data = vgic_data_mmio_bus_to_host(val, len); + + region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, + addr - iodev->base_addr); + if (!region) + return 0; + + if (!check_region(region, addr, len)) + return 0; + + r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu; + region->write(r_vcpu, addr, len, data); + return 0; +} + +struct kvm_io_device_ops kvm_io_gic_ops = { + .read = dispatch_mmio_read, + .write = dispatch_mmio_write, +}; diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h new file mode 100644 index 000000000000..3023ecf221c6 --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __KVM_ARM_VGIC_MMIO_H__ +#define __KVM_ARM_VGIC_MMIO_H__ + +struct vgic_register_region { + unsigned int reg_offset; + unsigned int len; + unsigned int bits_per_irq; + unsigned int access_flags; + unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr, + unsigned int len); + void (*write)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, + unsigned long val); +}; + +extern struct kvm_io_device_ops kvm_io_gic_ops; + +#define VGIC_ACCESS_8bit 1 +#define VGIC_ACCESS_32bit 2 +#define VGIC_ACCESS_64bit 4 + +/* + * Generate a mask that covers the number of bytes required to address + * up to 1024 interrupts, each represented by bits. This assumes + * that is a power of two. + */ +#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1) + +/* + * (addr & mask) gives us the byte offset for the INT ID, so we want to + * divide this with 'bytes per irq' to get the INT ID, which is given + * by '(bits) / 8'. But we do this with fixed-point-arithmetic and + * take advantage of the fact that division by a fraction equals + * multiplication with the inverted fraction, and scale up both the + * numerator and denominator with 8 to support at most 64 bits per IRQ: + */ +#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \ + 64 / (bits) / 8) + +/* + * Some VGIC registers store per-IRQ information, with a different number + * of bits per IRQ. For those registers this macro is used. + * The _WITH_LENGTH version instantiates registers with a fixed length + * and is mutually exclusive with the _PER_IRQ version. 
+ */ +#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, bpi, acc) \ + { \ + .reg_offset = off, \ + .bits_per_irq = bpi, \ + .len = bpi * 1024 / 8, \ + .access_flags = acc, \ + .read = rd, \ + .write = wr, \ + } + +#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc) \ + { \ + .reg_offset = off, \ + .bits_per_irq = 0, \ + .len = length, \ + .access_flags = acc, \ + .read = rd, \ + .write = wr, \ + } + +int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu, + struct vgic_register_region *reg_desc, + struct vgic_io_device *region, + int nr_irqs, bool offset_private); + +unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len); + +void vgic_data_host_to_mmio_bus(void *buf, unsigned int len, + unsigned long data); + +unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + +unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + +void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr, + unsigned int len, unsigned long val); + +#endif -- cgit v1.2.1 From fb848db39661a1243f6ae939ef7e9251a765b972 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 26 Apr 2016 21:32:49 +0100 Subject: KVM: arm/arm64: vgic-new: Add GICv2 MMIO handling framework Create vgic-mmio-v2.c to describe GICv2 emulation specific handlers using the initializer macros provided by the VGIC MMIO framework. Provide a function to register the GICv2 distributor registers to the kvm_io_bus framework. The actual handler functions are still stubs in this patch. Signed-off-by: Andre Przywara Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 76 ++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.c | 26 ++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.h | 2 ++ virt/kvm/arm/vgic/vgic.h | 2 ++ 4 files changed, 106 insertions(+) create mode 100644 virt/kvm/arm/vgic/vgic-mmio-v2.c (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c new file mode 100644 index 000000000000..a3e31a93a3fb --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -0,0 +1,76 @@ +/* + * VGICv2 MMIO handling functions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +#include "vgic.h" +#include "vgic-mmio.h" + +static const struct vgic_register_region vgic_v2_dist_registers[] = { + REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL, + vgic_mmio_read_raz, vgic_mmio_write_wi, 12, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP, + vgic_mmio_read_rao, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET, + vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, + vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, + vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI, + vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET, + vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG, + vgic_mmio_read_raz, vgic_mmio_write_wi, 2, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT, + vgic_mmio_read_raz, vgic_mmio_write_wi, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 16, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET, + vgic_mmio_read_raz, vgic_mmio_write_wi, 16, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), +}; + +unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev) +{ + dev->regions = vgic_v2_dist_registers; + dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers); + + kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops); + + return SZ_4K; +} diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 012b82bec918..1a977654681d 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -182,3 +182,29 @@ struct kvm_io_device_ops kvm_io_gic_ops = { .read = dispatch_mmio_read, .write = dispatch_mmio_write, }; + +int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, + enum vgic_type type) +{ + struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev; + int ret = 0; + unsigned int len; + + switch (type) { + case VGIC_V2: + len = vgic_v2_init_dist_iodev(io_device); + break; + default: + BUG_ON(1); + } + + io_device->base_addr = dist_base_address; + io_device->redist_vcpu = NULL; + + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, + len, &io_device->dev); + mutex_unlock(&kvm->slots_lock); + + return ret; +} diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 3023ecf221c6..5b928d4b197e 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -96,4 +96,6 @@ unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu, void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); +unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); + #endif diff --git a/virt/kvm/arm/vgic/vgic.h 
b/virt/kvm/arm/vgic/vgic.h index 81b1a20dfbc5..fd9acaa1e305 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -27,6 +27,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); +int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, + enum vgic_type); #ifdef CONFIG_KVM_ARM_VGIC_V3 void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu); -- cgit v1.2.1 From 2b0cda8789654bfcebca397daebc37aff081bd75 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 26 Apr 2016 11:06:47 +0100 Subject: KVM: arm/arm64: vgic-new: Add CTLR, TYPER and IIDR handlers Those three registers are v2 emulation specific, so their implementation lives entirely in vgic-mmio-v2.c. Also they are handled in one function, as their implementation is pretty simple. When the guest enables the distributor, we kick all VCPUs to get potentially pending interrupts serviced. Signed-off-by: Marc Zyngier Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 46 +++++++++++++++++++++++++++++++++++++++- virt/kvm/arm/vgic/vgic.c | 15 +++++++++++++ virt/kvm/arm/vgic/vgic.h | 4 ++++ 3 files changed, 64 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index a3e31a93a3fb..d812c933708a 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -20,9 +20,53 @@ #include "vgic.h" #include "vgic-mmio.h" +static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 value; + + switch (addr & 0x0c) { + case GIC_DIST_CTRL: + value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0; + break; + case GIC_DIST_CTR: + value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; + value = (value >> 5) - 1; + value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; + break; + case GIC_DIST_IIDR: + value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); + break; + default: + return 0; + } + + return value; +} + +static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + bool was_enabled = dist->enabled; + + switch (addr & 0x0c) { + case GIC_DIST_CTRL: + dist->enabled = val & GICD_ENABLE; + if (!was_enabled && dist->enabled) + vgic_kick_vcpus(vcpu->kvm); + break; + case GIC_DIST_CTR: + case GIC_DIST_IIDR: + /* Nothing to do */ + return; + } +} + static const struct vgic_register_region vgic_v2_dist_registers[] = { REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL, - vgic_mmio_read_raz, vgic_mmio_write_wi, 12, + vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP, vgic_mmio_read_rao, vgic_mmio_write_wi, 1, diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 7e010087224c..12ae84b4931f 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -545,3 +545,18 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) return pending; } + +void vgic_kick_vcpus(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int c; + + /* + * We've injected an interrupt, time to find out who deserves + * a good kick... 
+ */ + kvm_for_each_vcpu(c, vcpu, kvm) { + if (kvm_vgic_vcpu_pending_irq(vcpu)) + kvm_vcpu_kick(vcpu); + } +} diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index fd9acaa1e305..cf620157e1e4 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -16,11 +16,15 @@ #ifndef __KVM_ARM_VGIC_NEW_H__ #define __KVM_ARM_VGIC_NEW_H__ +#define PRODUCT_ID_KVM 0x4b /* ASCII code K */ +#define IMPLEMENTER_ARM 0x43b + #define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS) struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid); bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq); +void vgic_kick_vcpus(struct kvm *kvm); void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu); void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); -- cgit v1.2.1 From fd122e620983003c376aca56892ac14a34a38d57 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 14:33:05 +0000 Subject: KVM: arm/arm64: vgic-new: Add ENABLE registers handlers As the enable register handlers are shared between the v2 and v3 emulation, their implementation goes into vgic-mmio.c, to be easily referenced from the v3 emulation as well later. Signed-off-by: Andre Przywara Reviewed-by: Marc Zyngier Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 4 +-- virt/kvm/arm/vgic/vgic-mmio.c | 56 ++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.h | 11 ++++++++ 3 files changed, 69 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index d812c933708a..d5355b502f4a 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -72,10 +72,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_rao, vgic_mmio_write_wi, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET, - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + vgic_mmio_read_enable, vgic_mmio_write_senable, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR, - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, vgic_mmio_read_raz, vgic_mmio_write_wi, 1, diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 1a977654681d..32ed8dbd93d6 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -39,6 +39,62 @@ void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr, /* Ignore */ } +/* + * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value + * of the enabled bit, so there is only one function for both here. 
+ */ +unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + u32 value = 0; + int i; + + /* Loop over all IRQs affected by this read */ + for (i = 0; i < len * 8; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + if (irq->enabled) + value |= (1U << i); + } + + return value; +} + +void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + int i; + + for_each_set_bit(i, &val, len * 8) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + irq->enabled = true; + vgic_queue_irq_unlock(vcpu->kvm, irq); + } +} + +void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + int i; + + for_each_set_bit(i, &val, len * 8) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + + irq->enabled = false; + + spin_unlock(&irq->irq_lock); + } +} + static int match_region(const void *key, const void *elt) { const unsigned int offset = (unsigned long)key; diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 5b928d4b197e..57e19fe8df55 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -96,6 +96,17 @@ unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu, void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); +unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + +void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); + +void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); + unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); #endif -- cgit v1.2.1 From 96b298000db48360e49a1f8f9edc6d2b9c1b2548 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 14:33:41 +0000 Subject: KVM: arm/arm64: vgic-new: Add PENDING registers handlers The pending register handlers are shared between the v2 and v3 emulation, so their implementation goes into vgic-mmio.c, to be easily referenced from the v3 emulation as well later. For level triggered interrupts the real line level is unaffected by this write, so we keep this state separate and combine it with the device's level to get the actual pending state. 
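As a minimal illustration of that rule (this helper is not part of the series), the effective pending state of a level-triggered interrupt is the OR of the device's line level and the separately latched soft-pending bit, which is exactly what the handlers below compute:

static bool effective_pending(struct vgic_irq *irq)
{
	if (irq->config == VGIC_CONFIG_LEVEL)
		return irq->line_level || irq->soft_pending;

	/* Edge IRQs simply use the latched pending bit. */
	return irq->pending;
}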
Signed-off-by: Andre Przywara Reviewed-by: Marc Zyngier Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 4 +-- virt/kvm/arm/vgic/vgic-mmio.c | 60 ++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.h | 12 ++++++++ 3 files changed, 74 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index d5355b502f4a..c13a7089bc9a 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -78,10 +78,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + vgic_mmio_read_pending, vgic_mmio_write_spending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + vgic_mmio_read_pending, vgic_mmio_write_cpending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, vgic_mmio_read_raz, vgic_mmio_write_wi, 1, diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 32ed8dbd93d6..d8dc8f6480dd 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -95,6 +95,66 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, } } +unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + u32 value = 0; + int i; + + /* Loop over all IRQs affected by this read */ + for (i = 0; i < len * 8; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + if (irq->pending) + value |= (1U << i); + } + + return value; +} + +void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + int i; + + for_each_set_bit(i, &val, len * 8) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + irq->pending = true; + if (irq->config == VGIC_CONFIG_LEVEL) + irq->soft_pending = true; + + vgic_queue_irq_unlock(vcpu->kvm, irq); + } +} + +void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + int i; + + for_each_set_bit(i, &val, len * 8) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + + if (irq->config == VGIC_CONFIG_LEVEL) { + irq->soft_pending = false; + irq->pending = irq->line_level; + } else { + irq->pending = false; + } + + spin_unlock(&irq->irq_lock); + } +} + static int match_region(const void *key, const void *elt) { const unsigned int offset = (unsigned long)key; diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 57e19fe8df55..97ee703a1bd6 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -107,6 +107,18 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); +unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + +void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); + +void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); + + unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); #endif -- cgit v1.2.1 From 
69b6fe0c6e7f560165d655bbb127f8d69b7358ea Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 12:40:58 +0000 Subject: KVM: arm/arm64: vgic-new: Add ACTIVE registers handlers The active register handlers are shared between the v2 and v3 emulation, so their implementation goes into vgic-mmio.c, to be easily referenced from the v3 emulation as well later. Since activation/deactivation of an interrupt may happen entirely in the guest without it ever exiting, we need some extra logic to properly track the active state. For clearing the active state, we basically have to halt the guest to make sure this is properly propagated into the respective VCPUs. Signed-off-by: Andre Przywara --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 4 +- virt/kvm/arm/vgic/vgic-mmio.c | 81 ++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.h | 10 +++++ 3 files changed, 93 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index c13a7089bc9a..12e101b8fd52 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -84,10 +84,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_pending, vgic_mmio_write_cpending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + vgic_mmio_read_active, vgic_mmio_write_sactive, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR, - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + vgic_mmio_read_active, vgic_mmio_write_cactive, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI, vgic_mmio_read_raz, vgic_mmio_write_wi, 8, diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index d8dc8f6480dd..79a4622dad04 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -155,6 +155,87 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, } } +unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + u32 value = 0; + int i; + + /* Loop over all IRQs affected by this read */ + for (i = 0; i < len * 8; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + if (irq->active) + value |= (1U << i); + } + + return value; +} + +void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + int i; + + kvm_arm_halt_guest(vcpu->kvm); + for_each_set_bit(i, &val, len * 8) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + /* + * If this virtual IRQ was written into a list register, we + * have to make sure the CPU that runs the VCPU thread has + * synced back LR state to the struct vgic_irq. We can only + * know this for sure, when either this irq is not assigned to + * anyone's AP list anymore, or the VCPU thread is not + * running on any CPUs. + * + * In the opposite case, we know the VCPU thread may be on its + * way back from the guest and still has to sync back this + * IRQ, so we release and re-acquire the spin_lock to let the + * other thread sync back the IRQ. 
+ */ + while (irq->vcpu && /* IRQ may have state in an LR somewhere */ + irq->vcpu->cpu != -1) /* VCPU thread is running */ + cond_resched_lock(&irq->irq_lock); + + irq->active = false; + spin_unlock(&irq->irq_lock); + } + kvm_arm_resume_guest(vcpu->kvm); +} + +void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + int i; + + for_each_set_bit(i, &val, len * 8) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + + /* + * If the IRQ was already active or there is no target VCPU + * assigned at the moment, then just proceed. + */ + if (irq->active || !irq->target_vcpu) { + irq->active = true; + + spin_unlock(&irq->irq_lock); + continue; + } + + irq->active = true; + vgic_queue_irq_unlock(vcpu->kvm, irq); + } +} + static int match_region(const void *key, const void *elt) { const unsigned int offset = (unsigned long)key; diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 97ee703a1bd6..50b4464a0730 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -118,6 +118,16 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); +unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + +void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); + +void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); -- cgit v1.2.1 From 055658bf48fcc6afdf90810e7e8f4e98f486c0d2 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 14:34:02 +0000 Subject: KVM: arm/arm64: vgic-new: Add PRIORITY registers handlers The priority register handlers are shared between the v2 and v3 emulation, so their implementation goes into vgic-mmio.c, to be easily referenced from the v3 emulation as well later. There is a corner case when we change the priority of a pending interrupt which we don't handle at the moment. 
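A small worked example of the priority narrowing performed below (illustrative, not part of the patch): with VGIC_PRI_BITS == 5 only the top five bits of each priority byte are kept, so a guest write of 0xa5 is stored, and later read back, as 0xa0.

	u8 written = 0xa5;
	u8 stored  = written & GENMASK(7, 8 - VGIC_PRI_BITS);	/* 0xa5 & 0xf8 == 0xa0 */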
Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 2 +- virt/kvm/arm/vgic/vgic-mmio.c | 40 ++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.h | 7 +++++++ virt/kvm/arm/vgic/vgic.h | 2 ++ 4 files changed, 50 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 12e101b8fd52..d564a3068fcd 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -90,7 +90,7 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_active, vgic_mmio_write_cactive, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI, - vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + vgic_mmio_read_priority, vgic_mmio_write_priority, 8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET, vgic_mmio_read_raz, vgic_mmio_write_wi, 8, diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 79a4622dad04..6f4e05bada17 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -236,6 +236,46 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, } } +unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 8); + int i; + u64 val = 0; + + for (i = 0; i < len; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + val |= (u64)irq->priority << (i * 8); + } + + return val; +} + +/* + * We currently don't handle changing the priority of an interrupt that + * is already pending on a VCPU. If there is a need for this, we would + * need to make this VCPU exit and re-evaluate the priorities, potentially + * leading to this interrupt getting presented now to the guest (if it has + * been masked by the priority mask before). 
+ */ +void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 8); + int i; + + for (i = 0; i < len; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + /* Narrow the priority range to what we actually support */ + irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); + spin_unlock(&irq->irq_lock); + } +} + static int match_region(const void *key, const void *elt) { const unsigned int offset = (unsigned long)key; diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 50b4464a0730..7e73f10bf646 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -129,6 +129,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); +unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + +void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); + unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); #endif diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index cf620157e1e4..e57f8d54c792 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -19,6 +19,8 @@ #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ #define IMPLEMENTER_ARM 0x43b +#define VGIC_PRI_BITS 5 + #define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS) struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, -- cgit v1.2.1 From 79717e4ac09c7c0c1414a3338fec457d982e9dd8 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 12:41:31 +0000 Subject: KVM: arm/arm64: vgic-new: Add CONFIG registers handlers The config register handlers are shared between the v2 and v3 emulation, so their implementation goes into vgic-mmio.c, to be easily referenced from the v3 emulation as well later. 
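For illustration (not part of the patch): GIC{D,R}_ICFGR uses two bits per interrupt, and only the upper bit of each field is evaluated, 1 meaning edge and 0 meaning level, which is what the write handler below checks with test_bit(i * 2 + 1, &val). A minimal sketch of that decoding:

static bool icfgr_field_is_edge(unsigned long val, int i)
{
	/* i is the interrupt's index within this ICFGR access. */
	return test_bit(i * 2 + 1, &val);
}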
Signed-off-by: Andre Przywara --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 2 +- virt/kvm/arm/vgic/vgic-mmio.c | 47 ++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.h | 7 ++++++ 3 files changed, 55 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index d564a3068fcd..bb7389edadd3 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -96,7 +96,7 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_raz, vgic_mmio_write_wi, 8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG, - vgic_mmio_read_raz, vgic_mmio_write_wi, 2, + vgic_mmio_read_config, vgic_mmio_write_config, 2, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT, vgic_mmio_read_raz, vgic_mmio_write_wi, 4, diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 6f4e05bada17..9de80be4d607 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -276,6 +276,53 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, } } +unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 2); + u32 value = 0; + int i; + + for (i = 0; i < len * 4; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + if (irq->config == VGIC_CONFIG_EDGE) + value |= (2U << (i * 2)); + } + + return value; +} + +void vgic_mmio_write_config(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 2); + int i; + + for (i = 0; i < len * 4; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + /* + * The configuration cannot be changed for SGIs in general, + * for PPIs this is IMPLEMENTATION DEFINED. The arch timer + * code relies on PPIs being level triggered, so we also + * make them read-only here. + */ + if (intid + i < VGIC_NR_PRIVATE_IRQS) + continue; + + spin_lock(&irq->irq_lock); + if (test_bit(i * 2 + 1, &val)) { + irq->config = VGIC_CONFIG_EDGE; + } else { + irq->config = VGIC_CONFIG_LEVEL; + irq->pending = irq->line_level | irq->soft_pending; + } + spin_unlock(&irq->irq_lock); + } +} + static int match_region(const void *key, const void *elt) { const unsigned int offset = (unsigned long)key; diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index 7e73f10bf646..d1348acc0e72 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -136,6 +136,13 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); +unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + +void vgic_mmio_write_config(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val); + unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); #endif -- cgit v1.2.1 From 2c234d6f18267614aaa9b0e9148a7daa6e2b234d Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 12:41:55 +0000 Subject: KVM: arm/arm64: vgic-new: Add TARGET registers handlers The target register handlers are v2 emulation specific, so their implementation lives entirely in vgic-mmio-v2.c. We copy the old VGIC behaviour of assigning an IRQ to the first VCPU set in the target mask instead of making it possibly pending on multiple VCPUs. 
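A worked example of that routing rule (illustrative, not part of the patch): a GICD_ITARGETSR byte of 0x06 names CPU1 and CPU2, but only the lowest set bit wins, so the interrupt is assigned to VCPU1.

	u8  targets = 0x06;				/* CPU1 | CPU2 */
	int target  = targets ? __ffs(targets) : 0;	/* __ffs(0x06) == 1 -> VCPU1 */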
Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 43 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index bb7389edadd3..52389ffbfffd 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -64,6 +64,47 @@ static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu, } } +static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 8); + int i; + u64 val = 0; + + for (i = 0; i < len; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + val |= (u64)irq->targets << (i * 8); + } + + return val; +} + +static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 8); + int i; + + /* GICD_ITARGETSR[0-7] are read-only */ + if (intid < VGIC_NR_PRIVATE_IRQS) + return; + + for (i = 0; i < len; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); + int target; + + spin_lock(&irq->irq_lock); + + irq->targets = (val >> (i * 8)) & 0xff; + target = irq->targets ? __ffs(irq->targets) : 0; + irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); + + spin_unlock(&irq->irq_lock); + } +} + static const struct vgic_register_region vgic_v2_dist_registers[] = { REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL, vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12, @@ -93,7 +134,7 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_priority, vgic_mmio_write_priority, 8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET, - vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + vgic_mmio_read_target, vgic_mmio_write_target, 8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG, vgic_mmio_read_config, vgic_mmio_write_config, 2, -- cgit v1.2.1 From 55cc01fb9004ea93345f30aa26a3c3fc22d4f46a Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 12:42:05 +0000 Subject: KVM: arm/arm64: vgic-new: Add SGIR register handler Triggering an IPI via this register is v2 specific, so the implementation lives entirely in vgic-mmio-v2.c. Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 43 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 52389ffbfffd..c884e9bec29f 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -64,6 +64,47 @@ static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu, } } +static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus); + int intid = val & 0xf; + int targets = (val >> 16) & 0xff; + int mode = (val >> 24) & 0x03; + int c; + struct kvm_vcpu *vcpu; + + switch (mode) { + case 0x0: /* as specified by targets */ + break; + case 0x1: + targets = (1U << nr_vcpus) - 1; /* all, ... 
*/ + targets &= ~(1U << source_vcpu->vcpu_id); /* but self */ + break; + case 0x2: /* this very vCPU only */ + targets = (1U << source_vcpu->vcpu_id); + break; + case 0x3: /* reserved */ + return; + } + + kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) { + struct vgic_irq *irq; + + if (!(targets & (1U << c))) + continue; + + irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); + + spin_lock(&irq->irq_lock); + irq->pending = true; + irq->source |= 1U << source_vcpu->vcpu_id; + + vgic_queue_irq_unlock(source_vcpu->kvm, irq); + } +} + static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len) { @@ -140,7 +181,7 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_config, vgic_mmio_write_config, 2, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT, - vgic_mmio_read_raz, vgic_mmio_write_wi, 4, + vgic_mmio_read_raz, vgic_mmio_write_sgir, 4, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR, vgic_mmio_read_raz, vgic_mmio_write_wi, 16, -- cgit v1.2.1 From ed40213ef9b02b0f5e9e1807c45ee45407765a27 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 9 Dec 2015 16:21:37 +0000 Subject: KVM: arm/arm64: vgic-new: Add SGIPENDR register handlers As this register is v2 specific, its implementation lives entirely in vgic-mmio-v2.c. This register allows setting the source mask of an IPI. Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 62 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index c884e9bec29f..3925d4cbec62 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -146,6 +146,64 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, } } +static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = addr & 0x0f; + int i; + u64 val = 0; + + for (i = 0; i < len; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + val |= (u64)irq->source << (i * 8); + } + return val; +} + +static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = addr & 0x0f; + int i; + + for (i = 0; i < len; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + + irq->source &= ~((val >> (i * 8)) & 0xff); + if (!irq->source) + irq->pending = false; + + spin_unlock(&irq->irq_lock); + } +} + +static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 intid = addr & 0x0f; + int i; + + for (i = 0; i < len; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + spin_lock(&irq->irq_lock); + + irq->source |= (val >> (i * 8)) & 0xff; + + if (irq->source) { + irq->pending = true; + vgic_queue_irq_unlock(vcpu->kvm, irq); + } else { + spin_unlock(&irq->irq_lock); + } + } +} + static const struct vgic_register_region vgic_v2_dist_registers[] = { REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL, vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12, @@ -184,10 +242,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_raz, vgic_mmio_write_sgir, 4, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR, - vgic_mmio_read_raz, vgic_mmio_write_wi, 16, + vgic_mmio_read_sgipend, 
vgic_mmio_write_sgipendc, 16, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET, - vgic_mmio_read_raz, vgic_mmio_write_wi, 16, + vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), }; -- cgit v1.2.1 From ed9b8cefa91695119e634979db6090d0700a21f8 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 14:34:34 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICv3 MMIO handling framework Create a new file called vgic-mmio-v3.c and describe the GICv3 distributor and redistributor registers there. This adds a special macro to deal with the split of SGI/PPI in the redistributor and SPIs in the distributor, which allows us to reuse the existing GICv2 handlers for those registers which are compatible. Also we provide a function to deal with the registration of the two separate redistributor frames per VCPU. Signed-off-by: Andre Przywara Reviewed-by: Eric Auger Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v3.c | 224 +++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-mmio.c | 5 + virt/kvm/arm/vgic/vgic-mmio.h | 2 + virt/kvm/arm/vgic/vgic.h | 7 ++ 4 files changed, 238 insertions(+) create mode 100644 virt/kvm/arm/vgic/vgic-mmio-v3.c (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c new file mode 100644 index 000000000000..83a665474610 --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -0,0 +1,224 @@ +/* + * VGICv3 MMIO handling functions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include + +#include + +#include "vgic.h" +#include "vgic-mmio.h" + +/* + * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the + * redistributors, while SPIs are covered by registers in the distributor + * block. Trying to set private IRQs in this block gets ignored. + * We take some special care here to fix the calculation of the register + * offset. 
+ */ +#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, bpi, acc) \ + { \ + .reg_offset = off, \ + .bits_per_irq = bpi, \ + .len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8, \ + .access_flags = acc, \ + .read = vgic_mmio_read_raz, \ + .write = vgic_mmio_write_wi, \ + }, { \ + .reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8, \ + .bits_per_irq = bpi, \ + .len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8, \ + .access_flags = acc, \ + .read = rd, \ + .write = wr, \ + } + +static const struct vgic_register_region vgic_v3_dist_registers[] = { + REGISTER_DESC_WITH_LENGTH(GICD_CTLR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 16, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR, + vgic_mmio_read_rao, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER, + vgic_mmio_read_enable, vgic_mmio_write_senable, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER, + vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR, + vgic_mmio_read_pending, vgic_mmio_write_spending, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR, + vgic_mmio_read_pending, vgic_mmio_write_cpending, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER, + vgic_mmio_read_active, vgic_mmio_write_sactive, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER, + vgic_mmio_read_active, vgic_mmio_write_cactive, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR, + vgic_mmio_read_priority, vgic_mmio_write_priority, 8, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR, + vgic_mmio_read_config, vgic_mmio_write_config, 2, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER, + vgic_mmio_read_raz, vgic_mmio_write_wi, 64, + VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICD_IDREGS, + vgic_mmio_read_raz, vgic_mmio_write_wi, 48, + VGIC_ACCESS_32bit), +}; + +static const struct vgic_register_region vgic_v3_rdbase_registers[] = { + REGISTER_DESC_WITH_LENGTH(GICR_CTLR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_IIDR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_TYPER, + vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER, + vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER, + vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_IDREGS, + vgic_mmio_read_raz, vgic_mmio_write_wi, 48, + VGIC_ACCESS_32bit), +}; + +static const struct vgic_register_region vgic_v3_sgibase_registers[] = { + REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0, + vgic_mmio_read_rao, vgic_mmio_write_wi, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0, + vgic_mmio_read_enable, vgic_mmio_write_senable, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0, + vgic_mmio_read_enable, 
vgic_mmio_write_cenable, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_ISPENDR0, + vgic_mmio_read_pending, vgic_mmio_write_spending, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_ICPENDR0, + vgic_mmio_read_pending, vgic_mmio_write_cpending, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0, + vgic_mmio_read_active, vgic_mmio_write_sactive, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_ICACTIVER0, + vgic_mmio_read_active, vgic_mmio_write_cactive, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0, + vgic_mmio_read_priority, vgic_mmio_write_priority, 32, + VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), + REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0, + vgic_mmio_read_config, vgic_mmio_write_config, 8, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0, + vgic_mmio_read_raz, vgic_mmio_write_wi, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GICR_NSACR, + vgic_mmio_read_raz, vgic_mmio_write_wi, 4, + VGIC_ACCESS_32bit), +}; + +unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev) +{ + dev->regions = vgic_v3_dist_registers; + dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers); + + kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops); + + return SZ_64K; +} + +int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address) +{ + int nr_vcpus = atomic_read(&kvm->online_vcpus); + struct kvm_vcpu *vcpu; + struct vgic_io_device *devices; + int c, ret = 0; + + devices = kmalloc(sizeof(struct vgic_io_device) * nr_vcpus * 2, + GFP_KERNEL); + if (!devices) + return -ENOMEM; + + kvm_for_each_vcpu(c, vcpu, kvm) { + gpa_t rd_base = redist_base_address + c * SZ_64K * 2; + gpa_t sgi_base = rd_base + SZ_64K; + struct vgic_io_device *rd_dev = &devices[c * 2]; + struct vgic_io_device *sgi_dev = &devices[c * 2 + 1]; + + kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops); + rd_dev->base_addr = rd_base; + rd_dev->regions = vgic_v3_rdbase_registers; + rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers); + rd_dev->redist_vcpu = vcpu; + + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base, + SZ_64K, &rd_dev->dev); + mutex_unlock(&kvm->slots_lock); + + if (ret) + break; + + kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops); + sgi_dev->base_addr = sgi_base; + sgi_dev->regions = vgic_v3_sgibase_registers; + sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers); + sgi_dev->redist_vcpu = vcpu; + + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base, + SZ_64K, &sgi_dev->dev); + mutex_unlock(&kvm->slots_lock); + if (ret) { + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, + &rd_dev->dev); + break; + } + } + + if (ret) { + /* The current c failed, so we start with the previous one. 
*/ + for (c--; c >= 0; c--) { + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, + &devices[c * 2].dev); + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, + &devices[c * 2 + 1].dev); + } + kfree(devices); + } else { + kvm->arch.vgic.redist_iodevs = devices; + } + + return ret; +} diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 9de80be4d607..4ef35719fcbe 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -478,6 +478,11 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, case VGIC_V2: len = vgic_v2_init_dist_iodev(io_device); break; +#ifdef CONFIG_KVM_ARM_VGIC_V3 + case VGIC_V3: + len = vgic_v3_init_dist_iodev(io_device); + break; +#endif default: BUG_ON(1); } diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h index d1348acc0e72..850901482aec 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.h +++ b/virt/kvm/arm/vgic/vgic-mmio.h @@ -145,4 +145,6 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); +unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev); + #endif diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index e57f8d54c792..6742b11ddd91 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -42,6 +42,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); +int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); #else static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu) { @@ -63,6 +64,12 @@ static inline void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr) static inline void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) { } + +static inline int vgic_register_redist_iodevs(struct kvm *kvm, + gpa_t dist_base_address) +{ + return -ENODEV; +} #endif #endif -- cgit v1.2.1 From fd59ed3be17e414aa3400f35d0f5faa01dd74185 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 27 Jan 2016 14:54:30 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICv3 CTLR, IIDR, TYPER handlers As in the GICv2 emulation we handle those three registers in one function. 
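To make the TYPER arithmetic in the hunk below easier to follow, here is a worked example with purely illustrative numbers (GICD_TYPER.ITLinesNumber encodes support for 32 * (N + 1) interrupt IDs):

	nr_spis = 224 (example value)
	ITLinesNumber = (224 + VGIC_NR_PRIVATE_IRQS) / 32 - 1 = 7   =>  32 * (7 + 1) = 256 IRQ IDs
	IDbits (bits [23:19]) = INTERRUPT_ID_BITS_SPIS - 1 = 9      =>  10-bit interrupt IDs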
Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v3.c | 48 +++++++++++++++++++++++++++++++++++++++- virt/kvm/arm/vgic/vgic.h | 1 + 2 files changed, 48 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 83a665474610..bd062b8f7ba8 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -22,6 +22,52 @@ #include "vgic.h" #include "vgic-mmio.h" +static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 value = 0; + + switch (addr & 0x0c) { + case GICD_CTLR: + if (vcpu->kvm->arch.vgic.enabled) + value |= GICD_CTLR_ENABLE_SS_G1; + value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS; + break; + case GICD_TYPER: + value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; + value = (value >> 5) - 1; + value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19; + break; + case GICD_IIDR: + value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); + break; + default: + return 0; + } + + return value; +} + +static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + bool was_enabled = dist->enabled; + + switch (addr & 0x0c) { + case GICD_CTLR: + dist->enabled = val & GICD_CTLR_ENABLE_SS_G1; + + if (!was_enabled && dist->enabled) + vgic_kick_vcpus(vcpu->kvm); + break; + case GICD_TYPER: + case GICD_IIDR: + return; + } +} + /* * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the * redistributors, while SPIs are covered by registers in the distributor @@ -48,7 +94,7 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { REGISTER_DESC_WITH_LENGTH(GICD_CTLR, - vgic_mmio_read_raz, vgic_mmio_write_wi, 16, + vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR, vgic_mmio_read_rao, vgic_mmio_write_wi, 1, diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 6742b11ddd91..44c6a1bc189a 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -19,6 +19,7 @@ #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ #define IMPLEMENTER_ARM 0x43b +#define INTERRUPT_ID_BITS_SPIS 10 #define VGIC_PRI_BITS 5 #define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS) -- cgit v1.2.1 From 741972d8a69ce748d84dfe0d68403054dfcae657 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 27 Jan 2016 14:54:46 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICv3 redistributor IIDR and TYPER handler The redistributor TYPER tells the OS about the associated MPIDR, also the LAST bit is crucial to determine the number of redistributors. 
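For orientation when reading the new TYPER handler below, this is how the returned value is assembled for an illustrative VCPU 1 (MPIDR affinity 0.0.0.1) in a two-VCPU guest; field positions follow the GICv3 architecture spec:

	bits [55:32]: Aff2.Aff1.Aff0 of the VCPU's MPIDR   (here 0x000001)
	bits [23:8] : Processor_Number, i.e. the vcpu_id   (here 1)
	bit  [4]    : Last                                 (set here, since vcpu_id == online_vcpus - 1)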
Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v3.c | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index bd062b8f7ba8..64edd2354334 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -22,6 +22,13 @@ #include "vgic.h" #include "vgic-mmio.h" +/* extract @num bytes at @offset bytes offset in data */ +static unsigned long extract_bytes(unsigned long data, unsigned int offset, + unsigned int num) +{ + return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0); +} + static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len) { @@ -68,6 +75,27 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, } } +static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu); + int target_vcpu_id = vcpu->vcpu_id; + u64 value; + + value = (mpidr & GENMASK(23, 0)) << 32; + value |= ((target_vcpu_id & 0xffff) << 8); + if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1) + value |= GICR_TYPER_LAST; + + return extract_bytes(value, addr & 7, len); +} + +static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); +} + /* * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the * redistributors, while SPIs are covered by registers in the distributor @@ -142,10 +170,10 @@ static const struct vgic_register_region vgic_v3_rdbase_registers[] = { vgic_mmio_read_raz, vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GICR_IIDR, - vgic_mmio_read_raz, vgic_mmio_write_wi, 4, + vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GICR_TYPER, - vgic_mmio_read_raz, vgic_mmio_write_wi, 8, + vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8, VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER, vgic_mmio_read_raz, vgic_mmio_write_wi, 8, -- cgit v1.2.1 From 54f59d2b3a0a3d4e6f5038f5831aedb21350209d Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Fri, 22 Jan 2016 18:18:52 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICv3 IDREGS register handler We implement the only one ID register that is required by the architecture, also this is the one that Linux actually checks. 
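A quick decode of the value returned below, with the field layout taken from the GIC architecture spec:

	GICD_PIDR2 = 0x3b = (0x3 << 4) | 0xb
	             ArchRev (bits [7:4]) = 0x3 => GICv3; the low nibble holds the JEDEC/designer ID bits

The ArchRev field is the part the Linux GICv3 driver looks at when probing the distributor.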
Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v3.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 64edd2354334..22e512ca149f 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -96,6 +96,18 @@ static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu, return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); } +static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + switch (addr & 0xffff) { + case GICD_PIDR2: + /* report a GICv3 compliant implementation */ + return 0x3b; + } + + return 0; +} + /* * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the * redistributors, while SPIs are covered by registers in the distributor @@ -161,7 +173,7 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { vgic_mmio_read_raz, vgic_mmio_write_wi, 64, VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GICD_IDREGS, - vgic_mmio_read_raz, vgic_mmio_write_wi, 48, + vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48, VGIC_ACCESS_32bit), }; @@ -182,7 +194,7 @@ static const struct vgic_register_region vgic_v3_rdbase_registers[] = { vgic_mmio_read_raz, vgic_mmio_write_wi, 8, VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GICR_IDREGS, - vgic_mmio_read_raz, vgic_mmio_write_wi, 48, + vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48, VGIC_ACCESS_32bit), }; -- cgit v1.2.1 From 78a714aba030395e72d03f0ff8a4c1481956e808 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Mon, 25 Jan 2016 16:45:37 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICv3 IROUTER register handlers Since GICv3 supports much more than the 8 CPUs the GICv2 ITARGETSR register can handle, the new IROUTER register covers the whole range of possible target (V)CPUs by using the same MPIDR that the cores report themselves. In addition to translating this MPIDR into a vcpu pointer we store the originally written value as well. The architecture allows to write any values into the register, which must be read back as written. Since we don't support affinity level 3, we don't need to take care about the upper word of this 64-bit register, which simplifies the handling a bit. Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v3.c | 41 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 22e512ca149f..4dcef9ee83ae 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -75,6 +75,45 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, } } +static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + int intid = VGIC_ADDR_TO_INTID(addr, 64); + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid); + + if (!irq) + return 0; + + /* The upper word is RAZ for us. 
*/ + if (addr & 4) + return 0; + + return extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len); +} + +static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + int intid = VGIC_ADDR_TO_INTID(addr, 64); + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid); + + if (!irq) + return; + + /* The upper word is WI for us since we don't implement Aff3. */ + if (addr & 4) + return; + + spin_lock(&irq->irq_lock); + + /* We only care about and preserve Aff0, Aff1 and Aff2. */ + irq->mpidr = val & GENMASK(23, 0); + irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); + + spin_unlock(&irq->irq_lock); +} + static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len) { @@ -170,7 +209,7 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { vgic_mmio_read_raz, vgic_mmio_write_wi, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER, - vgic_mmio_read_raz, vgic_mmio_write_wi, 64, + vgic_mmio_read_irouter, vgic_mmio_write_irouter, 64, VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH(GICD_IDREGS, vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48, -- cgit v1.2.1 From 621ecd8d2123bc13e140b519e01a18200aeb614c Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 26 Jan 2016 15:31:15 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICv3 SGI system register trap handler In contrast to GICv2 SGIs in a GICv3 implementation are not triggered by a MMIO write, but with a system register write. KVM knows about that register already, we just need to implement the handler and wire it up to the core KVM/ARM code. Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio-v3.c | 106 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 4dcef9ee83ae..a0c515a412a7 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -347,3 +347,109 @@ int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address) return ret; } + +/* + * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI + * generation register ICC_SGI1R_EL1) with a given VCPU. + * If the VCPU's MPIDR matches, return the level0 affinity, otherwise + * return -1. + */ +static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu) +{ + unsigned long affinity; + int level0; + + /* + * Split the current VCPU's MPIDR into affinity level 0 and the + * rest as this is what we have to compare against. + */ + affinity = kvm_vcpu_get_mpidr_aff(vcpu); + level0 = MPIDR_AFFINITY_LEVEL(affinity, 0); + affinity &= ~MPIDR_LEVEL_MASK; + + /* bail out if the upper three levels don't match */ + if (sgi_aff != affinity) + return -1; + + /* Is this VCPU's bit set in the mask ? */ + if (!(sgi_cpu_mask & BIT(level0))) + return -1; + + return level0; +} + +/* + * The ICC_SGI* registers encode the affinity differently from the MPIDR, + * so provide a wrapper to use the existing defines to isolate a certain + * affinity level. 
+ */ +#define SGI_AFFINITY_LEVEL(reg, level) \ + ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \ + >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level)) + +/** + * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs + * @vcpu: The VCPU requesting a SGI + * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU + * + * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register. + * This will trap in sys_regs.c and call this function. + * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the + * target processors as well as a bitmask of 16 Aff0 CPUs. + * If the interrupt routing mode bit is not set, we iterate over all VCPUs to + * check for matching ones. If this bit is set, we signal all, but not the + * calling VCPU. + */ +void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *c_vcpu; + u16 target_cpus; + u64 mpidr; + int sgi, c; + int vcpu_id = vcpu->vcpu_id; + bool broadcast; + + sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; + broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); + target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT; + mpidr = SGI_AFFINITY_LEVEL(reg, 3); + mpidr |= SGI_AFFINITY_LEVEL(reg, 2); + mpidr |= SGI_AFFINITY_LEVEL(reg, 1); + + /* + * We iterate over all VCPUs to find the MPIDRs matching the request. + * If we have handled one CPU, we clear its bit to detect early + * if we are already finished. This avoids iterating through all + * VCPUs when most of the times we just signal a single VCPU. + */ + kvm_for_each_vcpu(c, c_vcpu, kvm) { + struct vgic_irq *irq; + + /* Exit early if we have dealt with all requested CPUs */ + if (!broadcast && target_cpus == 0) + break; + + /* Don't signal the calling VCPU */ + if (broadcast && c == vcpu_id) + continue; + + if (!broadcast) { + int level0; + + level0 = match_mpidr(mpidr, target_cpus, c_vcpu); + if (level0 == -1) + continue; + + /* remove this matching VCPU from the mask */ + target_cpus &= ~BIT(level0); + } + + irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); + + spin_lock(&irq->irq_lock); + irq->pending = true; + + vgic_queue_irq_unlock(vcpu->kvm, irq); + } +} -- cgit v1.2.1 From c86c772191d7e65f873e6908e9604b31168936cd Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 30 Nov 2015 14:01:58 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_kvm_device: KVM device ops registration This patch introduces the skeleton for the KVM device operations associated to KVM_DEV_TYPE_ARM_VGIC_V2 and KVM_DEV_TYPE_ARM_VGIC_V3. At that stage kvm_vgic_create is stubbed. Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-kvm-device.c | 108 ++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 2 + 2 files changed, 110 insertions(+) create mode 100644 virt/kvm/arm/vgic/vgic-kvm-device.c (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c new file mode 100644 index 000000000000..ff332f34a221 --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -0,0 +1,108 @@ +/* + * VGIC: KVM DEVICE API + * + * Copyright (C) 2015 ARM Ltd. + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include + +/* common helpers */ + +static int vgic_create(struct kvm_device *dev, u32 type) +{ + return kvm_vgic_create(dev->kvm, type); +} + +static void vgic_destroy(struct kvm_device *dev) +{ + kfree(dev); +} + +void kvm_register_vgic_device(unsigned long type) +{ + switch (type) { + case KVM_DEV_TYPE_ARM_VGIC_V2: + kvm_register_device_ops(&kvm_arm_vgic_v2_ops, + KVM_DEV_TYPE_ARM_VGIC_V2); + break; +#ifdef CONFIG_KVM_ARM_VGIC_V3 + case KVM_DEV_TYPE_ARM_VGIC_V3: + kvm_register_device_ops(&kvm_arm_vgic_v3_ops, + KVM_DEV_TYPE_ARM_VGIC_V3); + break; +#endif + } +} + +/* V2 ops */ + +static int vgic_v2_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +static int vgic_v2_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +static int vgic_v2_has_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +struct kvm_device_ops kvm_arm_vgic_v2_ops = { + .name = "kvm-arm-vgic-v2", + .create = vgic_create, + .destroy = vgic_destroy, + .set_attr = vgic_v2_set_attr, + .get_attr = vgic_v2_get_attr, + .has_attr = vgic_v2_has_attr, +}; + +/* V3 ops */ + +#ifdef CONFIG_KVM_ARM_VGIC_V3 + +static int vgic_v3_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +static int vgic_v3_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +static int vgic_v3_has_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +struct kvm_device_ops kvm_arm_vgic_v3_ops = { + .name = "kvm-arm-vgic-v3", + .create = vgic_create, + .destroy = vgic_destroy, + .set_attr = vgic_v3_set_attr, + .get_attr = vgic_v3_get_attr, + .has_attr = vgic_v3_has_attr, +}; + +#endif /* CONFIG_KVM_ARM_VGIC_V3 */ + diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 44c6a1bc189a..77b0ab38a271 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -73,4 +73,6 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm, } #endif +void kvm_register_vgic_device(unsigned long type); + #endif -- cgit v1.2.1 From fca256026bb0d78991f975a7d4a3f601b7234401 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 21 Dec 2015 16:33:22 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_kvm_device: KVM_DEV_ARM_VGIC_GRP_NR_IRQS This patch implements the KVM_DEV_ARM_VGIC_GRP_NR_IRQS group. This modality is supported by both VGIC V2 and V3 KVM device as will be other groups, hence the introduction of common helpers. 
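To illustrate the userland side of this group (a sketch only, not part of the patch: vgic_fd stands for the fd returned by KVM_CREATE_DEVICE, and the definitions come from <linux/kvm.h>), configuring 256 interrupt IDs before the VGIC is initialised would look roughly like this:

	__u32 nr_irqs = 256;	/* 32 private IRQs + 224 SPIs */
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
		.attr  = 0,
		.addr  = (__u64)(unsigned long)&nr_irqs,
	};

	/* Fails with -EINVAL/-EBUSY under the conditions checked below. */
	if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr))
		perror("KVM_DEV_ARM_VGIC_GRP_NR_IRQS");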
Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-kvm-device.c | 83 +++++++++++++++++++++++++++++++++++-- 1 file changed, 79 insertions(+), 4 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index ff332f34a221..05ff925f5377 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -15,9 +15,69 @@ */ #include #include +#include +#include "vgic.h" /* common helpers */ +static int vgic_set_common_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { + u32 __user *uaddr = (u32 __user *)(long)attr->addr; + u32 val; + int ret = 0; + + if (get_user(val, uaddr)) + return -EFAULT; + + /* + * We require: + * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs + * - at most 1024 interrupts + * - a multiple of 32 interrupts + */ + if (val < (VGIC_NR_PRIVATE_IRQS + 32) || + val > VGIC_MAX_RESERVED || + (val & 31)) + return -EINVAL; + + mutex_lock(&dev->kvm->lock); + + if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis) + ret = -EBUSY; + else + dev->kvm->arch.vgic.nr_spis = + val - VGIC_NR_PRIVATE_IRQS; + + mutex_unlock(&dev->kvm->lock); + + return ret; + } + } + + return -ENXIO; +} + +static int vgic_get_common_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + int r = -ENXIO; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { + u32 __user *uaddr = (u32 __user *)(long)attr->addr; + + r = put_user(dev->kvm->arch.vgic.nr_spis + + VGIC_NR_PRIVATE_IRQS, uaddr); + break; + } + } + + return r; +} + static int vgic_create(struct kvm_device *dev, u32 type) { return kvm_vgic_create(dev->kvm, type); @@ -49,18 +109,29 @@ void kvm_register_vgic_device(unsigned long type) static int vgic_v2_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return -ENXIO; + int ret; + + ret = vgic_set_common_attr(dev, attr); + return ret; + } static int vgic_v2_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return -ENXIO; + int ret; + + ret = vgic_get_common_attr(dev, attr); + return ret; } static int vgic_v2_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: + return 0; + } return -ENXIO; } @@ -80,18 +151,22 @@ struct kvm_device_ops kvm_arm_vgic_v2_ops = { static int vgic_v3_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return -ENXIO; + return vgic_set_common_attr(dev, attr); } static int vgic_v3_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return -ENXIO; + return vgic_get_common_attr(dev, attr); } static int vgic_v3_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: + return 0; + } return -ENXIO; } -- cgit v1.2.1 From afcc7c50ce6e2fb18fd0535813c1d612265b1899 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 21 Dec 2015 17:22:05 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_kvm_device: KVM_DEV_ARM_VGIC_GRP_CTRL This patch implements the KVM_DEV_ARM_VGIC_GRP_CTRL group API featuring KVM_DEV_ARM_VGIC_CTRL_INIT attribute. The vgic_init function is not yet implemented though. 
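A minimal userspace sketch (again illustrative only; vgic_fd is the KVM device fd) shows that this attribute carries no payload:

	struct kvm_device_attr init_attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
	};

	/* Ends up calling vgic_init() under kvm->lock once that is implemented. */
	if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &init_attr))
		perror("KVM_DEV_ARM_VGIC_CTRL_INIT");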
Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-kvm-device.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index 05ff925f5377..e153f1254586 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -23,6 +23,8 @@ static int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + int r; + switch (attr->group) { case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { u32 __user *uaddr = (u32 __user *)(long)attr->addr; @@ -55,6 +57,16 @@ static int vgic_set_common_attr(struct kvm_device *dev, return ret; } + case KVM_DEV_ARM_VGIC_GRP_CTRL: { + switch (attr->attr) { + case KVM_DEV_ARM_VGIC_CTRL_INIT: + mutex_lock(&dev->kvm->lock); + r = vgic_init(dev->kvm); + mutex_unlock(&dev->kvm->lock); + return r; + } + break; + } } return -ENXIO; @@ -131,6 +143,11 @@ static int vgic_v2_has_attr(struct kvm_device *dev, switch (attr->group) { case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: return 0; + case KVM_DEV_ARM_VGIC_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_ARM_VGIC_CTRL_INIT: + return 0; + } } return -ENXIO; } @@ -166,6 +183,11 @@ static int vgic_v3_has_attr(struct kvm_device *dev, switch (attr->group) { case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: return 0; + case KVM_DEV_ARM_VGIC_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_ARM_VGIC_CTRL_INIT: + return 0; + } } return -ENXIO; } -- cgit v1.2.1 From e2c1f9abff83ee0ad0f78e03918c7edf070edb39 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 21 Dec 2015 16:36:04 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_kvm_device: implement kvm_vgic_addr kvm_vgic_addr is used by the userspace to set the base address of the following register regions, as seen by the guest: - distributor(v2 and v3), - re-distributors (v3), - CPU interface (v2). Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-kvm-device.c | 86 +++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 3 ++ 2 files changed, 89 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index e153f1254586..082829a764dd 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -16,10 +16,96 @@ #include #include #include +#include #include "vgic.h" /* common helpers */ +static int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, + phys_addr_t addr, phys_addr_t alignment) +{ + if (addr & ~KVM_PHYS_MASK) + return -E2BIG; + + if (!IS_ALIGNED(addr, alignment)) + return -EINVAL; + + if (!IS_VGIC_ADDR_UNDEF(*ioaddr)) + return -EEXIST; + + return 0; +} + +/** + * kvm_vgic_addr - set or get vgic VM base addresses + * @kvm: pointer to the vm struct + * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX + * @addr: pointer to address value + * @write: if true set the address in the VM address space, if false read the + * address + * + * Set or get the vgic base addresses for the distributor and the virtual CPU + * interface in the VM physical address space. These addresses are properties + * of the emulated core/SoC and therefore user space initially knows this + * information. + * Check them for sanity (alignment, double assignment). We can't check for + * overlapping regions in case of a virtual GICv3 here, since we don't know + * the number of VCPUs yet, so we defer this check to map_resources(). 
+ */ +int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) +{ + int r = 0; + struct vgic_dist *vgic = &kvm->arch.vgic; + int type_needed; + phys_addr_t *addr_ptr, alignment; + + mutex_lock(&kvm->lock); + switch (type) { + case KVM_VGIC_V2_ADDR_TYPE_DIST: + type_needed = KVM_DEV_TYPE_ARM_VGIC_V2; + addr_ptr = &vgic->vgic_dist_base; + alignment = SZ_4K; + break; + case KVM_VGIC_V2_ADDR_TYPE_CPU: + type_needed = KVM_DEV_TYPE_ARM_VGIC_V2; + addr_ptr = &vgic->vgic_cpu_base; + alignment = SZ_4K; + break; +#ifdef CONFIG_KVM_ARM_VGIC_V3 + case KVM_VGIC_V3_ADDR_TYPE_DIST: + type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; + addr_ptr = &vgic->vgic_dist_base; + alignment = SZ_64K; + break; + case KVM_VGIC_V3_ADDR_TYPE_REDIST: + type_needed = KVM_DEV_TYPE_ARM_VGIC_V3; + addr_ptr = &vgic->vgic_redist_base; + alignment = SZ_64K; + break; +#endif + default: + r = -ENODEV; + goto out; + } + + if (vgic->vgic_model != type_needed) { + r = -ENODEV; + goto out; + } + + if (write) { + r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment); + if (!r) + *addr_ptr = *addr; + } else { + *addr = *addr_ptr; + } + +out: + mutex_unlock(&kvm->lock); + return r; +} + static int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 77b0ab38a271..6abc9a35c228 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -19,6 +19,9 @@ #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ #define IMPLEMENTER_ARM 0x43b +#define VGIC_ADDR_UNDEF (-1) +#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) + #define INTERRUPT_ID_BITS_SPIS 10 #define VGIC_PRI_BITS 5 -- cgit v1.2.1 From e5c3029467cfa0acd89cfbd9cee1ae848e5eb8b0 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 21 Dec 2015 17:27:39 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_kvm_device: KVM_DEV_ARM_VGIC_GRP_ADDR This patch implements the KVM_DEV_ARM_VGIC_GRP_ADDR group which enables to set the base address of GIC regions as seen by the guest. Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-kvm-device.c | 38 +++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index 082829a764dd..a7090970be72 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -112,6 +112,17 @@ static int vgic_set_common_attr(struct kvm_device *dev, int r; switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_ADDR: { + u64 __user *uaddr = (u64 __user *)(long)attr->addr; + u64 addr; + unsigned long type = (unsigned long)attr->attr; + + if (copy_from_user(&addr, uaddr, sizeof(addr))) + return -EFAULT; + + r = kvm_vgic_addr(dev->kvm, type, &addr, true); + return (r == -ENODEV) ? -ENXIO : r; + } case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { u32 __user *uaddr = (u32 __user *)(long)attr->addr; u32 val; @@ -164,6 +175,19 @@ static int vgic_get_common_attr(struct kvm_device *dev, int r = -ENXIO; switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_ADDR: { + u64 __user *uaddr = (u64 __user *)(long)attr->addr; + u64 addr; + unsigned long type = (unsigned long)attr->attr; + + r = kvm_vgic_addr(dev->kvm, type, &addr, false); + if (r) + return (r == -ENODEV) ? 
-ENXIO : r; + + if (copy_to_user(uaddr, &addr, sizeof(addr))) + return -EFAULT; + break; + } case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { u32 __user *uaddr = (u32 __user *)(long)attr->addr; @@ -227,6 +251,13 @@ static int vgic_v2_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_ADDR: + switch (attr->attr) { + case KVM_VGIC_V2_ADDR_TYPE_DIST: + case KVM_VGIC_V2_ADDR_TYPE_CPU: + return 0; + } + break; case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: return 0; case KVM_DEV_ARM_VGIC_GRP_CTRL: @@ -267,6 +298,13 @@ static int vgic_v3_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_ADDR: + switch (attr->attr) { + case KVM_VGIC_V3_ADDR_TYPE_DIST: + case KVM_VGIC_V3_ADDR_TYPE_REDIST: + return 0; + } + break; case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: return 0; case KVM_DEV_ARM_VGIC_GRP_CTRL: -- cgit v1.2.1 From f94591e2e6fdca6e9a2cbf23e36a8803b4f605fe Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 21 Dec 2015 17:34:52 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_kvm_device: access to VGIC registers This patch implements the switches for KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_DEV_ARM_VGIC_GRP_CPU_REGS API which allows the userspace to access VGIC registers. Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-kvm-device.c | 53 +++++++++++++++++++++++++++++++++++-- virt/kvm/arm/vgic/vgic-mmio-v2.c | 38 ++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 1 + 3 files changed, 90 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index a7090970be72..78621283a301 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -226,6 +226,21 @@ void kvm_register_vgic_device(unsigned long type) } } +/** vgic_attr_regs_access: allows user space to read/write VGIC registers + * + * @dev: kvm device handle + * @attr: kvm device attribute + * @reg: address the value is read or written + * @is_write: write flag + * + */ +static int vgic_attr_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + u32 *reg, bool is_write) +{ + return -ENXIO; +} + /* V2 ops */ static int vgic_v2_set_attr(struct kvm_device *dev, @@ -234,8 +249,23 @@ static int vgic_v2_set_attr(struct kvm_device *dev, int ret; ret = vgic_set_common_attr(dev, attr); - return ret; + if (ret != -ENXIO) + return ret; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { + u32 __user *uaddr = (u32 __user *)(long)attr->addr; + u32 reg; + + if (get_user(reg, uaddr)) + return -EFAULT; + return vgic_attr_regs_access(dev, attr, ®, true); + } + } + + return -ENXIO; } static int vgic_v2_get_attr(struct kvm_device *dev, @@ -244,7 +274,23 @@ static int vgic_v2_get_attr(struct kvm_device *dev, int ret; ret = vgic_get_common_attr(dev, attr); - return ret; + if (ret != -ENXIO) + return ret; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { + u32 __user *uaddr = (u32 __user *)(long)attr->addr; + u32 reg = 0; + + ret = vgic_attr_regs_access(dev, attr, ®, false); + if (ret) + return ret; + return put_user(reg, uaddr); + } + } + + return -ENXIO; } static int vgic_v2_has_attr(struct kvm_device *dev, @@ -258,6 +304,9 @@ static int vgic_v2_has_attr(struct kvm_device *dev, return 0; } break; + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: + 
return vgic_v2_has_attr_regs(dev, attr); case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: return 0; case KVM_DEV_ARM_VGIC_GRP_CTRL: diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 3925d4cbec62..71896900ad12 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -258,3 +258,41 @@ unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev) return SZ_4K; } + +int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + int nr_irqs = dev->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; + const struct vgic_register_region *regions; + gpa_t addr; + int nr_regions, i, len; + + addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + regions = vgic_v2_dist_registers; + nr_regions = ARRAY_SIZE(vgic_v2_dist_registers); + break; + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: + return -ENXIO; /* TODO: describe CPU i/f regs also */ + default: + return -ENXIO; + } + + /* We only support aligned 32-bit accesses. */ + if (addr & 3) + return -ENXIO; + + for (i = 0; i < nr_regions; i++) { + if (regions[i].bits_per_irq) + len = (regions[i].bits_per_irq * nr_irqs) / 8; + else + len = regions[i].len; + + if (regions[i].reg_offset <= addr && + regions[i].reg_offset + len > addr) + return 0; + } + + return -ENXIO; +} diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 6abc9a35c228..a264c5ff476e 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -37,6 +37,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); +int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); -- cgit v1.2.1 From c3199f28e09496aa9fec9313b4f6e90e7dc913f0 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Mon, 25 Apr 2016 01:11:37 +0200 Subject: KVM: arm/arm64: vgic-new: Export register access interface Userland can access the emulated GIC to save and restore its state for initialization or migration purposes. The kvm_io_bus API requires an absolute gpa, which does not fit the KVM_DEV_ARM_VGIC_GRP_DIST_REGS user API, that only provides relative offsets. So we provide a wrapper to plug into our MMIO framework and find the respective register handler. Signed-off-by: Christoffer Dall Signed-off-by: Andre Przywara --- virt/kvm/arm/vgic/vgic-mmio-v2.c | 36 ++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 2 ++ 2 files changed, 38 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 71896900ad12..a25512212bc7 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -296,3 +296,39 @@ int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr) return -ENXIO; } + +/* + * When userland tries to access the VGIC register handlers, we need to + * create a usable struct vgic_io_device to be passed to the handlers and we + * have to set up a buffer similar to what would have happened if a guest MMIO + * access occurred, including doing endian conversions on BE systems. 
+ */ +static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev, + bool is_write, int offset, u32 *val) +{ + unsigned int len = 4; + u8 buf[4]; + int ret; + + if (is_write) { + vgic_data_host_to_mmio_bus(buf, len, *val); + ret = kvm_io_gic_ops.write(vcpu, &dev->dev, offset, len, buf); + } else { + ret = kvm_io_gic_ops.read(vcpu, &dev->dev, offset, len, buf); + if (!ret) + *val = vgic_data_mmio_bus_to_host(buf, len); + } + + return ret; +} + +int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, + int offset, u32 *val) +{ + struct vgic_io_device dev = { + .regions = vgic_v2_dist_registers, + .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers), + }; + + return vgic_uaccess(vcpu, &dev, is_write, offset, val); +} diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index a264c5ff476e..f8260268dd24 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -38,6 +38,8 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); +int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, + int offset, u32 *val); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); -- cgit v1.2.1 From 7d450e2821710718fd6703e9c486249cee913bab Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 1 Dec 2015 22:36:37 +0000 Subject: KVM: arm/arm64: vgic-new: Add userland access to VGIC dist registers Userland may want to save and restore the state of the in-kernel VGIC, so we provide the code which takes a userland request and translate that into calls to our MMIO framework. From Christoffer: When accessing the VGIC state from userspace we really don't want a VCPU to be messing with the state at the same time, and the API specifies that we should return -EBUSY if any VCPUs are running. Check and prevent VCPUs from running by grabbing their mutexes, one by one, and error out if we fail. (Note: This could potentially be simplified to just do a simple check and see if any VCPUs are running, and return -EBUSY then, without enforcing the locking throughout the duration of the uaccess, if we think that taking/releasing all these mutexes for every single GIC register access is too heavyweight.) Signed-off-by: Andre Przywara Signed-off-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-kvm-device.c | 55 ++++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index 78621283a301..9ee27cb5842b 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -238,7 +238,60 @@ static int vgic_attr_regs_access(struct kvm_device *dev, struct kvm_device_attr *attr, u32 *reg, bool is_write) { - return -ENXIO; + gpa_t addr; + int cpuid, ret, c; + struct kvm_vcpu *vcpu, *tmp_vcpu; + int vcpu_lock_idx = -1; + + cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> + KVM_DEV_ARM_VGIC_CPUID_SHIFT; + vcpu = kvm_get_vcpu(dev->kvm, cpuid); + addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; + + mutex_lock(&dev->kvm->lock); + + ret = vgic_init(dev->kvm); + if (ret) + goto out; + + if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { + ret = -EINVAL; + goto out; + } + + /* + * Any time a vcpu is run, vcpu_load is called which tries to grab the + * vcpu->mutex. 
By grabbing the vcpu->mutex of all VCPUs we ensure + * that no other VCPUs are run and fiddle with the vgic state while we + * access it. + */ + ret = -EBUSY; + kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) { + if (!mutex_trylock(&tmp_vcpu->mutex)) + goto out; + vcpu_lock_idx = c; + } + + switch (attr->group) { + case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: + ret = -EINVAL; + break; + case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: + ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg); + break; + default: + ret = -EINVAL; + break; + } + +out: + for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { + tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx); + mutex_unlock(&tmp_vcpu->mutex); + } + + mutex_unlock(&dev->kvm->lock); + return ret; } /* V2 ops */ -- cgit v1.2.1 From e4823a7a1b4a4f2549dd223f243779ab9510db22 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Thu, 3 Dec 2015 11:47:37 +0000 Subject: KVM: arm/arm64: vgic-new: Add GICH_VMCR accessors Since the GIC CPU interface is always virtualized by the hardware, we don't have CPU interface state information readily available in our emulation if userland wants to save or restore it. Fortunately the GIC hypervisor interface provides the VMCR register to access the required virtual CPU interface bits. Provide wrappers for GICv2 and GICv3 hosts to have access to this register. Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-v2.c | 29 +++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-v3.c | 22 ++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 21 +++++++++++++++++++++ 3 files changed, 72 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index fb5e65ceffd0..d943059ee6c5 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -174,3 +174,32 @@ void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr) { vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0; } + +void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) +{ + u32 vmcr; + + vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK; + vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & + GICH_VMCR_ALIAS_BINPOINT_MASK; + vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & + GICH_VMCR_BINPOINT_MASK; + vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & + GICH_VMCR_PRIMASK_MASK; + + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; +} + +void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) +{ + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr; + + vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> + GICH_VMCR_CTRL_SHIFT; + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> + GICH_VMCR_ALIAS_BINPOINT_SHIFT; + vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> + GICH_VMCR_BINPOINT_SHIFT; + vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> + GICH_VMCR_PRIMASK_SHIFT; +} diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index fb547da7a43d..8548297c6f76 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -160,3 +160,25 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr) { vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0; } + +void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) +{ + u32 vmcr; + + vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK; + vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; + vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; + vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; + + vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr; +} + +void 
vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) +{ + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr; + + vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT; + vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; + vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; + vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; +} diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index f8260268dd24..d2c1fd5a795f 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -27,6 +27,13 @@ #define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS) +struct vgic_vmcr { + u32 ctlr; + u32 abpr; + u32 bpr; + u32 pmr; +}; + struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid); bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq); @@ -40,6 +47,8 @@ void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val); +void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); +void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); @@ -49,6 +58,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); +void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); +void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); #else static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu) @@ -72,6 +83,16 @@ static inline void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) { } +static inline +void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) +{ +} + +static inline +void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) +{ +} + static inline int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address) { -- cgit v1.2.1 From 878c569e45066a76a2a841dab965e6d22c4e187e Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Thu, 3 Dec 2015 11:48:42 +0000 Subject: KVM: arm/arm64: vgic-new: Add userland GIC CPU interface access Using the VMCR accessors we provide access to GIC CPU interface state to userland by wiring it up to the existing userland interface. 
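As a rough sketch of the userland side (the attr encoding reuses the existing KVM_DEV_ARM_VGIC_CPUID_SHIFT/OFFSET convention from the uapi headers; vcpu_id, vgic_fd and the 0x04 GICC_PMR offset are illustrative), reading one VCPU's priority mask could look like:

	__u32 pmr;
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
		.attr  = ((__u64)vcpu_id << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x04,
		.addr  = (__u64)(unsigned long)&pmr,
	};

	if (!ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
		printf("GICC_PMR: 0x%x\n", pmr);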
[Marc: move and make VMCR accessors static, streamline MMIO handlers] Signed-off-by: Andre Przywara Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-kvm-device.c | 2 +- virt/kvm/arm/vgic/vgic-mmio-v2.c | 114 +++++++++++++++++++++++++++++++++++- virt/kvm/arm/vgic/vgic.h | 2 + 3 files changed, 116 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index 9ee27cb5842b..0130c4b147b7 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -274,7 +274,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev, switch (attr->group) { case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: - ret = -EINVAL; + ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg); break; case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg); diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index a25512212bc7..a21393637e4b 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -204,6 +204,84 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, } } +static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) +{ + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_set_vmcr(vcpu, vmcr); + else + vgic_v3_set_vmcr(vcpu, vmcr); +} + +static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) +{ + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_get_vmcr(vcpu, vmcr); + else + vgic_v3_get_vmcr(vcpu, vmcr); +} + +#define GICC_ARCH_VERSION_V2 0x2 + +/* These are for userland accesses only, there is no guest-facing emulation. */ +static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + struct vgic_vmcr vmcr; + u32 val; + + vgic_get_vmcr(vcpu, &vmcr); + + switch (addr & 0xff) { + case GIC_CPU_CTRL: + val = vmcr.ctlr; + break; + case GIC_CPU_PRIMASK: + val = vmcr.pmr; + break; + case GIC_CPU_BINPOINT: + val = vmcr.bpr; + break; + case GIC_CPU_ALIAS_BINPOINT: + val = vmcr.abpr; + break; + case GIC_CPU_IDENT: + val = ((PRODUCT_ID_KVM << 20) | + (GICC_ARCH_VERSION_V2 << 16) | + IMPLEMENTER_ARM); + break; + default: + return 0; + } + + return val; +} + +static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + unsigned long val) +{ + struct vgic_vmcr vmcr; + + vgic_get_vmcr(vcpu, &vmcr); + + switch (addr & 0xff) { + case GIC_CPU_CTRL: + vmcr.ctlr = val; + break; + case GIC_CPU_PRIMASK: + vmcr.pmr = val; + break; + case GIC_CPU_BINPOINT: + vmcr.bpr = val; + break; + case GIC_CPU_ALIAS_BINPOINT: + vmcr.abpr = val; + break; + } + + vgic_set_vmcr(vcpu, &vmcr); +} + static const struct vgic_register_region vgic_v2_dist_registers[] = { REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL, vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12, @@ -249,6 +327,27 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { VGIC_ACCESS_32bit | VGIC_ACCESS_8bit), }; +static const struct vgic_register_region vgic_v2_cpu_registers[] = { + REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL, + vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK, + vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT, + vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT, + vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4, + 
VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO, + vgic_mmio_read_raz, vgic_mmio_write_wi, 16, + VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT, + vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4, + VGIC_ACCESS_32bit), +}; + unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev) { dev->regions = vgic_v2_dist_registers; @@ -274,7 +373,9 @@ int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr) nr_regions = ARRAY_SIZE(vgic_v2_dist_registers); break; case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: - return -ENXIO; /* TODO: describe CPU i/f regs also */ + regions = vgic_v2_cpu_registers; + nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers); + break; default: return -ENXIO; } @@ -322,6 +423,17 @@ static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev, return ret; } +int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write, + int offset, u32 *val) +{ + struct vgic_io_device dev = { + .regions = vgic_v2_cpu_registers, + .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers), + }; + + return vgic_uaccess(vcpu, &dev, is_write, offset, val); +} + int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val) { diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index d2c1fd5a795f..de9dc7170c1b 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -47,6 +47,8 @@ void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val); +int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write, + int offset, u32 *val); void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, -- cgit v1.2.1 From 909777324588b40d431e6e3af0911ee62e0d00e3 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Tue, 1 Dec 2015 15:02:35 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_init: implement kvm_vgic_hyp_init Implements kvm_vgic_hyp_init and vgic_probe function. This uses the new firmware independent VGIC probing to support both ACPI and DT based systems (code from Marc Zyngier). The vgic_global struct is enriched with new fields populated by those functions. Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-init.c | 123 ++++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-v2.c | 64 ++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-v3.c | 49 +++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 9 ++++ 4 files changed, 245 insertions(+) create mode 100644 virt/kvm/arm/vgic/vgic-init.c (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c new file mode 100644 index 000000000000..4523beb029f3 --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include "vgic.h" + +/* GENERIC PROBE */ + +static void vgic_init_maintenance_interrupt(void *info) +{ + enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); +} + +static int vgic_cpu_notify(struct notifier_block *self, + unsigned long action, void *cpu) +{ + switch (action) { + case CPU_STARTING: + case CPU_STARTING_FROZEN: + vgic_init_maintenance_interrupt(NULL); + break; + case CPU_DYING: + case CPU_DYING_FROZEN: + disable_percpu_irq(kvm_vgic_global_state.maint_irq); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block vgic_cpu_nb = { + .notifier_call = vgic_cpu_notify, +}; + +static irqreturn_t vgic_maintenance_handler(int irq, void *data) +{ + /* + * We cannot rely on the vgic maintenance interrupt to be + * delivered synchronously. This means we can only use it to + * exit the VM, and we perform the handling of EOIed + * interrupts on the exit path (see vgic_process_maintenance). + */ + return IRQ_HANDLED; +} + +/** + * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable + * according to the host GIC model. Accordingly calls either + * vgic_v2/v3_probe which registers the KVM_DEVICE that can be + * instantiated by a guest later on . + */ +int kvm_vgic_hyp_init(void) +{ + const struct gic_kvm_info *gic_kvm_info; + int ret; + + gic_kvm_info = gic_get_kvm_info(); + if (!gic_kvm_info) + return -ENODEV; + + if (!gic_kvm_info->maint_irq) { + kvm_err("No vgic maintenance irq\n"); + return -ENXIO; + } + + switch (gic_kvm_info->type) { + case GIC_V2: + ret = vgic_v2_probe(gic_kvm_info); + break; + case GIC_V3: + ret = vgic_v3_probe(gic_kvm_info); + break; + default: + ret = -ENODEV; + }; + + if (ret) + return ret; + + kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq; + ret = request_percpu_irq(kvm_vgic_global_state.maint_irq, + vgic_maintenance_handler, + "vgic", kvm_get_running_vcpus()); + if (ret) { + kvm_err("Cannot register interrupt %d\n", + kvm_vgic_global_state.maint_irq); + return ret; + } + + ret = __register_cpu_notifier(&vgic_cpu_nb); + if (ret) { + kvm_err("Cannot register vgic CPU notifier\n"); + goto out_free_irq; + } + + on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); + + kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); + return 0; + +out_free_irq: + free_percpu_irq(kvm_vgic_global_state.maint_irq, + kvm_get_running_vcpus()); + return ret; +} diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index d943059ee6c5..09777c852b16 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include "vgic.h" @@ -203,3 +205,65 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT; } + +/** + * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT + * @node: pointer to the DT node + * + * Returns 0 if a GICv2 has been found, returns an error code otherwise + */ +int vgic_v2_probe(const struct gic_kvm_info *info) +{ + int ret; + u32 vtr; + + if (!info->vctrl.start) { + kvm_err("GICH not present in the firmware table\n"); + return -ENXIO; + } + + if (!PAGE_ALIGNED(info->vcpu.start)) { + kvm_err("GICV physical address 0x%llx not page aligned\n", + (unsigned long long)info->vcpu.start); + return -ENXIO; + } + + if 
(!PAGE_ALIGNED(resource_size(&info->vcpu))) { + kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n", + (unsigned long long)resource_size(&info->vcpu), + PAGE_SIZE); + return -ENXIO; + } + + kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start, + resource_size(&info->vctrl)); + if (!kvm_vgic_global_state.vctrl_base) { + kvm_err("Cannot ioremap GICH\n"); + return -ENOMEM; + } + + vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR); + kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1; + + ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base, + kvm_vgic_global_state.vctrl_base + + resource_size(&info->vctrl), + info->vctrl.start); + + if (ret) { + kvm_err("Cannot map VCTRL into hyp\n"); + iounmap(kvm_vgic_global_state.vctrl_base); + return ret; + } + + kvm_vgic_global_state.can_emulate_gicv2 = true; + kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); + + kvm_vgic_global_state.vcpu_base = info->vcpu.start; + kvm_vgic_global_state.type = VGIC_V2; + kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS; + + kvm_info("vgic-v2@%llx\n", info->vctrl.start); + + return 0; +} diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 8548297c6f76..de0e8e0b2625 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -15,6 +15,9 @@ #include #include #include +#include +#include +#include #include "vgic.h" @@ -182,3 +185,49 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; } + +/** + * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT + * @node: pointer to the DT node + * + * Returns 0 if a GICv3 has been found, returns an error code otherwise + */ +int vgic_v3_probe(const struct gic_kvm_info *info) +{ + u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2); + + /* + * The ListRegs field is 5 bits, but there is a architectural + * maximum of 16 list registers. Just ignore bit 4... 
+ */ + kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1; + kvm_vgic_global_state.can_emulate_gicv2 = false; + + if (!info->vcpu.start) { + kvm_info("GICv3: no GICV resource entry\n"); + kvm_vgic_global_state.vcpu_base = 0; + } else if (!PAGE_ALIGNED(info->vcpu.start)) { + pr_warn("GICV physical address 0x%llx not page aligned\n", + (unsigned long long)info->vcpu.start); + kvm_vgic_global_state.vcpu_base = 0; + } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) { + pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n", + (unsigned long long)resource_size(&info->vcpu), + PAGE_SIZE); + kvm_vgic_global_state.vcpu_base = 0; + } else { + kvm_vgic_global_state.vcpu_base = info->vcpu.start; + kvm_vgic_global_state.can_emulate_gicv2 = true; + kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); + kvm_info("vgic-v2@%llx\n", info->vcpu.start); + } + if (kvm_vgic_global_state.vcpu_base == 0) + kvm_info("disabling GICv2 emulation\n"); + kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3); + + kvm_vgic_global_state.vctrl_base = NULL; + kvm_vgic_global_state.type = VGIC_V3; + kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS; + + return 0; +} diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index de9dc7170c1b..f4244b6eb4b8 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -16,6 +16,8 @@ #ifndef __KVM_ARM_VGIC_NEW_H__ #define __KVM_ARM_VGIC_NEW_H__ +#include + #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ #define IMPLEMENTER_ARM 0x43b @@ -51,6 +53,7 @@ int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val); void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); +int vgic_v2_probe(const struct gic_kvm_info *info); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); @@ -62,6 +65,7 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); +int vgic_v3_probe(const struct gic_kvm_info *info); int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); #else static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu) @@ -95,6 +99,11 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) { } +static inline int vgic_v3_probe(const struct gic_kvm_info *info) +{ + return -ENODEV; +} + static inline int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address) { -- cgit v1.2.1 From 5e6431da8f3a04759ac8d77b7c98eec0de580343 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 21 Dec 2015 14:50:50 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_init: implement vgic_create This patch implements the vgic_creation function which is called on CREATE_IRQCHIP VM IOCTL (v2 only) or KVM_CREATE_DEVICE Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-init.c | 84 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 4523beb029f3..15d54284305b 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -22,6 +22,90 @@ #include #include "vgic.h" +/* CREATION */ + +/** + * kvm_vgic_create: triggered by the instantiation of the VGIC device by + * user space, either through the legacy 
KVM_CREATE_IRQCHIP ioctl (v2 only) + * or through the generic KVM_CREATE_DEVICE API ioctl. + * irqchip_in_kernel() tells you if this function succeeded or not. + */ +int kvm_vgic_create(struct kvm *kvm, u32 type) +{ + int i, vcpu_lock_idx = -1, ret; + struct kvm_vcpu *vcpu; + + mutex_lock(&kvm->lock); + + if (irqchip_in_kernel(kvm)) { + ret = -EEXIST; + goto out; + } + + /* + * This function is also called by the KVM_CREATE_IRQCHIP handler, + * which had no chance yet to check the availability of the GICv2 + * emulation. So check this here again. KVM_CREATE_DEVICE does + * the proper checks already. + */ + if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && + !kvm_vgic_global_state.can_emulate_gicv2) { + ret = -ENODEV; + goto out; + } + + /* + * Any time a vcpu is run, vcpu_load is called which tries to grab the + * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure + * that no other VCPUs are run while we create the vgic. + */ + ret = -EBUSY; + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!mutex_trylock(&vcpu->mutex)) + goto out_unlock; + vcpu_lock_idx = i; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (vcpu->arch.has_run_once) + goto out_unlock; + } + ret = 0; + + if (type == KVM_DEV_TYPE_ARM_VGIC_V2) + kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS; + else + kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS; + + if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) { + ret = -E2BIG; + goto out_unlock; + } + + kvm->arch.vgic.in_kernel = true; + kvm->arch.vgic.vgic_model = type; + + /* + * kvm_vgic_global_state.vctrl_base is set on vgic probe (kvm_arch_init) + * it is stored in distributor struct for asm save/restore purpose + */ + kvm->arch.vgic.vctrl_base = kvm_vgic_global_state.vctrl_base; + + kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; + kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; + kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF; + +out_unlock: + for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { + vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); + mutex_unlock(&vcpu->mutex); + } + +out: + mutex_unlock(&kvm->lock); + return ret; +} + /* GENERIC PROBE */ static void vgic_init_maintenance_interrupt(void *info) -- cgit v1.2.1 From ad275b8bb1e659b14120174d87e3c1fdc22e9978 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 21 Dec 2015 18:09:38 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_init: implement vgic_init This patch allocates and initializes the data structures used to model the vgic distributor and virtual cpu interfaces. At that stage the number of IRQs and number of virtual CPUs is frozen. Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-init.c | 217 ++++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-v2.c | 5 + virt/kvm/arm/vgic/vgic-v3.c | 5 + virt/kvm/arm/vgic/vgic.c | 4 + virt/kvm/arm/vgic/vgic.h | 8 ++ 5 files changed, 239 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 15d54284305b..bed3240bd634 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -22,6 +22,42 @@ #include #include "vgic.h" +/* + * Initialization rules: there are multiple stages to the vgic + * initialization, both for the distributor and the CPU interfaces. + * + * Distributor: + * + * - kvm_vgic_early_init(): initialization of static data that doesn't + * depend on any sizing information or emulation type. No allocation + * is allowed there. 
+ * + * - vgic_init(): allocation and initialization of the generic data + * structures that depend on sizing information (number of CPUs, + * number of interrupts). Also initializes the vcpu specific data + * structures. Can be executed lazily for GICv2. + * + * CPU Interface: + * + * - kvm_vgic_cpu_early_init(): initialization of static data that + * doesn't depend on any sizing information or emulation type. No + * allocation is allowed there. + */ + +/* EARLY INIT */ + +/* + * Those 2 functions should not be needed anymore but they + * still are called from arm.c + */ +void kvm_vgic_early_init(struct kvm *kvm) +{ +} + +void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu) +{ +} + /* CREATION */ /** @@ -29,6 +65,8 @@ * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only) * or through the generic KVM_CREATE_DEVICE API ioctl. * irqchip_in_kernel() tells you if this function succeeded or not. + * @kvm: kvm struct pointer + * @type: KVM_DEV_TYPE_ARM_VGIC_V[23] */ int kvm_vgic_create(struct kvm *kvm, u32 type) { @@ -106,6 +144,185 @@ out: return ret; } +/* INIT/DESTROY */ + +/** + * kvm_vgic_dist_init: initialize the dist data structures + * @kvm: kvm struct pointer + * @nr_spis: number of spis, frozen by caller + */ +static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0); + int i; + + dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL); + if (!dist->spis) + return -ENOMEM; + + /* + * In the following code we do not take the irq struct lock since + * no other action on irq structs can happen while the VGIC is + * not initialized yet: + * If someone wants to inject an interrupt or does a MMIO access, we + * require prior initialization in case of a virtual GICv3 or trigger + * initialization when using a virtual GICv2. + */ + for (i = 0; i < nr_spis; i++) { + struct vgic_irq *irq = &dist->spis[i]; + + irq->intid = i + VGIC_NR_PRIVATE_IRQS; + INIT_LIST_HEAD(&irq->ap_list); + spin_lock_init(&irq->irq_lock); + irq->vcpu = NULL; + irq->target_vcpu = vcpu0; + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) + irq->targets = 0; + else + irq->mpidr = 0; + } + return 0; +} + +/** + * kvm_vgic_vcpu_init: initialize the vcpu data structures and + * enable the VCPU interface + * @vcpu: the VCPU which's VGIC should be initialized + */ +static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + int i; + + INIT_LIST_HEAD(&vgic_cpu->ap_list_head); + spin_lock_init(&vgic_cpu->ap_list_lock); + + /* + * Enable and configure all SGIs to be edge-triggered and + * configure all PPIs as level-triggered. + */ + for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { + struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; + + INIT_LIST_HEAD(&irq->ap_list); + spin_lock_init(&irq->irq_lock); + irq->intid = i; + irq->vcpu = NULL; + irq->target_vcpu = vcpu; + irq->targets = 1U << vcpu->vcpu_id; + if (vgic_irq_is_sgi(i)) { + /* SGIs */ + irq->enabled = 1; + irq->config = VGIC_CONFIG_EDGE; + } else { + /* PPIs */ + irq->config = VGIC_CONFIG_LEVEL; + } + } + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_enable(vcpu); + else + vgic_v3_enable(vcpu); +} + +/* + * vgic_init: allocates and initializes dist and vcpu data structures + * depending on two dimensioning parameters: + * - the number of spis + * - the number of vcpus + * The function is generally called when nr_spis has been explicitly set + * by the guest through the KVM DEVICE API. 
If not nr_spis is set to 256. + * vgic_initialized() returns true when this function has succeeded. + * Must be called with kvm->lock held! + */ +int vgic_init(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + struct kvm_vcpu *vcpu; + int ret = 0, i; + + if (vgic_initialized(kvm)) + return 0; + + /* freeze the number of spis */ + if (!dist->nr_spis) + dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS; + + ret = kvm_vgic_dist_init(kvm, dist->nr_spis); + if (ret) + goto out; + + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_vgic_vcpu_init(vcpu); + + dist->initialized = true; +out: + return ret; +} + +static void kvm_vgic_dist_destroy(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + + mutex_lock(&kvm->lock); + + dist->ready = false; + dist->initialized = false; + + kfree(dist->spis); + kfree(dist->redist_iodevs); + dist->nr_spis = 0; + + mutex_unlock(&kvm->lock); +} + +void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + + INIT_LIST_HEAD(&vgic_cpu->ap_list_head); +} + +void kvm_vgic_destroy(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + int i; + + kvm_vgic_dist_destroy(kvm); + + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_vgic_vcpu_destroy(vcpu); +} + +/** + * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest + * is a GICv2. A GICv3 must be explicitly initialized by the guest using the + * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group. + * @kvm: kvm struct pointer + */ +int vgic_lazy_init(struct kvm *kvm) +{ + int ret = 0; + + if (unlikely(!vgic_initialized(kvm))) { + /* + * We only provide the automatic initialization of the VGIC + * for the legacy case of a GICv2. Any other type must + * be explicitly initialized once setup with the respective + * KVM device call. 
+ */ + if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) + return -EBUSY; + + mutex_lock(&kvm->lock); + ret = vgic_init(kvm); + mutex_unlock(&kvm->lock); + } + + return ret; +} + /* GENERIC PROBE */ static void vgic_init_maintenance_interrupt(void *info) diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 09777c852b16..fcbfa38e8c41 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -206,6 +206,11 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) GICH_VMCR_PRIMASK_SHIFT; } +/* not yet implemented */ +void vgic_v2_enable(struct kvm_vcpu *vcpu) +{ +} + /** * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT * @node: pointer to the DT node diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index de0e8e0b2625..d1c0285c56e9 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -186,6 +186,11 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; } +/* not yet implemented */ +void vgic_v3_enable(struct kvm_vcpu *vcpu) +{ +} + /** * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT * @node: pointer to the DT node diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 12ae84b4931f..331885528ead 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -257,6 +257,10 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid, trace_vgic_update_irq_pending(cpuid, intid, level); + ret = vgic_lazy_init(kvm); + if (ret) + return ret; + vcpu = kvm_get_vcpu(kvm, cpuid); if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS) return -EINVAL; diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index f4244b6eb4b8..5951551820b9 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -53,6 +53,7 @@ int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val); void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); +void vgic_v2_enable(struct kvm_vcpu *vcpu); int vgic_v2_probe(const struct gic_kvm_info *info); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); @@ -65,6 +66,7 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); +void vgic_v3_enable(struct kvm_vcpu *vcpu); int vgic_v3_probe(const struct gic_kvm_info *info); int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); #else @@ -99,6 +101,10 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) { } +static inline void vgic_v3_enable(struct kvm_vcpu *vcpu) +{ +} + static inline int vgic_v3_probe(const struct gic_kvm_info *info) { return -ENODEV; @@ -112,5 +118,7 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm, #endif void kvm_register_vgic_device(unsigned long type); +int vgic_lazy_init(struct kvm *kvm); +int vgic_init(struct kvm *kvm); #endif -- cgit v1.2.1 From b0442ee227e826afc4df16cdfb8bd6eef6a8f425 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Mon, 21 Dec 2015 15:04:42 +0100 Subject: KVM: arm/arm64: vgic-new: vgic_init: implement map_resources map_resources is the last initialization step. It is executed on first VCPU run. 
At that stage the code checks that userspace has provided the base addresses for the relevant VGIC regions, which depend on the type of VGIC that is exposed to the guest. Also we check if the two regions overlap. If the checks succeeded, we register the respective register frames with the kvm_io_bus framework. If we emulate a GICv2, the function also forces vgic_init execution if it has not been executed yet. Also we map the virtual GIC CPU interface onto the guest's CPU interface. Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-init.c | 28 +++++++++++++++++ virt/kvm/arm/vgic/vgic-v2.c | 69 +++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic-v3.c | 71 +++++++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 7 +++++ 4 files changed, 175 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index bed3240bd634..a1442f7c9c4d 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -323,6 +323,34 @@ int vgic_lazy_init(struct kvm *kvm) return ret; } +/* RESOURCE MAPPING */ + +/** + * Map the MMIO regions depending on the VGIC model exposed to the guest + * called on the first VCPU run. + * Also map the virtual CPU interface into the VM. + * v2/v3 derivatives call vgic_init if not already done. + * vgic_ready() returns true if this function has succeeded. + * @kvm: kvm struct pointer + */ +int kvm_vgic_map_resources(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + int ret = 0; + + mutex_lock(&kvm->lock); + if (!irqchip_in_kernel(kvm)) + goto out; + + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) + ret = vgic_v2_map_resources(kvm); + else + ret = vgic_v3_map_resources(kvm); +out: + mutex_unlock(&kvm->lock); + return ret; +} + /* GENERIC PROBE */ static void vgic_init_maintenance_interrupt(void *info) diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index fcbfa38e8c41..1fe031b7ba43 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -211,6 +211,75 @@ void vgic_v2_enable(struct kvm_vcpu *vcpu) { } +/* check for overlapping regions and for regions crossing the end of memory */ +static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base) +{ + if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base) + return false; + if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base) + return false; + + if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base) + return true; + if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base) + return true; + + return false; +} + +int vgic_v2_map_resources(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + int ret = 0; + + if (vgic_ready(kvm)) + goto out; + + if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) || + IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) { + kvm_err("Need to set vgic cpu and dist addresses first\n"); + ret = -ENXIO; + goto out; + } + + if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) { + kvm_err("VGIC CPU and dist frames overlap\n"); + ret = -EINVAL; + goto out; + } + + /* + * Initialize the vgic if this hasn't already been done on demand by + * accessing the vgic state from userspace. 
+ */ + ret = vgic_init(kvm); + if (ret) { + kvm_err("Unable to initialize VGIC dynamic data structures\n"); + goto out; + } + + ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2); + if (ret) { + kvm_err("Unable to register VGIC MMIO regions\n"); + goto out; + } + + ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, + kvm_vgic_global_state.vcpu_base, + KVM_VGIC_V2_CPU_SIZE, true); + if (ret) { + kvm_err("Unable to remap VGIC CPU to VCPU\n"); + goto out; + } + + dist->ready = true; + +out: + if (ret) + kvm_vgic_destroy(kvm); + return ret; +} + /** * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT * @node: pointer to the DT node diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index d1c0285c56e9..637ff2b85165 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -191,6 +191,77 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu) { } +/* check for overlapping regions and for regions crossing the end of memory */ +static bool vgic_v3_check_base(struct kvm *kvm) +{ + struct vgic_dist *d = &kvm->arch.vgic; + gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE; + + redist_size *= atomic_read(&kvm->online_vcpus); + + if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base) + return false; + if (d->vgic_redist_base + redist_size < d->vgic_redist_base) + return false; + + if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base) + return true; + if (d->vgic_redist_base + redist_size <= d->vgic_dist_base) + return true; + + return false; +} + +int vgic_v3_map_resources(struct kvm *kvm) +{ + int ret = 0; + struct vgic_dist *dist = &kvm->arch.vgic; + + if (vgic_ready(kvm)) + goto out; + + if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) || + IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) { + kvm_err("Need to set vgic distributor addresses first\n"); + ret = -ENXIO; + goto out; + } + + if (!vgic_v3_check_base(kvm)) { + kvm_err("VGIC redist and dist frames overlap\n"); + ret = -EINVAL; + goto out; + } + + /* + * For a VGICv3 we require the userland to explicitly initialize + * the VGIC before we need to use it. 
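+	 * Userspace does this through the KVM_DEV_ARM_VGIC_GRP_CTRL group
+	 * (KVM_DEV_ARM_VGIC_CTRL_INIT attribute).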
+ */ + if (!vgic_initialized(kvm)) { + ret = -EBUSY; + goto out; + } + + ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3); + if (ret) { + kvm_err("Unable to register VGICv3 dist MMIO regions\n"); + goto out; + } + + ret = vgic_register_redist_iodevs(kvm, dist->vgic_redist_base); + if (ret) { + kvm_err("Unable to register VGICv3 redist MMIO regions\n"); + goto out; + } + + dist->ready = true; + +out: + if (ret) + kvm_vgic_destroy(kvm); + return ret; +} + /** * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT * @node: pointer to the DT node diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 5951551820b9..7b300ca370b7 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -55,6 +55,7 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v2_enable(struct kvm_vcpu *vcpu); int vgic_v2_probe(const struct gic_kvm_info *info); +int vgic_v2_map_resources(struct kvm *kvm); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); @@ -68,6 +69,7 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_enable(struct kvm_vcpu *vcpu); int vgic_v3_probe(const struct gic_kvm_info *info); +int vgic_v3_map_resources(struct kvm *kvm); int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); #else static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu) @@ -110,6 +112,11 @@ static inline int vgic_v3_probe(const struct gic_kvm_info *info) return -ENODEV; } +static inline int vgic_v3_map_resources(struct kvm *kvm) +{ + return -ENODEV; +} + static inline int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address) { -- cgit v1.2.1 From f7b6985cc3d0f0f6f35990301d80d858c148e10c Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Wed, 2 Dec 2015 10:30:13 +0100 Subject: KVM: arm/arm64: vgic-new: Add vgic_v2/v3_enable Enable the VGIC operation by properly initialising the registers in the hypervisor GIC interface. Signed-off-by: Eric Auger Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-v2.c | 11 ++++++++++- virt/kvm/arm/vgic/vgic-v3.c | 23 ++++++++++++++++++++++- 2 files changed, 32 insertions(+), 2 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 1fe031b7ba43..8ad42c217770 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -206,9 +206,18 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) GICH_VMCR_PRIMASK_SHIFT; } -/* not yet implemented */ void vgic_v2_enable(struct kvm_vcpu *vcpu) { + /* + * By forcing VMCR to zero, the GIC will restore the binary + * points to their reset values. Anything else resets to zero + * anyway. + */ + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0; + vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0; + + /* Get the show on the road... 
*/ + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN; } /* check for overlapping regions and for regions crossing the end of memory */ diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 637ff2b85165..336a46115937 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -186,9 +186,30 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; } -/* not yet implemented */ void vgic_v3_enable(struct kvm_vcpu *vcpu) { + struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3; + + /* + * By forcing VMCR to zero, the GIC will restore the binary + * points to their reset values. Anything else resets to zero + * anyway. + */ + vgic_v3->vgic_vmcr = 0; + vgic_v3->vgic_elrsr = ~0; + + /* + * If we are emulating a GICv3, we do it in an non-GICv2-compatible + * way, so we force SRE to 1 to demonstrate this to the guest. + * This goes with the spec allowing the value to be RAO/WI. + */ + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + vgic_v3->vgic_sre = ICC_SRE_EL1_SRE; + else + vgic_v3->vgic_sre = 0; + + /* Get the show on the road... */ + vgic_v3->vgic_hcr = ICH_HCR_EN; } /* check for overlapping regions and for regions crossing the end of memory */ -- cgit v1.2.1 From 03f0c94c73b9d7d55e057e4035cf3127ac44d41e Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Fri, 25 Mar 2016 00:04:53 +0000 Subject: KVM: arm/arm64: vgic-new: Wire up irqfd injection Connect to the new VGIC to the irqfd framework, so that we can inject IRQs. GSI routing and MSI routing is not yet implemented. Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-irqfd.c | 52 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 virt/kvm/arm/vgic/vgic-irqfd.c (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c new file mode 100644 index 000000000000..c675513270bb --- /dev/null +++ b/virt/kvm/arm/vgic/vgic-irqfd.c @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2015, 2016 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +int kvm_irq_map_gsi(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *entries, + int gsi) +{ + return 0; +} + +int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned int irqchip, + unsigned int pin) +{ + return pin; +} + +int kvm_set_irq(struct kvm *kvm, int irq_source_id, + u32 irq, int level, bool line_status) +{ + unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS; + + trace_kvm_set_irq(irq, level, irq_source_id); + + BUG_ON(!vgic_initialized(kvm)); + + return kvm_vgic_inject_irq(kvm, 0, spi, level); +} + +/* MSI not implemented yet */ +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + return 0; +} -- cgit v1.2.1 From 568e8c901eaa62004640cad8b9773819f27461a0 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Tue, 22 Dec 2015 00:52:33 +0000 Subject: KVM: arm/arm64: vgic-new: implement mapped IRQ handling We now store the mapped hardware IRQ number in our struct, so we don't need the irq_phys_map for the new VGIC. Implement the hardware IRQ mapping on top of the reworked arch timer interface. Signed-off-by: Andre Przywara Reviewed-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 331885528ead..69b61abefa19 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -312,6 +312,47 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, return vgic_update_irq_pending(kvm, cpuid, intid, level, false); } +int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid, + bool level) +{ + return vgic_update_irq_pending(kvm, cpuid, intid, level, true); +} + +int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq) +{ + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); + + BUG_ON(!irq); + + spin_lock(&irq->irq_lock); + + irq->hw = true; + irq->hwintid = phys_irq; + + spin_unlock(&irq->irq_lock); + + return 0; +} + +int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq) +{ + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); + + BUG_ON(!irq); + + if (!vgic_initialized(vcpu->kvm)) + return -EAGAIN; + + spin_lock(&irq->irq_lock); + + irq->hw = false; + irq->hwintid = 0; + + spin_unlock(&irq->irq_lock); + + return 0; +} + /** * vgic_prune_ap_list - Remove non-relevant interrupts from the list * @@ -564,3 +605,15 @@ void vgic_kick_vcpus(struct kvm *kvm) kvm_vcpu_kick(vcpu); } } + +bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq) +{ + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); + bool map_is_active; + + spin_lock(&irq->irq_lock); + map_is_active = irq->hw && irq->active; + spin_unlock(&irq->irq_lock); + + return map_is_active; +} -- cgit v1.2.1 From efffe55af5e16f7935aa0175cf25c386f08219f5 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 16 Mar 2016 15:06:41 +0000 Subject: KVM: arm/arm64: vgic-new: enable build Now that the new VGIC implementation has reached feature parity with the old one, add the new files to the build system and add a Kconfig option to switch between the two versions. We set the default to the new version to get maximum test coverage, in case people experience problems they can switch back to the old behaviour if needed. 
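The vgic-v2-sr.c hunk below keeps the hyp save/restore code building against either implementation: under CONFIG_KVM_NEW_VGIC the legacy vgic_v2_params symbol is simply aliased to the new kvm_vgic_global_state object. A minimal standalone sketch of that aliasing pattern, with invented names (NEW_IMPL, struct impl_state, new_state, old_state) standing in for the real Kconfig symbol and vgic structures:

    #include <stdio.h>

    struct impl_state {
            int nr_lr;
    };

    /* The new implementation exports its state under a new name... */
    struct impl_state new_state = { .nr_lr = 4 };

    #ifdef NEW_IMPL
    /* ...and legacy references keep building because the old name is an alias. */
    #define old_state new_state
    #else
    struct impl_state old_state = { .nr_lr = 2 };
    #endif

    int main(void)
    {
            /* Code written against the old name picks up whichever variant was built. */
            printf("%d list registers\n", old_state.nr_lr);
            return 0;
    }

Built with -DNEW_IMPL the old name resolves to the new object; built without it, the old definition is used, so code written against the old name needs no source changes.
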
Signed-off-by: Andre Przywara Acked-by: Christoffer Dall --- virt/kvm/arm/hyp/vgic-v2-sr.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'virt') diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c index caac41f48815..a3f12b3b277b 100644 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c @@ -21,7 +21,12 @@ #include +#ifdef CONFIG_KVM_NEW_VGIC +extern struct vgic_global kvm_vgic_global_state; +#define vgic_v2_params kvm_vgic_global_state +#else extern struct vgic_params vgic_v2_params; +#endif static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, void __iomem *base) -- cgit v1.2.1 From 35a2d58588f0992627e74b447ccab21570544c86 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 20 May 2016 15:25:28 +0200 Subject: KVM: arm/arm64: vgic-new: Synchronize changes to active state When modifying the active state of an interrupt via the MMIO interface, we should ensure that the write has the intended effect. If a guest sets an interrupt to active, but that interrupt is already flushed into a list register on a running VCPU, then that VCPU will write the active state back into the struct vgic_irq upon returning from the guest and syncing its state. This is a non-benign race, because the guest can observe that an interrupt is not active, and it can have a reasonable expectations that other VCPUs will not ack any IRQs, and then set the state to active, and expect it to stay that way. Currently we are not honoring this case. Thefore, change both the SACTIVE and CACTIVE mmio handlers to stop the world, change the irq state, potentially queue the irq if we're setting it to active, and then continue. We take this chance to slightly optimize these functions by not stopping the world when touching private interrupts where there is inherently no possible race. Signed-off-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-mmio.c | 105 ++++++++++++++++++++++++++---------------- 1 file changed, 66 insertions(+), 39 deletions(-) (limited to 'virt') diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 4ef35719fcbe..059595ec3da0 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -173,6 +173,66 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, return value; } +static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, + bool new_active_state) +{ + spin_lock(&irq->irq_lock); + /* + * If this virtual IRQ was written into a list register, we + * have to make sure the CPU that runs the VCPU thread has + * synced back LR state to the struct vgic_irq. We can only + * know this for sure, when either this irq is not assigned to + * anyone's AP list anymore, or the VCPU thread is not + * running on any CPUs. + * + * In the opposite case, we know the VCPU thread may be on its + * way back from the guest and still has to sync back this + * IRQ, so we release and re-acquire the spin_lock to let the + * other thread sync back the IRQ. 
+ */ + while (irq->vcpu && /* IRQ may have state in an LR somewhere */ + irq->vcpu->cpu != -1) { /* VCPU thread is running */ + BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS); + cond_resched_lock(&irq->irq_lock); + } + + irq->active = new_active_state; + if (new_active_state) + vgic_queue_irq_unlock(vcpu->kvm, irq); + else + spin_unlock(&irq->irq_lock); +} + +/* + * If we are fiddling with an IRQ's active state, we have to make sure the IRQ + * is not queued on some running VCPU's LRs, because then the change to the + * active state can be overwritten when the VCPU's state is synced coming back + * from the guest. + * + * For shared interrupts, we have to stop all the VCPUs because interrupts can + * be migrated while we don't hold the IRQ locks and we don't want to be + * chasing moving targets. + * + * For private interrupts, we only have to make sure the single and only VCPU + * that can potentially queue the IRQ is stopped. + */ +static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) +{ + if (intid < VGIC_NR_PRIVATE_IRQS) + kvm_arm_halt_vcpu(vcpu); + else + kvm_arm_halt_guest(vcpu->kvm); +} + +/* See vgic_change_active_prepare */ +static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) +{ + if (intid < VGIC_NR_PRIVATE_IRQS) + kvm_arm_resume_vcpu(vcpu); + else + kvm_arm_resume_guest(vcpu->kvm); +} + void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val) @@ -180,32 +240,12 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, u32 intid = VGIC_ADDR_TO_INTID(addr, 1); int i; - kvm_arm_halt_guest(vcpu->kvm); + vgic_change_active_prepare(vcpu, intid); for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - - spin_lock(&irq->irq_lock); - /* - * If this virtual IRQ was written into a list register, we - * have to make sure the CPU that runs the VCPU thread has - * synced back LR state to the struct vgic_irq. We can only - * know this for sure, when either this irq is not assigned to - * anyone's AP list anymore, or the VCPU thread is not - * running on any CPUs. - * - * In the opposite case, we know the VCPU thread may be on its - * way back from the guest and still has to sync back this - * IRQ, so we release and re-acquire the spin_lock to let the - * other thread sync back the IRQ. - */ - while (irq->vcpu && /* IRQ may have state in an LR somewhere */ - irq->vcpu->cpu != -1) /* VCPU thread is running */ - cond_resched_lock(&irq->irq_lock); - - irq->active = false; - spin_unlock(&irq->irq_lock); + vgic_mmio_change_active(vcpu, irq, false); } - kvm_arm_resume_guest(vcpu->kvm); + vgic_change_active_finish(vcpu, intid); } void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, @@ -215,25 +255,12 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, u32 intid = VGIC_ADDR_TO_INTID(addr, 1); int i; + vgic_change_active_prepare(vcpu, intid); for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - - spin_lock(&irq->irq_lock); - - /* - * If the IRQ was already active or there is no target VCPU - * assigned at the moment, then just proceed. - */ - if (irq->active || !irq->target_vcpu) { - irq->active = true; - - spin_unlock(&irq->irq_lock); - continue; - } - - irq->active = true; - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_mmio_change_active(vcpu, irq, true); } + vgic_change_active_finish(vcpu, intid); } unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu, -- cgit v1.2.1
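
The SACTIVE/CACTIVE handlers above also serve userspace save/restore of the active state, since the KVM_DEV_ARM_VGIC_GRP_DIST_REGS group (seen earlier in vgic_attr_regs_access()) exposes the distributor registers, including the GICD_ISACTIVER/ICACTIVER banks. As a rough sketch of that userspace side, the snippet below sets one SPI active through the KVM device API. It is an illustration only: vgic_set_spi_active(), vgic_fd, intid and vcpu_index are invented names (vgic_fd standing for the fd returned by KVM_CREATE_DEVICE for KVM_DEV_TYPE_ARM_VGIC_V2), the attr layout follows the arm-vgic device documentation, and error handling is omitted.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * GICD_ISACTIVERn: one bit per interrupt, 32 interrupts per 32-bit
     * register, starting at distributor offset 0x300 (the ICACTIVER clear
     * bank starts at 0x380).
     */
    static int vgic_set_spi_active(int vgic_fd, unsigned int intid,
                                   unsigned int vcpu_index)
    {
            uint32_t reg = 1u << (intid % 32);
            uint64_t offset = 0x300 + (intid / 32) * 4;
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                    /* vcpu index in bits [39:32], register offset in bits [31:0] */
                    .attr  = ((uint64_t)vcpu_index << 32) | offset,
                    .addr  = (uint64_t)(uintptr_t)&reg,
            };

            return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
    }

Clearing an interrupt works the same way against the ICACTIVER bank at offset 0x380; both writes should end up in the vgic_mmio_write_sactive()/vgic_mmio_write_cactive() handlers reworked above, so the state seen by userspace stays consistent with whatever was live in the list registers.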