author | Andre Przywara <andre.przywara@arm.com> | 2016-07-15 14:43:33 +0300 |
---|---|---|
committer | Marc Zyngier <marc.zyngier@arm.com> | 2016-07-18 20:14:36 +0300 |
commit | 3802411d01880c4283426d22653e011159b1c947 | |
tree | 07aa88896048f97dc8c8a3156c4468410c6692df /virt/kvm/arm/vgic/vgic.c | |
parent | 424c33830f53f248a68da125e70d9a4d95a8e010 | |
download | linux-3802411d01880c4283426d22653e011159b1c947.tar.xz | |
KVM: arm64: vgic-its: Connect LPIs to the VGIC emulation
LPIs are dynamically created (mapped) at guest runtime and their actual
number can be quite high, but they are mostly assigned using a very
sparse allocation scheme. So arrays are not an ideal data structure to
hold the information.
We use a spinlock-protected linked list to hold all mapped LPIs, each
represented by its struct vgic_irq. In our locking order, this new lock
sits between the ap_list_lock and the vgic_irq lock.
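As a minimal sketch of what that ordering means in practice (the helper below is hypothetical and not part of the patch; only the ap_list_lock -> lpi_list_lock -> irq_lock nesting is taken from it, and the struct fields are assumed to follow the kvm/arm_vgic.h definitions from this series):

```c
/*
 * Hypothetical helper, for illustration only: demonstrates the lock
 * nesting vgic_cpu->ap_list_lock -> kvm->lpi_list_lock -> vgic_irq->irq_lock.
 */
static void lpi_locking_example(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);   /* outermost */
        spin_lock(&dist->lpi_list_lock);                /* new lock, in between */
        spin_lock(&irq->irq_lock);                      /* innermost, per-IRQ */

        /* ... inspect or update the LPI state here ... */

        spin_unlock(&irq->irq_lock);
        spin_unlock(&dist->lpi_list_lock);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
```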
We also store a pointer to that struct vgic_irq in our struct its_itte,
so we can access it easily.
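The ITS side of that change is outside the vgic.c diff below. As a rough sketch only (the field layout is assumed, not taken from this diff; the real definition lives in virt/kvm/arm/vgic/vgic-its.c and may differ), the per-ITTE bookkeeping would carry the pointer like this:

```c
/* Sketch only: field layout assumed, not taken from this diff. */
struct its_itte {
        struct list_head itte_list;        /* links all ITTEs of one device */

        struct vgic_irq *irq;              /* the mapped LPI; holds a reference */
        struct its_collection *collection; /* target collection/redistributor */
        u32 lpi;                           /* virtual LPI number (INTID) */
        u32 event_id;                      /* event ID programmed by the guest */
};
```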
Finally we call our new vgic_get_lpi() from vgic_get_irq(), so the
VGIC code transparently gets access to LPIs.
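As a hedged usage sketch (the caller and the pending-state handling are made up for illustration; only the vgic_get_irq()/vgic_put_irq() pairing and the refcount rule come from the patch), a caller that may now be handed an LPI looks roughly like this:

```c
/* Hypothetical caller: set an interrupt (possibly an LPI) pending by INTID. */
static int example_set_pending(struct kvm *kvm, struct kvm_vcpu *vcpu,
                               u32 intid)
{
        struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);

        if (!irq)                       /* reserved or unmapped INTID */
                return -EINVAL;

        spin_lock(&irq->irq_lock);
        irq->pending = true;            /* field name assumed for illustration */
        spin_unlock(&irq->irq_lock);

        /* Drop the reference vgic_get_irq() took; an LPI is freed on its last put. */
        vgic_put_irq(kvm, irq);

        return 0;
}
```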
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'virt/kvm/arm/vgic/vgic.c')
-rw-r--r-- | virt/kvm/arm/vgic/vgic.c | 63 |
1 file changed, 57 insertions, 6 deletions
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index d3ba1b4227e7..53299fc93c15 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -36,7 +36,8 @@ struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
  * its->cmd_lock (mutex)
  *   its->its_lock (mutex)
  *     vgic_cpu->ap_list_lock
- *       vgic_irq->irq_lock
+ *       kvm->lpi_list_lock
+ *         vgic_irq->irq_lock
  *
  * If you need to take multiple locks, always take the upper lock first,
  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
@@ -51,6 +52,41 @@ struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
  *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  */
 
+/*
+ * Iterate over the VM's list of mapped LPIs to find the one with a
+ * matching interrupt ID and return a reference to the IRQ structure.
+ */
+static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
+{
+        struct vgic_dist *dist = &kvm->arch.vgic;
+        struct vgic_irq *irq = NULL;
+
+        spin_lock(&dist->lpi_list_lock);
+
+        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+                if (irq->intid != intid)
+                        continue;
+
+                /*
+                 * This increases the refcount, the caller is expected to
+                 * call vgic_put_irq() later once it's finished with the IRQ.
+                 */
+                kref_get(&irq->refcount);
+                goto out_unlock;
+        }
+        irq = NULL;
+
+out_unlock:
+        spin_unlock(&dist->lpi_list_lock);
+
+        return irq;
+}
+
+/*
+ * This looks up the virtual interrupt ID to get the corresponding
+ * struct vgic_irq. It also increases the refcount, so any caller is expected
+ * to call vgic_put_irq() once it's finished with this IRQ.
+ */
 struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                               u32 intid)
 {
@@ -62,9 +98,9 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
         if (intid <= VGIC_MAX_SPI)
                 return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
 
-        /* LPIs are not yet covered */
+        /* LPIs */
         if (intid >= VGIC_MIN_LPI)
-                return NULL;
+                return vgic_get_lpi(kvm, intid);
 
         WARN(1, "Looking up struct vgic_irq for reserved INTID");
         return NULL;
@@ -78,18 +114,33 @@ static void vgic_get_irq_kref(struct vgic_irq *irq)
         kref_get(&irq->refcount);
 }
 
-/* The refcount should never drop to 0 at the moment. */
+/*
+ * We can't do anything in here, because we lack the kvm pointer to
+ * lock and remove the item from the lpi_list. So we keep this function
+ * empty and use the return value of kref_put() to trigger the freeing.
+ */
 static void vgic_irq_release(struct kref *ref)
 {
-        WARN_ON(1);
 }
 
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
+        struct vgic_dist *dist;
+
         if (irq->intid < VGIC_MIN_LPI)
                 return;
 
-        kref_put(&irq->refcount, vgic_irq_release);
+        if (!kref_put(&irq->refcount, vgic_irq_release))
+                return;
+
+        dist = &kvm->arch.vgic;
+
+        spin_lock(&dist->lpi_list_lock);
+        list_del(&irq->lpi_list);
+        dist->lpi_list_count--;
+        spin_unlock(&dist->lpi_list_lock);
+
+        kfree(irq);
 }
 
 /**