 include/kvm/arm_vgic.h        |  3
 virt/kvm/arm/vgic/vgic-init.c |  5
 virt/kvm/arm/vgic/vgic-its.c  | 49
 virt/kvm/arm/vgic/vgic.h      |  2
 4 files changed, 59 insertions, 0 deletions
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 7a30524a80ee..ded50a30e2d5 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -249,6 +249,9 @@ struct vgic_dist {
 	struct list_head	lpi_list_head;
 	int			lpi_list_count;
 
+	/* LPI translation cache */
+	struct list_head	lpi_translation_cache;
+
 	/* used by vgic-debug */
 	struct vgic_state_iter *iter;
 
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index bdbc297d06fb..80127ca9269f 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -53,6 +53,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
 	INIT_LIST_HEAD(&dist->lpi_list_head);
+	INIT_LIST_HEAD(&dist->lpi_translation_cache);
 	raw_spin_lock_init(&dist->lpi_list_lock);
 }
 
@@ -294,6 +295,7 @@ int vgic_init(struct kvm *kvm)
 	}
 
 	if (vgic_has_its(kvm)) {
+		vgic_lpi_translation_cache_init(kvm);
 		ret = vgic_v4_init(kvm);
 		if (ret)
 			goto out;
@@ -335,6 +337,9 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 		INIT_LIST_HEAD(&dist->rd_regions);
 	}
 
+	if (vgic_has_its(kvm))
+		vgic_lpi_translation_cache_destroy(kvm);
+
 	if (vgic_supports_direct_msis(kvm))
 		vgic_v4_teardown(kvm);
 }
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 482036612adf..0e5c1519bbe2 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -138,6 +138,14 @@ struct its_ite {
 	u32 event_id;
 };
 
+struct vgic_translation_cache_entry {
+	struct list_head	entry;
+	phys_addr_t		db;
+	u32			devid;
+	u32			eventid;
+	struct vgic_irq		*irq;
+};
+
 /**
  * struct vgic_its_abi - ITS abi ops and settings
  * @cte_esz: collection table entry size
@@ -1657,6 +1665,45 @@ out:
 	return ret;
 }
 
+/* Default is 16 cached LPIs per vcpu */
+#define LPI_DEFAULT_PCPU_CACHE_SIZE	16
+
+void vgic_lpi_translation_cache_init(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	unsigned int sz;
+	int i;
+
+	if (!list_empty(&dist->lpi_translation_cache))
+		return;
+
+	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
+
+	for (i = 0; i < sz; i++) {
+		struct vgic_translation_cache_entry *cte;
+
+		/* An allocation failure is not fatal */
+		cte = kzalloc(sizeof(*cte), GFP_KERNEL);
+		if (WARN_ON(!cte))
+			break;
+
+		INIT_LIST_HEAD(&cte->entry);
+		list_add(&cte->entry, &dist->lpi_translation_cache);
+	}
+}
+
+void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_translation_cache_entry *cte, *tmp;
+
+	list_for_each_entry_safe(cte, tmp,
+				 &dist->lpi_translation_cache, entry) {
+		list_del(&cte->entry);
+		kfree(cte);
+	}
+}
+
 #define INITIAL_BASER_VALUE						  \
 	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
 	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
@@ -1685,6 +1732,8 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 			kfree(its);
 			return ret;
 		}
+
+		vgic_lpi_translation_cache_init(dev->kvm);
 	}
 
 	mutex_init(&its->its_lock);
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 797e05004d80..a25b790ffa4e 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -307,6 +307,8 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq);
 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
+void vgic_lpi_translation_cache_init(struct kvm *kvm);
+void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
 bool vgic_supports_direct_msis(struct kvm *kvm);
 int vgic_v4_init(struct kvm *kvm);
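
The patch above only introduces the cache structure and its init/teardown; the code that actually populates and queries the cache arrives in later patches of the series. As a rough, self-contained illustration of the intended data structure (a preallocated pool of entries keyed by the doorbell/devid/eventid triple, sized at 16 entries per vCPU), the following userspace sketch models a hypothetical lookup. The names cache_entry, cache_init and cache_lookup are invented for this example and do not exist in the kernel; the doorbell address and INTID values in main() are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cache_entry {
	uint64_t db;		/* doorbell (GITS_TRANSLATER) address */
	uint32_t devid;
	uint32_t eventid;
	int      intid;		/* stands in for the cached vgic_irq */
	struct cache_entry *next;
};

static struct cache_entry *cache_head;

/* Mirrors the sizing rule: preallocate 16 entries per vCPU. */
static void cache_init(unsigned int nr_vcpus)
{
	unsigned int i, sz = nr_vcpus * 16;

	for (i = 0; i < sz; i++) {
		struct cache_entry *cte = calloc(1, sizeof(*cte));

		if (!cte)
			break;		/* an allocation failure is not fatal */
		cte->intid = -1;	/* empty slot */
		cte->next = cache_head;
		cache_head = cte;
	}
}

/* Hypothetical lookup by (db, devid, eventid); returns -1 on a miss. */
static int cache_lookup(uint64_t db, uint32_t devid, uint32_t eventid)
{
	struct cache_entry *cte;

	for (cte = cache_head; cte; cte = cte->next)
		if (cte->intid >= 0 && cte->db == db &&
		    cte->devid == devid && cte->eventid == eventid)
			return cte->intid;
	return -1;
}

int main(void)
{
	cache_init(4);			/* e.g. a 4-vCPU guest */
	cache_head->db = 0x8080000;	/* populate one slot by hand */
	cache_head->devid = 1;
	cache_head->eventid = 32;
	cache_head->intid = 8192;	/* LPI INTIDs start at 8192 */
	printf("hit:  %d\n", cache_lookup(0x8080000, 1, 32));
	printf("miss: %d\n", cache_lookup(0x8080000, 1, 33));
	return 0;
}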