author      Oliver Upton <oliver.upton@linux.dev>      2024-04-22 23:01:50 +0300
committer   Marc Zyngier <maz@kernel.org>              2024-04-25 15:19:56 +0300
commit      ec39bbfd55d07de2e2d4111f35c7ad9523c89ec3 (patch)
tree        31c347027da3afb6d0cd6796d8886134c249a770
parent      e64f2918c6e7a2c2cbf310d1b571d1a886b91475 (diff)
KVM: arm64: vgic-its: Rip out the global translation cache
The MSI injection fast path has been transitioned away from the global
translation cache. Rip it out.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-12-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
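
For context, the per-ITS cache that remains after this change keys each translation by (devid, eventid) and holds a reference on the cached IRQ: storing a new translation must drop the reference held by any entry it displaces, and invalidating the cache must drop every remaining reference. The standalone C sketch below models that reference-counting discipline in userspace. It is illustrative only and not part of the patch: the toy_* names, the small linear table standing in for the kernel's xarray, and the devid/eventid key packing are assumptions made for the example, not the kernel's actual helpers.

/*
 * Userspace sketch (not kernel code) of the per-ITS translation cache
 * pattern this commit leaves in place: a per-ITS map from a packed
 * (devid, eventid) key to a reference-counted IRQ object. The kernel
 * uses an xarray; a tiny linear table stands in for it here, and the
 * key layout is an assumption for illustration.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_irq {
	uint32_t intid;
	int refcount;
};

static void irq_get(struct toy_irq *irq) { irq->refcount++; }
static void irq_put(struct toy_irq *irq) { irq->refcount--; }

/* Assumed key packing: device ID in the high word, event ID in the low word. */
static uint64_t cache_key(uint32_t devid, uint32_t eventid)
{
	return ((uint64_t)devid << 32) | eventid;
}

#define CACHE_SLOTS 16

struct toy_its {
	uint64_t keys[CACHE_SLOTS];
	struct toy_irq *irqs[CACHE_SLOTS];
};

/* Cache a translation; drop the reference of any entry it displaces. */
static void cache_translation(struct toy_its *its, uint32_t devid,
			      uint32_t eventid, struct toy_irq *irq)
{
	uint64_t key = cache_key(devid, eventid);
	size_t i, free_slot = CACHE_SLOTS;

	/* The cache owns a reference to whatever it points at. */
	irq_get(irq);

	for (i = 0; i < CACHE_SLOTS; i++) {
		if (its->irqs[i] && its->keys[i] == key) {
			/* An entry for the same key already exists: put its reference. */
			irq_put(its->irqs[i]);
			its->irqs[i] = irq;
			return;
		}
		if (!its->irqs[i] && free_slot == CACHE_SLOTS)
			free_slot = i;
	}

	if (free_slot < CACHE_SLOTS) {
		its->keys[free_slot] = key;
		its->irqs[free_slot] = irq;
	} else {
		/* No room: give the reference back (the real xarray can grow). */
		irq_put(irq);
	}
}

/* Invalidate everything, dropping the cache's references. */
static void invalidate_cache(struct toy_its *its)
{
	for (size_t i = 0; i < CACHE_SLOTS; i++) {
		if (its->irqs[i]) {
			irq_put(its->irqs[i]);
			its->irqs[i] = NULL;
		}
	}
}

int main(void)
{
	struct toy_its its = { 0 };
	struct toy_irq lpi = { .intid = 8192, .refcount = 1 };

	cache_translation(&its, 1, 42, &lpi);
	cache_translation(&its, 1, 42, &lpi);	/* same key: old reference dropped */
	printf("refcount while cached: %d\n", lpi.refcount);	/* 2 */

	invalidate_cache(&its);
	printf("refcount after invalidate: %d\n", lpi.refcount);	/* 1 */
	return 0;
}

Built with any C99 compiler, the demo prints a refcount of 2 while the translation is cached and 1 after invalidation, mirroring the xa_store()/vgic_put_irq() pairing and the xa_for_each() teardown loop visible in the diff below.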
-rw-r--r--   arch/arm64/kvm/vgic/vgic-init.c |   7
-rw-r--r--   arch/arm64/kvm/vgic/vgic-its.c  | 121
-rw-r--r--   arch/arm64/kvm/vgic/vgic.h      |   2
-rw-r--r--   include/kvm/arm_vgic.h          |   3
4 files changed, 4 insertions, 129 deletions
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index f20941f83a07..6ee42f395253 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -53,7 +53,6 @@ void kvm_vgic_early_init(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	INIT_LIST_HEAD(&dist->lpi_translation_cache);
 	raw_spin_lock_init(&dist->lpi_list_lock);
 	xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
 }
@@ -305,9 +304,6 @@ int vgic_init(struct kvm *kvm)
 		}
 	}
 
-	if (vgic_has_its(kvm))
-		vgic_lpi_translation_cache_init(kvm);
-
 	/*
 	 * If we have GICv4.1 enabled, unconditionally request enable the
 	 * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
@@ -361,9 +357,6 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
 	}
 
-	if (vgic_has_its(kvm))
-		vgic_lpi_translation_cache_destroy(kvm);
-
 	if (vgic_supports_direct_msis(kvm))
 		vgic_v4_teardown(kvm);
 
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 9a517faa43ae..bb7f4fd35b2b 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -149,14 +149,6 @@ struct its_ite {
 	u32 event_id;
 };
 
-struct vgic_translation_cache_entry {
-	struct list_head entry;
-	phys_addr_t db;
-	u32 devid;
-	u32 eventid;
-	struct vgic_irq *irq;
-};
-
 /**
  * struct vgic_its_abi - ITS abi ops and settings
  * @cte_esz: collection table entry size
@@ -568,96 +560,34 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 				       struct vgic_irq *irq)
 {
 	unsigned long cache_key = vgic_its_cache_key(devid, eventid);
-	struct vgic_dist *dist = &kvm->arch.vgic;
-	struct vgic_translation_cache_entry *cte;
 	struct vgic_irq *old;
-	unsigned long flags;
-	phys_addr_t db;
 
 	/* Do not cache a directly injected interrupt */
 	if (irq->hw)
 		return;
 
-	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
-
-	if (unlikely(list_empty(&dist->lpi_translation_cache)))
-		goto out;
-
-	db = its->vgic_its_base + GITS_TRANSLATER;
-
-	/* Always reuse the last entry (LRU policy) */
-	cte = list_last_entry(&dist->lpi_translation_cache,
-			      typeof(*cte), entry);
-
-	/*
-	 * Caching the translation implies having an extra reference
-	 * to the interrupt, so drop the potential reference on what
-	 * was in the cache, and increment it on the new interrupt.
-	 */
-	if (cte->irq)
-		vgic_put_irq(kvm, cte->irq);
-
 	/*
 	 * The irq refcount is guaranteed to be nonzero while holding the
 	 * its_lock, as the ITE (and the reference it holds) cannot be freed.
 	 */
 	lockdep_assert_held(&its->its_lock);
-
-	/*
-	 * Yes, two references are necessary at the moment:
-	 *  - One for the global LPI translation cache
-	 *  - Another for the translation cache belonging to @its
-	 *
-	 * This will soon disappear.
-	 */
-	vgic_get_irq_kref(irq);
 	vgic_get_irq_kref(irq);
 
-	cte->db = db;
-	cte->devid = devid;
-	cte->eventid = eventid;
-	cte->irq = irq;
-
-	/* Move the new translation to the head of the list */
-	list_move(&cte->entry, &dist->lpi_translation_cache);
-
-	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
-
 	/*
-	 * The per-ITS cache is a perfect cache, so it may already have an
-	 * identical translation even if it were missing from the global
-	 * cache. Ensure we don't leak a reference if that is the case.
+	 * We could have raced with another CPU caching the same
+	 * translation behind our back, ensure we don't leak a
+	 * reference if that is the case.
 	 */
 	old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
 	if (old)
 		vgic_put_irq(kvm, old);
-
-out:
-	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 }
 
 static void vgic_its_invalidate_cache(struct vgic_its *its)
 {
 	struct kvm *kvm = its->dev->kvm;
-	struct vgic_dist *dist = &kvm->arch.vgic;
-	struct vgic_translation_cache_entry *cte;
-	unsigned long flags, idx;
 	struct vgic_irq *irq;
-
-	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
-
-	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
-		/*
-		 * If we hit a NULL entry, there is nothing after this
-		 * point.
-		 */
-		if (!cte->irq)
-			break;
-
-		vgic_put_irq(kvm, cte->irq);
-		cte->irq = NULL;
-	}
-
-	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	unsigned long idx;
 
 	xa_for_each(&its->translation_cache, idx, irq) {
 		xa_erase(&its->translation_cache, idx);
@@ -1880,47 +1810,6 @@ out:
 	return ret;
 }
 
-/* Default is 16 cached LPIs per vcpu */
-#define LPI_DEFAULT_PCPU_CACHE_SIZE 16
-
-void vgic_lpi_translation_cache_init(struct kvm *kvm)
-{
-	struct vgic_dist *dist = &kvm->arch.vgic;
-	unsigned int sz;
-	int i;
-
-	if (!list_empty(&dist->lpi_translation_cache))
-		return;
-
-	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
-
-	for (i = 0; i < sz; i++) {
-		struct vgic_translation_cache_entry *cte;
-
-		/* An allocation failure is not fatal */
-		cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT);
-		if (WARN_ON(!cte))
-			break;
-
-		INIT_LIST_HEAD(&cte->entry);
-		list_add(&cte->entry, &dist->lpi_translation_cache);
-	}
-}
-
-void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
-{
-	struct vgic_dist *dist = &kvm->arch.vgic;
-	struct vgic_translation_cache_entry *cte, *tmp;
-
-	vgic_its_invalidate_all_caches(kvm);
-
-	list_for_each_entry_safe(cte, tmp,
-				 &dist->lpi_translation_cache, entry) {
-		list_del(&cte->entry);
-		kfree(cte);
-	}
-}
-
 #define INITIAL_BASER_VALUE						  \
 	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
 	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
@@ -1953,8 +1842,6 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 			kfree(its);
 			return ret;
 		}
-
-		vgic_lpi_translation_cache_init(dev->kvm);
 	}
 
 	mutex_init(&its->its_lock);
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index e5cda1eb4bcf..407640c24049 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -335,8 +335,6 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 			 u32 devid, u32 eventid, struct vgic_irq **irq);
 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
 int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
-void vgic_lpi_translation_cache_init(struct kvm *kvm);
-void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
 void vgic_its_invalidate_all_caches(struct kvm *kvm);
 
 /* GICv4.1 MMIO interface */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index c15e7fcccb86..76ed097500c0 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -286,9 +286,6 @@ struct vgic_dist {
 #define LPI_XA_MARK_DEBUG_ITER	XA_MARK_0
 	struct xarray lpi_xa;
 
-	/* LPI translation cache */
-	struct list_head lpi_translation_cache;
-
 	/* used by vgic-debug */
 	struct vgic_state_iter *iter;
 