-rw-r--r--	virt/kvm/arm/vgic/vgic-its.c	86
1 files changed, 86 insertions, 0 deletions
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index d3e90a9d0a7a..e61d3ea0ab40 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -535,6 +535,90 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
 	return 0;
 }
 
+static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
+					       phys_addr_t db,
+					       u32 devid, u32 eventid)
+{
+	struct vgic_translation_cache_entry *cte;
+
+	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
+		/*
+		 * If we hit a NULL entry, there is nothing after this
+		 * point.
+		 */
+		if (!cte->irq)
+			break;
+
+		if (cte->db != db || cte->devid != devid ||
+		    cte->eventid != eventid)
+			continue;
+
+		/*
+		 * Move this entry to the head, as it is the most
+		 * recently used.
+		 */
+		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
+			list_move(&cte->entry, &dist->lpi_translation_cache);
+
+		return cte->irq;
+	}
+
+	return NULL;
+}
+
+static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
+				       u32 devid, u32 eventid,
+				       struct vgic_irq *irq)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct vgic_translation_cache_entry *cte;
+	unsigned long flags;
+	phys_addr_t db;
+
+	/* Do not cache a directly injected interrupt */
+	if (irq->hw)
+		return;
+
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
+	if (unlikely(list_empty(&dist->lpi_translation_cache)))
+		goto out;
+
+	/*
+	 * We could have raced with another CPU caching the same
+	 * translation behind our back, so let's check it is not in
+	 * already
+	 */
+	db = its->vgic_its_base + GITS_TRANSLATER;
+	if (__vgic_its_check_cache(dist, db, devid, eventid))
+		goto out;
+
+	/* Always reuse the last entry (LRU policy) */
+	cte = list_last_entry(&dist->lpi_translation_cache,
+			      typeof(*cte), entry);
+
+	/*
+	 * Caching the translation implies having an extra reference
+	 * to the interrupt, so drop the potential reference on what
+	 * was in the cache, and increment it on the new interrupt.
+	 */
+	if (cte->irq)
+		__vgic_put_lpi_locked(kvm, cte->irq);
+
+	vgic_get_irq_kref(irq);
+
+	cte->db		= db;
+	cte->devid	= devid;
+	cte->eventid	= eventid;
+	cte->irq	= irq;
+
+	/* Move the new translation to the head of the list */
+	list_move(&cte->entry, &dist->lpi_translation_cache);
+
+out:
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+}
+
 void vgic_its_invalidate_cache(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -578,6 +662,8 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 	if (!vcpu->arch.vgic_cpu.lpis_enabled)
 		return -EBUSY;
 
+	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
+
 	*irq = ite->irq;
 	return 0;
 }
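
The two new helpers implement a small LRU cache over a pre-allocated list of slots: a lookup hit moves the matching entry to the head of the list, and a miss on insert recycles the tail entry, which is by construction the least recently used. The following is a minimal userspace sketch of that move-to-front pattern, with a stripped-down stand-in for the kernel's <linux/list.h> helpers; the cache_lookup()/cache_insert() names, the NR_SLOTS pool and the plain integer payload are illustrative only, and the locking, reference counting and duplicate re-check that the patch performs under lpi_list_lock are deliberately left out.

/*
 * Illustrative sketch only: a move-to-front LRU cache over a fixed,
 * pre-linked pool of slots. The list helpers mimic <linux/list.h>
 * just enough to run in userspace.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

/* Insert e right after h, i.e. at the head of the list. */
static void list_add(struct list_head *e, struct list_head *h)
{
	e->prev = h;
	e->next = h->next;
	h->next->prev = e;
	h->next = e;
}

/* Unlink and re-insert at the head: the move-to-front step. */
static void list_move(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add(e, h);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cache_entry {
	uint32_t devid, eventid;
	int irq;		/* 0 means "slot never used" */
	struct list_head entry;
};

#define NR_SLOTS 4
static struct cache_entry slots[NR_SLOTS];
static struct list_head cache;

/* Hit: promote the entry so the tail stays the least recently used. */
static int cache_lookup(uint32_t devid, uint32_t eventid)
{
	struct list_head *pos;

	for (pos = cache.next; pos != &cache; pos = pos->next) {
		struct cache_entry *c =
			container_of(pos, struct cache_entry, entry);

		if (!c->irq)
			break;	/* unused slots stay grouped at the tail */
		if (c->devid != devid || c->eventid != eventid)
			continue;

		list_move(&c->entry, &cache);
		return c->irq;
	}
	return 0;
}

/* Miss: recycle the tail (LRU) slot and promote it to the head. */
static void cache_insert(uint32_t devid, uint32_t eventid, int irq)
{
	struct cache_entry *c =
		container_of(cache.prev, struct cache_entry, entry);

	c->devid = devid;
	c->eventid = eventid;
	c->irq = irq;
	list_move(&c->entry, &cache);
}

int main(void)
{
	int i;

	list_init(&cache);
	for (i = 0; i < NR_SLOTS; i++)
		list_add(&slots[i].entry, &cache);

	cache_insert(1, 10, 100);
	cache_insert(2, 20, 200);
	printf("lookup(1,10) -> %d\n", cache_lookup(1, 10));	/* hit: 100 */
	printf("lookup(3,30) -> %d\n", cache_lookup(3, 30));	/* miss: 0 */
	return 0;
}

Keeping the never-used slots grouped at the tail is what allows the lookup to stop at the first empty entry, mirroring the !cte->irq early break in __vgic_its_check_cache().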
