author:    Zenghui Yu <yuzenghui@huawei.com>  2021-03-22 09:01:57 +0300
committer: Marc Zyngier <maz@kernel.org>      2021-03-24 21:12:21 +0300
commit:    12df7429213abbfa9632ab7db94f629ec309a58b (patch)
tree:      ac0405d87befce445b00dd89c622910f9bc0e147 /arch/arm64/kvm/vgic/vgic-v4.c
parent:    f66b7b151e00427168409f8c1857970e926b1e27 (diff)
KVM: arm64: GICv4.1: Restore VLPI pending state to physical side
When setting the forwarding path of a VLPI (i.e., switching it to HW mode), we can also transfer the pending state from irq->pending_latch to the VPT. This matters especially for migration, where the pending state of each VLPI is first restored into KVM's vgic and must then be propagated to the physical side. We currently send an "INT+VSYNC" command sequence to make a VLPI pending.

Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Shenming Lu <lushenming@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210322060158.1584-6-lushenming@huawei.com
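For context, kvm_vgic_v4_set_forwarding() is not invoked by the ITS emulation directly; it is reached through the irq-bypass framework when a KVM irqfd consumer is paired with a VFIO MSI producer. The sketch below is a simplified rendering of that entry point as it looks in arch/arm64/kvm/arm.c around this kernel version; it is illustrative rather than part of this patch, and minor details may differ.

```c
/*
 * Simplified sketch (not part of this patch) of the irq-bypass hook
 * in arch/arm64/kvm/arm.c that ends up calling
 * kvm_vgic_v4_set_forwarding(). Error handling and tracing omitted.
 */
#include <linux/irqbypass.h>
#include <linux/kvm_host.h>
#include <linux/kvm_irqfd.h>

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	/*
	 * prod->irq is the host IRQ backing the assigned device's MSI.
	 * With this patch applied, any pending_latch state previously
	 * restored into the vgic (e.g. by migration) is pushed into
	 * the VPT at this point.
	 */
	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}
```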
Diffstat (limited to 'arch/arm64/kvm/vgic/vgic-v4.c')
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c  19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index ac029ba3d337..c1845d8f5f7e 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -404,6 +404,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 	struct vgic_its *its;
 	struct vgic_irq *irq;
 	struct its_vlpi_map map;
+	unsigned long flags;
 	int ret;
 
 	if (!vgic_supports_direct_msis(kvm))
@@ -449,6 +450,24 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 	irq->host_irq = virq;
 	atomic_inc(&map.vpe->vlpi_count);
 
+	/* Transfer pending state */
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+	if (irq->pending_latch) {
+		ret = irq_set_irqchip_state(irq->host_irq,
+					    IRQCHIP_STATE_PENDING,
+					    irq->pending_latch);
+		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+
+		/*
+		 * Clear pending_latch and communicate this state
+		 * change via vgic_queue_irq_unlock.
+		 */
+		irq->pending_latch = false;
+		vgic_queue_irq_unlock(kvm, irq, flags);
+	} else {
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+	}
+
 out:
 	mutex_unlock(&its->its_lock);
 	return ret;
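As the commit message notes, the actual "INT+VSYNC" emission happens on the other side of the irq_set_irqchip_state() call, in the GICv3 ITS driver. The sketch below paraphrases that callback as I recall it from drivers/irqchip/irq-gic-v3-its.c of this era; treat the helper names (its_send_vint()/its_send_vclear() and friends) as assumptions rather than a verbatim quote.

```c
/*
 * Sketch (from memory, not part of this patch) of the ITS irqchip
 * callback that services irq_set_irqchip_state() for LPIs. For an
 * interrupt forwarded to a vCPU, the "v" helpers are assumed to emit
 * an INT/CLEAR command followed by VSYNC on the owning vPE, which is
 * how the restored pending bit lands in the VPT.
 */
static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Only the pending state can be poked this way */
	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (irqd_is_forwarded_to_vcpu(d)) {
		if (state)
			its_send_vint(its_dev, event);	 /* INT + VSYNC */
		else
			its_send_vclear(its_dev, event); /* CLEAR + VSYNC */
	} else {
		if (state)
			its_send_int(its_dev, event);
		else
			its_send_clear(its_dev, event);
	}

	return 0;
}
```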