Diffstat (limited to 'virt/kvm/arm')
-rw-r--r--  virt/kvm/arm/arm.c            |  2
-rw-r--r--  virt/kvm/arm/trace.h          |  1
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c | 12
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c   |  4
-rw-r--r--  virt/kvm/arm/vgic/vgic-v4.c   | 34
5 files changed, 18 insertions, 35 deletions
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index d65a0faa46d8..eda7b624eab8 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -742,9 +742,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		guest_enter_irqoff();
 
 		if (has_vhe()) {
-			kvm_arm_vhe_guest_enter();
 			ret = kvm_vcpu_run_vhe(vcpu);
-			kvm_arm_vhe_guest_exit();
 		} else {
 			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
 		}
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index 204d210d01c2..cc94ccc68821 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -4,6 +4,7 @@
 
 #include <kvm/arm_arch_timer.h>
 #include <linux/tracepoint.h>
+#include <asm/kvm_arm.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index d656ebd5f9d4..97fb2a40e6ba 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -179,18 +179,6 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 	return value;
 }
 
-/*
- * This function will return the VCPU that performed the MMIO access and
- * trapped from within the VM, and will return NULL if this is a userspace
- * access.
- *
- * We can disable preemption locally around accessing the per-CPU variable,
- * and use the resolved vcpu pointer after enabling preemption again, because
- * even if the current thread is migrated to another CPU, reading the per-CPU
- * value later will give us the same value as we update the per-CPU variable
- * in the preempt notifier handlers.
- */
-
 /* Must be called with irq->irq_lock held */
 static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				 bool is_uaccess)
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index f45635a6f0ec..1bc09b523486 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -595,7 +595,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	/* GICv4 support? */
 	if (info->has_v4) {
 		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
-		kvm_info("GICv4 support %sabled\n",
+		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
+		kvm_info("GICv4%s support %sabled\n",
+			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
 			 gicv4_enable ? "en" : "dis");
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 46f875589c47..1eb0f8c76219 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -67,10 +67,10 @@
  * it. And if we've migrated our vcpu from one CPU to another, we must
  * tell the ITS (so that the messages reach the right redistributor).
  * This is done in two steps: first issue a irq_set_affinity() on the
- * irq corresponding to the vcpu, then call its_schedule_vpe(). You
- * must be in a non-preemptible context. On exit, another call to
- * its_schedule_vpe() tells the redistributor that we're done with the
- * vcpu.
+ * irq corresponding to the vcpu, then call its_make_vpe_resident().
+ * You must be in a non-preemptible context. On exit, a call to
+ * its_make_vpe_non_resident() tells the redistributor that we're done
+ * with the vcpu.
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	struct kvm_vcpu *vcpu = info;
 
 	/* We got the message, no need to fire again */
-	if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
+	if (!kvm_vgic_global_state.has_gicv4_1 &&
+	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);
 
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
@@ -199,19 +200,11 @@ void vgic_v4_teardown(struct kvm *kvm)
 int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
 {
 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-	struct irq_desc *desc = irq_to_desc(vpe->irq);
 
 	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
 		return 0;
 
-	/*
-	 * If blocking, a doorbell is required. Undo the nested
-	 * disable_irq() calls...
-	 */
-	while (need_db && irqd_irq_disabled(&desc->irq_data))
-		enable_irq(vpe->irq);
-
-	return its_schedule_vpe(vpe, false);
+	return its_make_vpe_non_resident(vpe, need_db);
 }
 
 int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -232,18 +225,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	if (err)
 		return err;
 
-	/* Disabled the doorbell, as we're about to enter the guest */
-	disable_irq_nosync(vpe->irq);
-
-	err = its_schedule_vpe(vpe, true);
+	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
 	if (err)
 		return err;
 
 	/*
 	 * Now that the VPE is resident, let's get rid of a potential
-	 * doorbell interrupt that would still be pending.
+	 * doorbell interrupt that would still be pending. This is a
+	 * GICv4.0 only "feature"...
 	 */
-	return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+	if (!kvm_vgic_global_state.has_gicv4_1)
+		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+
+	return err;
 }
 
 static struct vgic_its *vgic_get_its(struct kvm *kvm,
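The vgic-v4.c hunks are the core of the change: KVM stops managing the doorbell interrupt by hand (the removed nested disable_irq()/enable_irq() bookkeeping) and instead passes need_db to the ITS driver via its_make_vpe_non_resident(); only GICv4.0 still needs the doorbell disabled in the handler and a stale pending doorbell cleared in vgic_v4_load(). The following is a minimal standalone sketch of that control flow, not kernel code: struct its_vpe is reduced to three fields, the its_make_vpe_* functions are stubs standing in for the ITS driver, and the model_* names are hypothetical.

/*
 * Standalone model of the load/put flow after this patch. All types
 * and helpers are simplified stand-ins; only the branching mirrors
 * the diff above.
 */
#include <stdbool.h>
#include <stdio.h>

struct its_vpe {
	int  irq;		/* doorbell interrupt for this vPE */
	bool resident;		/* vPE scheduled on a redistributor? */
	bool db_pending;	/* modelled doorbell pending state */
};

static bool has_gicv4_1;	/* mirrors kvm_vgic_global_state.has_gicv4_1 */

/* Stub for the new ITS entry point: schedule the vPE on this CPU. */
static int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
{
	(void)g0en; (void)g1en;
	vpe->resident = true;
	return 0;
}

/* Stub: deschedule the vPE; the ITS driver now handles need_db itself. */
static int its_make_vpe_non_resident(struct its_vpe *vpe, bool need_db)
{
	(void)need_db;
	vpe->resident = false;
	return 0;
}

/* Mirrors vgic_v4_load() after the patch. */
static int model_vgic_v4_load(struct its_vpe *vpe, bool vgic_enabled)
{
	int err = its_make_vpe_resident(vpe, false, vgic_enabled);

	if (err)
		return err;

	/*
	 * GICv4.0 only: a doorbell may have fired while the vPE was not
	 * resident; clear it (irq_set_irqchip_state() in the real code).
	 */
	if (!has_gicv4_1)
		vpe->db_pending = false;

	return err;
}

/* Mirrors vgic_v4_put(): a single call replaces the enable_irq() loop. */
static int model_vgic_v4_put(struct its_vpe *vpe, bool need_db)
{
	if (!vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, need_db);
}

int main(void)
{
	struct its_vpe vpe = { .irq = 42, .db_pending = true };

	model_vgic_v4_load(&vpe, true);	/* vcpu entering the guest */
	model_vgic_v4_put(&vpe, true);	/* vcpu blocking, wants a doorbell */
	printf("resident=%d db_pending=%d\n", vpe.resident, vpe.db_pending);
	return 0;
}

The design point the sketch tries to show: on GICv4.0 the doorbell is an ordinary interrupt that KVM had to disable on entry and re-enable when blocking, whereas GICv4.1 can request a doorbell as part of descheduling the vPE, so both the doorbell handler and vgic_v4_load() skip that work when has_gicv4_1 is set.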