From 41a54482c010d8806cf56e1501bb3b61fac14cf9 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Wed, 18 May 2016 16:26:00 +0100
Subject: KVM: arm/arm64: Move timer IRQ map to latest possible time

We are about to modify the VGIC to allocate all data structures
dynamically and store mapped IRQ information on a per-IRQ struct, which
is indeed allocated dynamically at init time.

Therefore, we cannot record the mapped IRQ info from the timer at timer
reset time like it's done now, because VCPU reset happens before timer
init.

A possible later time to do this is on the first run of each VCPU; it
just requires us to move the enable state to be a per-VCPU state and do
the lookup of the physical IRQ number when we are about to run the
VCPU.

Signed-off-by: Christoffer Dall
Signed-off-by: Andre Przywara
---
 arch/arm/kvm/arm.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm/kvm/arm.c')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index be4b6394a062..ceb9345bcf07 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	int ret;
+	int ret = 0;
 
 	if (likely(vcpu->arch.has_run_once))
 		return 0;
@@ -482,9 +482,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	 * interrupts from the virtual timer with a userspace gic.
 	 */
 	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
-		kvm_timer_enable(kvm);
+		ret = kvm_timer_enable(vcpu);
 
-	return 0;
+	return ret;
 }
 
 bool kvm_arch_intc_initialized(struct kvm *kvm)
--
cgit v1.2.3
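
The diff above is limited to arch/arm/kvm/arm.c, so the timer-side half of
the change is not shown here. As a rough sketch only, a per-VCPU
kvm_timer_enable() could take the shape below; the `enabled` field and the
map_timer_phys_irq() helper are hypothetical names for illustration, not
the actual implementation.

/*
 * Sketch, not the real code: runs from kvm_vcpu_first_run_init() once the
 * VGIC is initialized, so the physical IRQ lookup can be recorded in the
 * dynamically allocated per-IRQ state.
 */
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	int ret;

	if (timer->enabled)		/* per-VCPU enable state */
		return 0;

	ret = map_timer_phys_irq(vcpu);	/* hypothetical: look up and record
					 * the physical IRQ number */
	if (ret)
		return ret;

	timer->enabled = 1;
	return 0;
}
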
From b13216cf6010ef482e69875e00711de7f9573c80 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Wed, 27 Apr 2016 10:28:00 +0100
Subject: KVM: arm/arm64: Provide functionality to pause and resume a guest

For some rare corner cases in our VGIC emulation, we will later have to
stop the guest to make sure the VGIC state is consistent. Provide the
necessary framework to pause and resume a guest.

Signed-off-by: Christoffer Dall
Signed-off-by: Andre Przywara
---
 arch/arm/include/asm/kvm_host.h   |  4 ++++
 arch/arm/kvm/arm.c                | 25 +++++++++++++------------
 arch/arm64/include/asm/kvm_host.h |  4 ++++
 3 files changed, 21 insertions(+), 12 deletions(-)

(limited to 'arch/arm/kvm/arm.c')

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 385070180c25..832be033e654 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -41,6 +41,8 @@
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
 
+#define KVM_REQ_VCPU_EXIT	8
+
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -225,6 +227,8 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ceb9345bcf07..e89329d22c18 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -492,30 +492,31 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
 	return vgic_initialized(kvm);
 }
 
-static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
-static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
-
-static void kvm_arm_halt_guest(struct kvm *kvm)
+void kvm_arm_halt_guest(struct kvm *kvm)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		vcpu->arch.pause = true;
-	force_vm_exit(cpu_all_mask);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
+}
+
+static void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+	vcpu->arch.pause = false;
+	swake_up(wq);
 }
 
-static void kvm_arm_resume_guest(struct kvm *kvm)
+void kvm_arm_resume_guest(struct kvm *kvm)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-		vcpu->arch.pause = false;
-		swake_up(wq);
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arm_resume_vcpu(vcpu);
 }
 
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4cd4196e94a9..fa94f91812ab 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -44,6 +44,8 @@
 #define KVM_VCPU_MAX_FEATURES 4
 
+#define KVM_REQ_VCPU_EXIT	8
+
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -325,6 +327,8 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+
 u64 __kvm_call_hyp(void *hypfn, ...);
 #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
--
cgit v1.2.3
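
The intended use of this framework is a stop-the-world bracket around an
update that running VCPUs must not race with; the next patch in the series
is its first real user. A minimal sketch of a hypothetical caller:

/*
 * Hypothetical caller, for illustration only. kvm_arm_halt_guest() sets
 * vcpu->arch.pause on every VCPU and raises KVM_REQ_VCPU_EXIT so each
 * VCPU leaves the guest and goes to sleep on its wait queue;
 * kvm_arm_resume_guest() clears pause again and wakes the VCPUs up.
 */
static void update_vgic_state_stopped(struct kvm *kvm)
{
	kvm_arm_halt_guest(kvm);

	/* ... modify state that running VCPUs must not observe ... */

	kvm_arm_resume_guest(kvm);
}
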
From 35a2d58588f0992627e74b447ccab21570544c86 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Fri, 20 May 2016 15:25:28 +0200
Subject: KVM: arm/arm64: vgic-new: Synchronize changes to active state

When modifying the active state of an interrupt via the MMIO interface,
we should ensure that the write has the intended effect.

If a guest sets an interrupt to active, but that interrupt is already
flushed into a list register on a running VCPU, then that VCPU will
write the active state back into the struct vgic_irq upon returning
from the guest and syncing its state. This is a non-benign race,
because the guest can observe that an interrupt is not active, and it
can have a reasonable expectation that other VCPUs will not ack any
IRQs, and then set the state to active, and expect it to stay that way.
Currently we are not honoring this case.

Therefore, change both the SACTIVE and CACTIVE mmio handlers to stop
the world, change the irq state, potentially queue the irq if we're
setting it to active, and then continue.

We take this chance to slightly optimize these functions by not
stopping the world when touching private interrupts where there is
inherently no possible race.

Signed-off-by: Christoffer Dall
---
 arch/arm/include/asm/kvm_host.h   |   2 +
 arch/arm/kvm/arm.c                |   8 ++-
 arch/arm64/include/asm/kvm_host.h |   2 +
 virt/kvm/arm/vgic/vgic-mmio.c     | 105 ++++++++++++++++++++++++--------------
 4 files changed, 77 insertions(+), 40 deletions(-)

(limited to 'arch/arm/kvm/arm.c')

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 832be033e654..987da9fb14d8 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -229,6 +229,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
 
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index e89329d22c18..5a599c208f23 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -502,7 +502,13 @@ void kvm_arm_halt_guest(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
 }
 
-static void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pause = true;
+	kvm_vcpu_kick(vcpu);
+}
+
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fa94f91812ab..38746d24466d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -329,6 +329,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
 #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 4ef35719fcbe..059595ec3da0 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -173,6 +173,66 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	return value;
 }
 
+static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+				    bool new_active_state)
+{
+	spin_lock(&irq->irq_lock);
+	/*
+	 * If this virtual IRQ was written into a list register, we
+	 * have to make sure the CPU that runs the VCPU thread has
+	 * synced back LR state to the struct vgic_irq. We can only
+	 * know this for sure, when either this irq is not assigned to
+	 * anyone's AP list anymore, or the VCPU thread is not
+	 * running on any CPUs.
+	 *
+	 * In the opposite case, we know the VCPU thread may be on its
+	 * way back from the guest and still has to sync back this
+	 * IRQ, so we release and re-acquire the spin_lock to let the
+	 * other thread sync back the IRQ.
+	 */
+	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+	       irq->vcpu->cpu != -1) { /* VCPU thread is running */
+		BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
+		cond_resched_lock(&irq->irq_lock);
+	}
+
+	irq->active = new_active_state;
+	if (new_active_state)
+		vgic_queue_irq_unlock(vcpu->kvm, irq);
+	else
+		spin_unlock(&irq->irq_lock);
+}
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts, we have to stop all the VCPUs because interrupts can
+ * be migrated while we don't hold the IRQ locks and we don't want to be
+ * chasing moving targets.
+ *
+ * For private interrupts, we only have to make sure the single and only VCPU
+ * that can potentially queue the IRQ is stopped.
+ */
+static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (intid < VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_halt_vcpu(vcpu);
+	else
+		kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_change_active_prepare */
+static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (intid < VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_resume_vcpu(vcpu);
+	else
+		kvm_arm_resume_guest(vcpu->kvm);
+}
+
 void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 			     gpa_t addr, unsigned int len,
 			     unsigned long val)
@@ -180,32 +240,12 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
 
-	kvm_arm_halt_guest(vcpu->kvm);
+	vgic_change_active_prepare(vcpu, intid);
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-		spin_lock(&irq->irq_lock);
-		/*
-		 * If this virtual IRQ was written into a list register, we
-		 * have to make sure the CPU that runs the VCPU thread has
-		 * synced back LR state to the struct vgic_irq. We can only
-		 * know this for sure, when either this irq is not assigned to
-		 * anyone's AP list anymore, or the VCPU thread is not
-		 * running on any CPUs.
-		 *
-		 * In the opposite case, we know the VCPU thread may be on its
-		 * way back from the guest and still has to sync back this
-		 * IRQ, so we release and re-acquire the spin_lock to let the
-		 * other thread sync back the IRQ.
-		 */
-		while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-		       irq->vcpu->cpu != -1) /* VCPU thread is running */
-			cond_resched_lock(&irq->irq_lock);
-
-		irq->active = false;
-		spin_unlock(&irq->irq_lock);
+		vgic_mmio_change_active(vcpu, irq, false);
 	}
-	kvm_arm_resume_guest(vcpu->kvm);
+	vgic_change_active_finish(vcpu, intid);
 }
 
 void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
@@ -215,25 +255,12 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
 
+	vgic_change_active_prepare(vcpu, intid);
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-		spin_lock(&irq->irq_lock);
-
-		/*
-		 * If the IRQ was already active or there is no target VCPU
-		 * assigned at the moment, then just proceed.
-		 */
-		if (irq->active || !irq->target_vcpu) {
-			irq->active = true;
-
-			spin_unlock(&irq->irq_lock);
-			continue;
-		}
-
-		irq->active = true;
-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_mmio_change_active(vcpu, irq, true);
 	}
+	vgic_change_active_finish(vcpu, intid);
 }
 
 unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
--
cgit v1.2.3
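
The wait loop in vgic_mmio_change_active() leans on cond_resched_lock() so
it does not spin with the lock held while the VCPU thread still needs it to
sync the LR state back. Conceptually, that call behaves roughly like the
sketch below; this is a simplified model, not the kernel's exact
__cond_resched_lock().

/*
 * Rough model of cond_resched_lock(): if the lock is contended or a
 * reschedule is due, drop the lock so the other thread (here, the VCPU
 * thread syncing LR state back into the struct vgic_irq) can take it,
 * then re-acquire it and report that it was dropped.
 */
static int cond_resched_lock_sketch(spinlock_t *lock)
{
	if (spin_needbreak(lock) || need_resched()) {
		spin_unlock(lock);
		cond_resched();		/* give the other thread a chance */
		spin_lock(lock);
		return 1;
	}
	return 0;
}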