| -rw-r--r-- | arch/x86/kvm/svm/avic.c | 52 |
1 file changed, 23 insertions, 29 deletions
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 808700ce70c3..3a59c5e77a4f 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -729,34 +729,6 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 	avic_handle_ldr_update(vcpu);
 }
 
-static void avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
-{
-	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
-	unsigned long flags;
-	struct vcpu_svm *svm = to_svm(vcpu);
-	struct kvm_kernel_irqfd *irqfd;
-
-	/*
-	 * Here, we go through the per-vcpu ir_list to update all existing
-	 * interrupt remapping table entry targeting this vcpu.
-	 */
-	spin_lock_irqsave(&svm->ir_list_lock, flags);
-
-	if (list_empty(&svm->ir_list))
-		goto out;
-
-	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
-		void *data = irqfd->irq_bypass_data;
-
-		if (activate)
-			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
-		else
-			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
-	}
-out:
-	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-}
-
 static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd)
 {
 	struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu;
@@ -991,6 +963,10 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
 	bool activated = kvm_vcpu_apicv_active(vcpu);
+	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_kernel_irqfd *irqfd;
+	unsigned long flags;
 
 	if (!enable_apicv)
 		return;
@@ -1002,7 +978,25 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 	else
 		avic_vcpu_put(vcpu);
 
-	avic_set_pi_irte_mode(vcpu, activated);
+	/*
+	 * Here, we go through the per-vcpu ir_list to update all existing
+	 * interrupt remapping table entry targeting this vcpu.
+	 */
+	spin_lock_irqsave(&svm->ir_list_lock, flags);
+
+	if (list_empty(&svm->ir_list))
+		goto out;
+
+	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+		void *data = irqfd->irq_bypass_data;
+
+		if (activated)
+			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
+		else
+			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
+	}
+out:
+	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
