author     Sean Christopherson <seanjc@google.com>  2025-06-12 01:46:01 +0300
committer  Sean Christopherson <seanjc@google.com>  2025-06-23 19:50:50 +0300
commit     6eab2340f339cabb63079c94e5dbaea4d90007df (patch)
tree       f9932102d0745cec9db37ab7883337c6a2d96653
parent     f2bc961d383bbc26c72f77e3f452da2f9f44dc0d (diff)
KVM: SVM: Don't check vCPU's blocking status when toggling AVIC on/off

Don't query a vCPU's blocking status when toggling AVIC on/off; barring
KVM bugs, the vCPU can't be blocking when refreshing AVIC controls.  And
if there are KVM bugs, ensuring the vCPU and its associated IRTEs are in
the correct state is desirable, i.e. well worth any overhead in a buggy
scenario.

Isolating the "real" load/put flows will allow moving the IOMMU IRTE
(de)activation logic from avic_refresh_apicv_exec_ctrl() to
avic_update_iommu_vcpu_affinity(), i.e. will allow updating the vCPU's
physical ID entry and its IRTEs in a common path, under a single
critical section of ir_list_lock.

Tested-by: Sairaj Kodilkar <sarunkod@amd.com>
Link: https://lore.kernel.org/r/20250611224604.313496-60-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
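In sketch form, the patch splits each flow into a public wrapper that keeps
the fast-path blocking check and an inner double-underscore helper that
updates state unconditionally; the refresh path then calls the helpers
directly.  Below is a minimal stand-alone C model of that shape (the struct
and function bodies are invented for illustration; only the naming mirrors
the patch, and details such as ir_list_lock are elided — see the diff below
for the real code):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the vCPU state touched by the real code. */
struct vcpu {
	bool blocking;
	int cpu;
	int physical_id_entry;	/* models the AVIC Physical APIC table entry */
};

/* Inner helpers: update state unconditionally, no blocking check. */
static void __avic_vcpu_load(struct vcpu *v, int cpu)
{
	v->physical_id_entry = cpu;
}

static void __avic_vcpu_put(struct vcpu *v)
{
	v->physical_id_entry = -1;
}

/*
 * Public wrapper: keeps the scheduling fast path.  A blocking vCPU's
 * entry is ignored by hardware (IsRunning == '0'), so there is nothing
 * to update until the vCPU unblocks.
 */
static void avic_vcpu_load(struct vcpu *v, int cpu)
{
	if (v->blocking)
		return;

	__avic_vcpu_load(v, cpu);
}

/*
 * Toggling AVIC must put the entry in a known-good state even if the
 * vCPU is (buggily) blocking, hence the direct calls to the helpers.
 */
static void avic_refresh_apicv_exec_ctrl(struct vcpu *v, bool activated)
{
	if (activated)
		__avic_vcpu_load(v, v->cpu);
	else
		__avic_vcpu_put(v);
}

int main(void)
{
	struct vcpu v = { .blocking = true, .cpu = 3, .physical_id_entry = -1 };

	avic_vcpu_load(&v, 3);			/* no-op: vCPU is blocking */
	printf("after load: %d\n", v.physical_id_entry);

	avic_refresh_apicv_exec_ctrl(&v, true);	/* updates unconditionally */
	printf("after refresh: %d\n", v.physical_id_entry);
	return 0;
}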
-rw-r--r--  arch/x86/kvm/svm/avic.c  65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 3a59c5e77a4f..1a9862c7106e 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -829,7 +829,7 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
WARN_ON_ONCE(amd_iommu_update_ga(cpu, irqfd->irq_bypass_data));
}
-void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -846,16 +846,6 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
return;
/*
- * No need to update anything if the vCPU is blocking, i.e. if the vCPU
- * is being scheduled in after being preempted. The CPU entries in the
- * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
- * If the vCPU was migrated, its new CPU value will be stuffed when the
- * vCPU unblocks.
- */
- if (kvm_vcpu_is_blocking(vcpu))
- return;
-
- /*
* Grab the per-vCPU interrupt remapping lock even if the VM doesn't
* _currently_ have assigned devices, as that can change. Holding
* ir_list_lock ensures that either svm_ir_list_add() will consume
@@ -889,12 +879,27 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
-void avic_vcpu_put(struct kvm_vcpu *vcpu)
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ /*
+ * No need to update anything if the vCPU is blocking, i.e. if the vCPU
+ * is being scheduled in after being preempted. The CPU entries in the
+ * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
+ * If the vCPU was migrated, its new CPU value will be stuffed when the
+ * vCPU unblocks.
+ */
+ if (kvm_vcpu_is_blocking(vcpu))
+ return;
+
+ __avic_vcpu_load(vcpu, cpu);
+}
+
+static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
{
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long flags;
- u64 entry;
+ u64 entry = svm->avic_physical_id_entry;
lockdep_assert_preemption_disabled();
@@ -902,19 +907,6 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
return;
/*
- * Note, reading the Physical ID entry outside of ir_list_lock is safe
- * as only the pCPU that has loaded (or is loading) the vCPU is allowed
- * to modify the entry, and preemption is disabled. I.e. the vCPU
- * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
- * recursively.
- */
- entry = svm->avic_physical_id_entry;
-
- /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
- if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
- return;
-
- /*
* Take and hold the per-vCPU interrupt remapping lock while updating
* the Physical ID entry even though the lock doesn't protect against
* multiple writers (see above). Holding ir_list_lock ensures that
@@ -933,7 +925,24 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+}
+
+void avic_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Note, reading the Physical ID entry outside of ir_list_lock is safe
+ * as only the pCPU that has loaded (or is loading) the vCPU is allowed
+ * to modify the entry, and preemption is disabled. I.e. the vCPU
+ * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
+ * recursively.
+ */
+ u64 entry = to_svm(vcpu)->avic_physical_id_entry;
+
+ /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
+ if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
+ return;
+ __avic_vcpu_put(vcpu);
}
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -974,9 +983,9 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
avic_refresh_virtual_apic_mode(vcpu);
if (activated)
- avic_vcpu_load(vcpu, vcpu->cpu);
+ __avic_vcpu_load(vcpu, vcpu->cpu);
else
- avic_vcpu_put(vcpu);
+ __avic_vcpu_put(vcpu);
/*
* Here, we go through the per-vcpu ir_list to update all existing
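For context on where the series is headed: per the commit message, isolating
__avic_vcpu_load()/__avic_vcpu_put() lets a later patch update the vCPU's
Physical ID entry and its IRTEs in a common path, under a single ir_list_lock
critical section.  A rough stand-alone model of that end state, with a
pthread mutex standing in for ir_list_lock (all names and bodies here are
assumptions for illustration, not code from this patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ir_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int physical_id_entry = -1;	/* models the AVIC table entry */
static int irte_cpu = -1;		/* models the IRTE destination */

/* In the kernel this walks ir_list and calls amd_iommu_update_ga(). */
static void avic_update_iommu_vcpu_affinity(int cpu)
{
	irte_cpu = cpu;
}

/*
 * End-state sketch: the Physical ID entry and the IRTEs are updated
 * together, inside one critical section, so readers never observe the
 * two disagreeing about the target pCPU.
 */
static void __avic_vcpu_load(int cpu)
{
	pthread_mutex_lock(&ir_list_lock);
	physical_id_entry = cpu;
	avic_update_iommu_vcpu_affinity(cpu);
	pthread_mutex_unlock(&ir_list_lock);
}

int main(void)
{
	__avic_vcpu_load(3);
	printf("entry=%d irte=%d\n", physical_id_entry, irte_cpu);
	return 0;
}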