diff options
| author | Sean Christopherson <seanjc@google.com> | 2025-12-06 03:17:17 +0300 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2026-01-08 22:52:21 +0300 |
| commit | 2239d137a71d77c7610434473b0c8cfde90d4116 (patch) | |
| tree | 678f997656e4d32a324e313c70316644f46bd20a | |
| parent | 0c4ff0866fc1b0bf8c1d8d5f27fedc6dd9c51183 (diff) | |
| download | linux-2239d137a71d77c7610434473b0c8cfde90d4116.tar.xz | |
KVM: VMX: Compartmentalize adding MSRs to host vs. guest auto-load list
Undo the bundling of the "host" and "guest" MSR auto-load list logic so
that the code can be deduplicated by factoring out the logic to a separate
helper. Now that "list full" situations are treated as fatal to the VM,
there is no need to pre-check both lists.
For all intents and purposes, this reverts the add_atomic_switch_msr()
changes made by commit 3190709335dd ("x86/KVM/VMX: Separate the VMX
AUTOLOAD guest/host number accounting").
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Manali Shukla <manali.shukla@amd.com>
Link: https://patch.msgid.link/20251206001720.468579-42-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
| -rw-r--r-- | arch/x86/kvm/vmx/vmx.c | 23 |
1 file changed, 12 insertions, 11 deletions
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index be2a2580e8f1..018e01daab68 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1096,9 +1096,9 @@ static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val) { - int i, j = 0; struct msr_autoload *m = &vmx->msr_autoload; struct kvm *kvm = vmx->vcpu.kvm; + int i; switch (msr) { case MSR_EFER: @@ -1133,25 +1133,26 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, } i = vmx_find_loadstore_msr_slot(&m->guest, msr); - j = vmx_find_loadstore_msr_slot(&m->host, msr); - - if (KVM_BUG_ON(i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm) || - KVM_BUG_ON(j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm)) - return; - if (i < 0) { + if (KVM_BUG_ON(m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm)) + return; + i = m->guest.nr++; m->guest.val[i].index = msr; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); } m->guest.val[i].value = guest_val; - if (j < 0) { - j = m->host.nr++; - m->host.val[j].index = msr; + i = vmx_find_loadstore_msr_slot(&m->host, msr); + if (i < 0) { + if (KVM_BUG_ON(m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm)) + return; + + i = m->host.nr++; + m->host.val[i].index = msr; vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); } - m->host.val[j].value = host_val; + m->host.val[i].value = host_val; } static bool update_transition_efer(struct vcpu_vmx *vmx) |
