diff options
| author | Sean Christopherson <seanjc@google.com> | 2025-12-06 03:17:18 +0300 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2026-01-08 22:52:22 +0300 |
| commit | c3d6a7210a4de909683a36779f5b8567f79a3688 (patch) | |
| tree | 6376f2f9948b7c692a8625a9a758ecb6ecbce1bf | |
| parent | 2239d137a71d77c7610434473b0c8cfde90d4116 (diff) | |
| download | linux-c3d6a7210a4de909683a36779f5b8567f79a3688.tar.xz | |
KVM: VMX: Dedup code for adding MSR to VMCS's auto list
Add a helper to add an MSR to a VMCS's "auto" list to deduplicate the code
in add_atomic_switch_msr(), and so that the functionality can be used in
the future for managing the MSR auto-store list.
No functional change intended.
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Manali Shukla <manali.shukla@amd.com>
Link: https://patch.msgid.link/20251206001720.468579-43-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
| -rw-r--r-- | arch/x86/kvm/vmx/vmx.c | 41 |
1 file changed, 19 insertions, 22 deletions
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 018e01daab68..3f64d4b1b19c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1093,12 +1093,28 @@ static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, vm_exit_controls_setbit(vmx, exit); } +static void vmx_add_auto_msr(struct vmx_msrs *m, u32 msr, u64 value, + unsigned long vmcs_count_field, struct kvm *kvm) +{ + int i; + + i = vmx_find_loadstore_msr_slot(m, msr); + if (i < 0) { + if (KVM_BUG_ON(m->nr == MAX_NR_LOADSTORE_MSRS, kvm)) + return; + + i = m->nr++; + m->val[i].index = msr; + vmcs_write32(vmcs_count_field, m->nr); + } + m->val[i].value = value; +} + static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val) { struct msr_autoload *m = &vmx->msr_autoload; struct kvm *kvm = vmx->vcpu.kvm; - int i; switch (msr) { case MSR_EFER: @@ -1132,27 +1148,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, wrmsrq(MSR_IA32_PEBS_ENABLE, 0); } - i = vmx_find_loadstore_msr_slot(&m->guest, msr); - if (i < 0) { - if (KVM_BUG_ON(m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm)) - return; - - i = m->guest.nr++; - m->guest.val[i].index = msr; - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); - } - m->guest.val[i].value = guest_val; - - i = vmx_find_loadstore_msr_slot(&m->host, msr); - if (i < 0) { - if (KVM_BUG_ON(m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm)) - return; - - i = m->host.nr++; - m->host.val[i].index = msr; - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); - } - m->host.val[i].value = host_val; + vmx_add_auto_msr(&m->guest, msr, guest_val, VM_ENTRY_MSR_LOAD_COUNT, kvm); + vmx_add_auto_msr(&m->host, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm); } static bool update_transition_efer(struct vcpu_vmx *vmx) |
