summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMingwei Zhang <mizhang@google.com>2025-12-06 03:17:07 +0300
committerSean Christopherson <seanjc@google.com>2026-01-08 22:52:14 +0300
commit88ebc2a3199cb5f16aff20673ed97b63a4295989 (patch)
tree0f7481514212252b15fe62a9e74fc8474ff1450b
parentcb58327c4c8ad9e81d3a2f17adaf3ab57066f369 (diff)
downloadlinux-88ebc2a3199cb5f16aff20673ed97b63a4295989.tar.xz
KVM: nVMX: Disable PMU MSR interception as appropriate while running L2
Merge KVM's PMU MSR interception bitmaps with those of L1, i.e. merge the bitmaps of vmcs01 and vmcs12, e.g. so that KVM doesn't interpose on MSR accesses unnecessarily if L1 exposes a mediated PMU (or equivalent) to L2. Signed-off-by: Mingwei Zhang <mizhang@google.com> Co-developed-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com> [sean: rewrite changelog and comment, omit MSRs that are always intercepted] Tested-by: Xudong Hao <xudong.hao@intel.com> Tested-by: Manali Shukla <manali.shukla@amd.com> Link: https://patch.msgid.link/20251206001720.468579-32-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
-rw-r--r--arch/x86/kvm/vmx/nested.c30
1 file changed, 30 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b56ed2b1ac67..729cc1f05ac8 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -630,6 +630,34 @@ static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
/* Merge L0/L1 interception of @msr for both reads and writes (MSR_TYPE_RW). */
#define nested_vmx_merge_msr_bitmaps_rw(msr) \
nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_RW)
+/*
+ * Merge L0's (vmcs01) and L1's (vmcs12) interception bitmaps for the PMU MSRs
+ * that KVM can pass through when a mediated PMU is in use, so that an access
+ * by L2 bypasses interception only if both L0 and L1 disable interception of
+ * the MSR.  @msr_bitmap_l1 and @msr_bitmap_l0 are the source bitmaps; the
+ * nested_vmx_merge_msr_bitmaps_*() macros reference them (and @vmx)
+ * implicitly, which is why the locals below appear "unused" at a glance.
+ */
+static void nested_vmx_merge_pmu_msr_bitmaps(struct kvm_vcpu *vcpu,
+ unsigned long *msr_bitmap_l1,
+ unsigned long *msr_bitmap_l0)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int i;
+
+ /*
+ * Skip the merges if the vCPU doesn't have a mediated PMU MSR, i.e. if
+ * none of the MSRs can possibly be passed through to L1.
+ * NOTE(review): "to L1" above looks like it should read "to L2" -- the
+ * changelog describes merging so L2's accesses avoid interception when
+ * L1 exposes a mediated PMU to L2; confirm against upstream.
+ */
+ if (!kvm_vcpu_has_mediated_pmu(vcpu))
+ return;
+
+ /*
+ * General-purpose counters: merge both the legacy (IA32_PERFCTRx) and
+ * full-width (IA32_PMCx) aliases, reads and writes alike.
+ */
+ for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+ nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_PERFCTR0 + i);
+ nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_PMC0 + i);
+ }
+
+ /* Fixed-function counters, reads and writes. */
+ for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+ nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_FIXED_CTR0 + i);
+
+ /*
+ * Global control is merged for both directions; GLOBAL_STATUS only for
+ * reads and GLOBAL_OVF_CTRL only for writes -- the other direction of
+ * each is presumably always intercepted (per the changelog: "omit MSRs
+ * that are always intercepted").
+ */
+ nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_GLOBAL_CTRL);
+ nested_vmx_merge_msr_bitmaps_read(MSR_CORE_PERF_GLOBAL_STATUS);
+ nested_vmx_merge_msr_bitmaps_write(MSR_CORE_PERF_GLOBAL_OVF_CTRL);
+}
+
/*
* Merge L0's and L1's MSR bitmap, return false to indicate that
* we do not use the hardware.
@@ -745,6 +773,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
MSR_IA32_PL3_SSP, MSR_TYPE_RW);
+ nested_vmx_merge_pmu_msr_bitmaps(vcpu, msr_bitmap_l1, msr_bitmap_l0);
+
kvm_vcpu_unmap(vcpu, &map);
vmx->nested.force_msr_bitmap_recalc = false;