summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDapeng Mi <dapeng1.mi@linux.intel.com>2025-12-06 03:17:05 +0300
committerSean Christopherson <seanjc@google.com>2026-01-08 22:52:12 +0300
commit283a5aa57b2223abf2f73afcc714c4d4553660f2 (patch)
tree8dd131de56d6b3c6da0831b5dabf2485b4a50464
parentf7a65e58d64340c3c0e390ea4e1c4857cd451f1f (diff)
downloadlinux-283a5aa57b2223abf2f73afcc714c4d4553660f2.tar.xz
KVM: x86/pmu: Handle emulated instruction for mediated vPMU
Mediated vPMU needs to accumulate the emulated instructions into the counter and load the counter into HW at VM-entry. Moreover, if the accumulation leads to a counter overflow, KVM needs to update GLOBAL_STATUS and inject a PMI into the guest as well. Suggested-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Signed-off-by: Mingwei Zhang <mizhang@google.com> Tested-by: Xudong Hao <xudong.hao@intel.com> Tested-by: Manali Shukla <manali.shukla@amd.com> Link: https://patch.msgid.link/20251206001720.468579-30-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
-rw-r--r--arch/x86/kvm/pmu.c39
1 file changed, 37 insertions, 2 deletions
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index f6387c67b25c..b78ad897886d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -1031,10 +1031,45 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
kvm_pmu_reset(vcpu);
}
+/*
+ * Check whether overflow PMIs are enabled for the given PMC: for GP
+ * counters, the INT bit in the counter's event selector; for fixed
+ * counters, the ENABLE_PMI bit in the counter's fixed_ctr_ctrl field.
+ */
+static bool pmc_is_pmi_enabled(struct kvm_pmc *pmc)
+{
+	u8 fixed_ctr_ctrl;
+
+	if (pmc_is_gp(pmc))
+		return pmc->eventsel & ARCH_PERFMON_EVENTSEL_INT;
+
+	fixed_ctr_ctrl = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
+					  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
+	return fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI;
+}
+
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
-	pmc->emulated_counter++;
-	kvm_pmu_request_counter_reprogram(pmc);
+	struct kvm_vcpu *vcpu = pmc->vcpu;
+
+	/*
+	 * For perf-based PMUs, accumulate software-emulated events separately
+	 * from pmc->counter, as pmc->counter is offset by the count of the
+	 * associated perf event.  Request reprogramming, which will consult
+	 * both emulated and hardware-generated events to detect overflow.
+	 */
+	if (!kvm_vcpu_has_mediated_pmu(vcpu)) {
+		pmc->emulated_counter++;
+		kvm_pmu_request_counter_reprogram(pmc);
+		return;
+	}
+
+	/*
+	 * For mediated PMUs, pmc->counter is updated when the vCPU's PMU is
+	 * put, and will be loaded into hardware when the PMU is loaded.  Simply
+	 * increment the counter and signal overflow if it wraps to zero: set
+	 * the counter's bit in GLOBAL_STATUS and request a PMI if the guest
+	 * enabled overflow interrupts for this counter.
+	 */
+	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
+	if (!pmc->counter) {
+		pmc_to_pmu(pmc)->global_status |= BIT_ULL(pmc->idx);
+		if (pmc_is_pmi_enabled(pmc))
+			kvm_make_request(KVM_REQ_PMI, vcpu);
+	}
}
static inline bool cpl_is_matched(struct kvm_pmc *pmc)