summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDapeng Mi <dapeng1.mi@linux.intel.com>2025-12-06 03:17:01 +0300
committerSean Christopherson <seanjc@google.com>2026-01-08 22:52:10 +0300
commit3db871fe185baca66e78b56a230e236af40f1027 (patch)
tree00ed33319c657a95e5863b469cfb0b7dfe6a4a7b
parent02918f0077925994b04be147875b6de8b63ca249 (diff)
downloadlinux-3db871fe185baca66e78b56a230e236af40f1027.tar.xz
KVM: x86/pmu: Reprogram mediated PMU event selectors on event filter updates
Refresh the event selectors that are programmed into hardware when a PMC is "reprogrammed" for a mediated PMU, i.e. if userspace changes the PMU event filters. Note, KVM doesn't utilize the reprogramming infrastructure to handle counter overflow for mediated PMUs, as there's no need to reprogram a non-existent perf event. Suggested-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Co-developed-by: Mingwei Zhang <mizhang@google.com> Signed-off-by: Mingwei Zhang <mizhang@google.com> [sean: add a helper to document behavior, split patch and rewrite changelog] Tested-by: Xudong Hao <xudong.hao@intel.com> Tested-by: Manali Shukla <manali.shukla@amd.com> Link: https://patch.msgid.link/20251206001720.468579-26-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
-rw-r--r--arch/x86/kvm/pmu.c24
1 file changed, 24 insertions, 0 deletions
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a05366e4eef2..24f5c14715ef 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -520,6 +520,25 @@ static bool pmc_is_event_allowed(struct kvm_pmc *pmc)
return is_fixed_event_allowed(filter, pmc->idx);
}
+/*
+ * Re-apply the userspace PMU event filter to the selector values that are
+ * programmed into hardware for a mediated PMU.  Per the commit message, this
+ * runs when a PMC is "reprogrammed", i.e. when the event filters change.
+ * Only the hardware shadow fields (*_hw) are touched; the guest-visible
+ * eventsel/fixed_ctr_ctrl values are left as-is.
+ */
+static void kvm_mediated_pmu_refresh_event_filter(struct kvm_pmc *pmc)
+{
+ bool allowed = pmc_is_event_allowed(pmc);
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+ if (pmc_is_gp(pmc)) {
+ /*
+ * GP counter: gate the ENABLE bit of the hardware event selector
+ * on the filter verdict; all other selector bits are preserved.
+ */
+ pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+ if (allowed)
+ pmc->eventsel_hw |= pmc->eventsel &
+ ARCH_PERFMON_EVENTSEL_ENABLE;
+ } else {
+ /*
+ * Fixed counter: mask selects this counter's 4-bit field in the
+ * fixed counter control MSR (idx is rebased to the fixed range).
+ */
+ u64 mask = intel_fixed_bits_by_idx(pmc->idx - KVM_FIXED_PMC_BASE_IDX, 0xf);
+
+ pmu->fixed_ctr_ctrl_hw &= ~mask;
+ if (allowed)
+ pmu->fixed_ctr_ctrl_hw |= pmu->fixed_ctr_ctrl & mask;
+ }
+}
+
static int reprogram_counter(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -528,6 +547,11 @@ static int reprogram_counter(struct kvm_pmc *pmc)
bool emulate_overflow;
u8 fixed_ctr_ctrl;
+ if (kvm_vcpu_has_mediated_pmu(pmu_to_vcpu(pmu))) {
+ kvm_mediated_pmu_refresh_event_filter(pmc);
+ return 0;
+ }
+
emulate_overflow = pmc_pause_counter(pmc);
if (!pmc_is_globally_enabled(pmc) || !pmc_is_locally_enabled(pmc) ||