author     Oliver Upton <oliver.upton@linux.dev>    2023-05-26 00:27:21 +0300
committer  Marc Zyngier <maz@kernel.org>            2023-05-31 12:29:56 +0300
commit     1c913a1c35aa61cf280173b2bcc133c3953c38fc
tree       8a8dd2a1feeeec1d0659a02a4b5cf7b5f0748287
parent     f6a27d6dc51b288106adaf053cff9c9b9cc12c4e
KVM: arm64: Iterate arm_pmus list to probe for default PMU
To date KVM has relied on using a perf event to probe the core PMU at
the time of vPMU initialization. Behind the scenes perf_event_init()
would iteratively walk the PMUs of the system and return the PMU that
could handle the event. However, an upcoming change in perf core will
drop the iterative walk, thereby breaking the fragile dance we do on
the KVM side.

Avoid the problem altogether by iterating over the list of supported
PMUs maintained in KVM, returning the core PMU that matches the CPU
we were called on.

Tested-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230525212723.3361524-2-oliver.upton@linux.dev
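The replacement code relies on bookkeeping KVM already keeps for host
PMUs. For reference, here is a minimal sketch of the declarations the
new hunk presupposes, reconstructed purely from the identifiers it uses
(arm_pmus, arm_pmus_lock, struct arm_pmu_entry); the exact definitions
in the tree may differ in placement and detail:

/*
 * Sketch only: reconstructed from the identifiers in the diff below,
 * not copied from the tree.
 */
struct arm_pmu_entry {
	struct list_head entry;		/* linkage on the arm_pmus list */
	struct arm_pmu *arm_pmu;	/* host core PMU this entry wraps */
};

static LIST_HEAD(arm_pmus);		/* every usable core PMU on the host */
static DEFINE_MUTEX(arm_pmus_lock);	/* guards walks of and updates to the list */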
Diffstat (limited to 'arch/arm64/kvm/pmu-emul.c')
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 46 ++++++++++++----------------------------------
1 file changed, 12 insertions(+), 34 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 45727d50d18d..5deddc49e745 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -694,45 +694,23 @@ out_unlock:
 
 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
-	struct perf_event_attr attr = { };
-	struct perf_event *event;
-	struct arm_pmu *pmu = NULL;
-
-	/*
-	 * Create a dummy event that only counts user cycles. As we'll never
-	 * leave this function with the event being live, it will never
-	 * count anything. But it allows us to probe some of the PMU
-	 * details. Yes, this is terrible.
-	 */
-	attr.type = PERF_TYPE_RAW;
-	attr.size = sizeof(attr);
-	attr.pinned = 1;
-	attr.disabled = 0;
-	attr.exclude_user = 0;
-	attr.exclude_kernel = 1;
-	attr.exclude_hv = 1;
-	attr.exclude_host = 1;
-	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
-	attr.sample_period = GENMASK(63, 0);
+	struct arm_pmu *tmp, *pmu = NULL;
+	struct arm_pmu_entry *entry;
+	int cpu;
 
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 kvm_pmu_perf_overflow, &attr);
+	mutex_lock(&arm_pmus_lock);
 
-	if (IS_ERR(event)) {
-		pr_err_once("kvm: pmu event creation failed %ld\n",
-			    PTR_ERR(event));
-		return NULL;
-	}
+	cpu = smp_processor_id();
+	list_for_each_entry(entry, &arm_pmus, entry) {
+		tmp = entry->arm_pmu;
 
-	if (event->pmu) {
-		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
-		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-			pmu = NULL;
+		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
+			pmu = tmp;
+			break;
+		}
 	}
 
-	perf_event_disable(event);
-	perf_event_release_kernel(event);
+	mutex_unlock(&arm_pmus_lock);
 
 	return pmu;
 }
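For readers outside the kernel tree, the following standalone C sketch
illustrates the same pattern: a mutex-guarded walk of a linked list of
PMU descriptors, returning the first one that claims the calling CPU.
All names here (fake_pmu, pmu_node, probe_pmu) are invented for
illustration, and the kernel's cpumask is reduced to a plain bitmask;
this is not kernel code.

/* Standalone illustration only; every identifier is hypothetical. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pmu {
	const char *name;
	uint64_t supported_cpus;	/* bit n set => this PMU drives CPU n */
};

struct pmu_node {
	struct pmu_node *next;		/* singly linked, standing in for list_head */
	struct fake_pmu *pmu;
};

static struct pmu_node *pmus;		/* list head, akin to arm_pmus */
static pthread_mutex_t pmus_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of the new kvm_pmu_probe_armpmu(): first PMU matching this CPU. */
static struct fake_pmu *probe_pmu(int cpu)
{
	struct fake_pmu *found = NULL;
	struct pmu_node *n;

	pthread_mutex_lock(&pmus_lock);
	for (n = pmus; n; n = n->next) {
		if (n->pmu->supported_cpus & (1ull << cpu)) {
			found = n->pmu;
			break;
		}
	}
	pthread_mutex_unlock(&pmus_lock);

	return found;
}

int main(void)
{
	struct fake_pmu big = { "pmu-big", 0xf0 };	/* CPUs 4-7 */
	struct fake_pmu little = { "pmu-little", 0x0f };	/* CPUs 0-3 */
	struct pmu_node n1 = { NULL, &big };
	struct pmu_node n0 = { &n1, &little };

	pmus = &n0;
	printf("CPU 2 -> %s\n", probe_pmu(2)->name);	/* pmu-little */
	printf("CPU 6 -> %s\n", probe_pmu(6)->name);	/* pmu-big */
	return 0;
}

Keying the walk on the calling CPU matters on heterogeneous (e.g.
big.LITTLE) systems, where each arm_pmu instance covers only a subset
of CPUs; supported_cpus is what lets the probe pick the PMU backing the
CPU the vPMU is being initialized on.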