author     Like Xu <likexu@tencent.com>            2022-05-18 16:25:03 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>     2022-06-08 11:48:45 +0300
commit     89cb454ea984d0411523dc10e70e9bf0aca1b527
tree       383624a737a2eeab7a48e144636c8c498e4aa075 /arch/x86/kvm/pmu.c
parent     a33095f4937b362306f8636742450cff1c4630af
download   linux-89cb454ea984d0411523dc10e70e9bf0aca1b527.tar.xz
KVM: x86/pmu: Extract check_pmu_event_filter() handling both GP and fixed counters
Checking the kvm->arch.pmu_event_filter policy in both gp and fixed
code paths was somewhat redundant, so common parts can be extracted,
which reduces code footprint and improves readability.
Signed-off-by: Like Xu <likexu@tencent.com>
Reviewed-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <20220518132512.37864-3-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/pmu.c')
-rw-r--r--   arch/x86/kvm/pmu.c   63
1 file changed, 37 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 72512a33a04e..ee6b2895faed 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -250,14 +250,44 @@ static int cmp_u64(const void *pa, const void *pb)
 	return (a > b) - (a < b);
 }
 
+static bool check_pmu_event_filter(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu_event_filter *filter;
+	struct kvm *kvm = pmc->vcpu->kvm;
+	bool allow_event = true;
+	__u64 key;
+	int idx;
+
+	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+	if (!filter)
+		goto out;
+
+	if (pmc_is_gp(pmc)) {
+		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
+		if (bsearch(&key, filter->events, filter->nevents,
+			    sizeof(__u64), cmp_u64))
+			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
+		else
+			allow_event = filter->action == KVM_PMU_EVENT_DENY;
+	} else {
+		idx = pmc->idx - INTEL_PMC_IDX_FIXED;
+		if (filter->action == KVM_PMU_EVENT_DENY &&
+		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+			allow_event = false;
+		if (filter->action == KVM_PMU_EVENT_ALLOW &&
+		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+			allow_event = false;
+	}
+
+out:
+	return allow_event;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
 	u64 config;
 	u32 type = PERF_TYPE_RAW;
-	struct kvm *kvm = pmc->vcpu->kvm;
-	struct kvm_pmu_event_filter *filter;
-	struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
-	bool allow_event = true;
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 		printk_once("kvm pmu: pin control bit is ignored\n");
@@ -269,17 +299,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
 		return;
 
-	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
-	if (filter) {
-		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;
-
-		if (bsearch(&key, filter->events, filter->nevents,
-			    sizeof(__u64), cmp_u64))
-			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
-		else
-			allow_event = filter->action == KVM_PMU_EVENT_DENY;
-	}
-	if (!allow_event)
+	if (!check_pmu_event_filter(pmc))
 		return;
 
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
@@ -312,23 +332,14 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 {
 	unsigned en_field = ctrl & 0x3;
 	bool pmi = ctrl & 0x8;
-	struct kvm_pmu_event_filter *filter;
-	struct kvm *kvm = pmc->vcpu->kvm;
 
 	pmc_pause_counter(pmc);
 
 	if (!en_field || !pmc_is_enabled(pmc))
 		return;
 
-	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
-	if (filter) {
-		if (filter->action == KVM_PMU_EVENT_DENY &&
-		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			return;
-		if (filter->action == KVM_PMU_EVENT_ALLOW &&
-		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			return;
-	}
+	if (!check_pmu_event_filter(pmc))
+		return;
 
 	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
 		return;
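
For context, the policy that check_pmu_event_filter() consults is installed from userspace through the KVM_SET_PMU_EVENT_FILTER vm ioctl, using struct kvm_pmu_event_filter from <linux/kvm.h>. The sketch below is illustrative only and is not part of this patch; the 0x003c event encoding and the single-bit fixed_counter_bitmap are placeholder values, not values taken from the commit.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	struct kvm_pmu_event_filter *filter;

	if (kvm_fd < 0 || vm_fd < 0)
		return 1;

	/* One __u64 event entry appended to the flexible events[] array. */
	filter = calloc(1, sizeof(*filter) + sizeof(__u64));
	if (!filter)
		return 1;

	filter->action = KVM_PMU_EVENT_ALLOW;	/* allow only the listed events */
	filter->nevents = 1;
	filter->events[0] = 0x003c;		/* placeholder eventsel/umask encoding */
	filter->fixed_counter_bitmap = 0x1;	/* permit fixed counter 0 only */

	if (ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter))
		perror("KVM_SET_PMU_EVENT_FILTER");

	free(filter);
	return 0;
}

Because the kernel reads the filter under SRCU (the srcu_dereference() call in check_pmu_event_filter()), userspace can replace the filter at runtime without tearing down the VM.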