author     Will Deacon <will@kernel.org>  2024-01-04 15:28:00 +0300
committer  Will Deacon <will@kernel.org>  2024-01-04 15:28:00 +0300
commit     dd9168ab08ebbf5643434c652a5f22ffa96dbdf3 (patch)
tree       5a6a95f11a9321b558b260542b9cfb0a2be40d11 /arch
parent     3b47bd8fed0446f4cc89e66d7a532b5f8b6633f3 (diff)
parent     bb339db4d363c84e0a8d70827df591397ccd7312 (diff)
download   linux-dd9168ab08ebbf5643434c652a5f22ffa96dbdf3.tar.xz
Merge branch 'for-next/perf' into for-next/core
* for-next/perf: (30 commits)
arm: perf: Fix ARCH=arm build with GCC
MAINTAINERS: add maintainers for DesignWare PCIe PMU driver
drivers/perf: add DesignWare PCIe PMU driver
PCI: Move pci_clear_and_set_dword() helper to PCI header
PCI: Add Alibaba Vendor ID to linux/pci_ids.h
docs: perf: Add description for Synopsys DesignWare PCIe PMU driver
Revert "perf/arm_dmc620: Remove duplicate format attribute #defines"
Documentation: arm64: Document the PMU event counting threshold feature
arm64: perf: Add support for event counting threshold
arm: pmu: Move error message and -EOPNOTSUPP to individual PMUs
KVM: selftests: aarch64: Update tools copy of arm_pmuv3.h
perf/arm_dmc620: Remove duplicate format attribute #defines
arm: pmu: Share user ABI format mechanism with SPE
arm64: perf: Include threshold control fields in PMEVTYPER mask
arm: perf: Convert remaining fields to use GENMASK
arm: perf: Use GENMASK for PMMIR fields
arm: perf/kvm: Use GENMASK for ARMV8_PMU_PMCR_N
arm: perf: Remove inlines from arm_pmuv3.c
drivers/perf: arm_dsu_pmu: Remove kerneldoc-style comment syntax
drivers/perf: Remove usage of the deprecated ida_simple_xx() API
...
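[Editor's note] Several of the commits above ("arm: perf: Use GENMASK for PMMIR fields" and friends) replace open-coded shift-and-mask arithmetic with the kernel's GENMASK()/FIELD_GET() helpers. Below is a minimal standalone sketch of that idiom. The macro definitions are simplified userspace stand-ins for linux/bits.h and linux/bitfield.h, and the PMCR value is made up for illustration; only the PMCR.N field position (bits [15:11]) is taken from the architecture.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros; illustrative, not the kernel's
 * own definitions. */
#define GENMASK(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET(m, v)	(((v) & (m)) >> __builtin_ctzll(m))

/* PMCR_EL0.N lives in bits [15:11]. */
#define ARMV8_PMU_PMCR_N	GENMASK(15, 11)

int main(void)
{
	uint64_t pmcr = 0x3f4f;	/* hypothetical PMCR_EL0 value */

	/* Old style: the shift and the (unshifted) mask are separate constants. */
	uint64_t n_old = (pmcr >> 11) & 0x1f;

	/* New style: one mask constant carries both the position and the width. */
	uint64_t n_new = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);

	printf("N = %llu / %llu\n",
	       (unsigned long long)n_old, (unsigned long long)n_new);
	return n_old != n_new;	/* both read the same field */
}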
Diffstat (limited to 'arch')
 arch/arm/kernel/perf_event_v6.c     | 28
 arch/arm/kernel/perf_event_v7.c     | 50
 arch/arm/kernel/perf_event_xscale.c | 44
 arch/arm64/kvm/pmu-emul.c           |  8
 arch/arm64/kvm/sys_regs.c           |  4
 5 files changed, 22 insertions(+), 112 deletions(-)
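[Editor's note] Nearly all of the 112 deleted lines in the three arch/arm files are pmu_lock critical sections around PMU control-register read-modify-write sequences. A minimal sketch of the resulting shape follows, with hypothetical stubs standing in for the real register accessors; the safety comment reflects the series' rationale as I understand it, not kernel documentation.

#include <stdio.h>

#define ARMV6_PMCR_ENABLE	0x1

static unsigned long pmcr;	/* stand-in for the per-CPU PMCR register */
static unsigned long armv6_pmcr_read(void)	{ return pmcr; }
static void armv6_pmcr_write(unsigned long v)	{ pmcr = v; }

/* After the series: a bare read-modify-write. The old pmu_lock is gone
 * because the PMU registers are per-CPU state and every caller already runs
 * on the owning CPU with interrupts disabled, so nothing can interleave
 * with the read-modify-write. */
static void armv6pmu_start_sketch(void)
{
	unsigned long val;

	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
}

int main(void)
{
	armv6pmu_start_sketch();
	printf("PMCR = %#lx\n", pmcr);
	return 0;
}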
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 1ae99deeec54..8fc080c9e4fb 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -268,10 +268,8 @@ static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
 
 static void armv6pmu_enable_event(struct perf_event *event)
 {
-	unsigned long val, mask, evt, flags;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	unsigned long val, mask, evt;
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
@@ -294,12 +292,10 @@ static void armv6pmu_enable_event(struct perf_event *event)
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static irqreturn_t
@@ -362,26 +358,20 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
 
 static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
-	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	unsigned long val;
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
-	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	unsigned long val;
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -419,10 +409,8 @@ static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 
 static void armv6pmu_disable_event(struct perf_event *event)
 {
-	unsigned long val, mask, evt, flags;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	unsigned long val, mask, evt;
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
@@ -444,20 +432,16 @@ static void armv6pmu_disable_event(struct perf_event *event)
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv6mpcore_pmu_disable_event(struct perf_event *event)
 {
-	unsigned long val, mask, flags, evt = 0;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	unsigned long val, mask, evt = 0;
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
@@ -475,12 +459,10 @@ static void armv6mpcore_pmu_disable_event(struct perf_event *event)
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int armv6_map_event(struct perf_event *event)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index eb2190477da1..a3322e2b3ea4 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -870,10 +870,8 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
 
 static void armv7pmu_enable_event(struct perf_event *event)
 {
-	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -886,7 +884,6 @@ static void armv7pmu_enable_event(struct perf_event *event)
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -910,16 +907,12 @@ static void armv7pmu_enable_event(struct perf_event *event)
 	 * Enable counter
 	 */
 	armv7_pmnc_enable_counter(idx);
-
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_disable_event(struct perf_event *event)
 {
-	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -931,7 +924,6 @@ static void armv7pmu_disable_event(struct perf_event *event)
 	/*
 	 * Disable counter and interrupt
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -942,8 +934,6 @@ static void armv7pmu_disable_event(struct perf_event *event)
 	 * Disable interrupt for this counter
 	 */
 	armv7_pmnc_disable_intens(idx);
-
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
@@ -1009,24 +999,14 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
 
 static void armv7pmu_start(struct arm_pmu *cpu_pmu)
 {
-	unsigned long flags;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
 {
-	unsigned long flags;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
@@ -1072,8 +1052,10 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
 {
 	unsigned long config_base = 0;
 
-	if (attr->exclude_idle)
-		return -EPERM;
+	if (attr->exclude_idle) {
+		pr_debug("ARM performance counters do not support mode exclusion\n");
+		return -EOPNOTSUPP;
+	}
 	if (attr->exclude_user)
 		config_base |= ARMV7_EXCLUDE_USER;
 	if (attr->exclude_kernel)
@@ -1492,14 +1474,10 @@ static void krait_clearpmu(u32 config_base)
 
 static void krait_pmu_disable_event(struct perf_event *event)
 {
-	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 
 	/* Disable counter and interrupt */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
@@ -1512,23 +1490,17 @@ static void krait_pmu_disable_event(struct perf_event *event)
 
 	/* Disable interrupt for this counter */
 	armv7_pmnc_disable_intens(idx);
-
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void krait_pmu_enable_event(struct perf_event *event)
 {
-	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
@@ -1548,8 +1520,6 @@ static void krait_pmu_enable_event(struct perf_event *event)
 
 	/* Enable counter */
 	armv7_pmnc_enable_counter(idx);
-
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void krait_pmu_reset(void *info)
@@ -1825,14 +1795,10 @@ static void scorpion_clearpmu(u32 config_base)
 
 static void scorpion_pmu_disable_event(struct perf_event *event)
 {
-	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 
 	/* Disable counter and interrupt */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
@@ -1845,23 +1811,17 @@ static void scorpion_pmu_disable_event(struct perf_event *event)
 
 	/* Disable interrupt for this counter */
 	armv7_pmnc_disable_intens(idx);
-
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void scorpion_pmu_enable_event(struct perf_event *event)
 {
-	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/* Disable counter */
 	armv7_pmnc_disable_counter(idx);
@@ -1881,8 +1841,6 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
 
 	/* Enable counter */
 	armv7_pmnc_enable_counter(idx);
-
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void scorpion_pmu_reset(void *info)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index f6cdcacfb96d..7a2ba1c689a7 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -203,10 +203,8 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
 
 static void xscale1pmu_enable_event(struct perf_event *event)
 {
-	unsigned long val, mask, evt, flags;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	unsigned long val, mask, evt;
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	switch (idx) {
@@ -229,20 +227,16 @@ static void xscale1pmu_enable_event(struct perf_event *event)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void xscale1pmu_disable_event(struct perf_event *event)
 {
-	unsigned long val, mask, evt, flags;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	unsigned long val, mask, evt;
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	switch (idx) {
@@ -263,12 +257,10 @@ static void xscale1pmu_disable_event(struct perf_event *event)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -300,26 +292,20 @@ static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
 
 static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
 {
-	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	unsigned long val;
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
 {
-	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	unsigned long val;
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u64 xscale1pmu_read_counter(struct perf_event *event)
@@ -549,10 +535,8 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
 
 static void xscale2pmu_enable_event(struct perf_event *event)
 {
-	unsigned long flags, ien, evtsel;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	unsigned long ien, evtsel;
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	ien = xscale2pmu_read_int_enable();
@@ -587,18 +571,14 @@ static void xscale2pmu_enable_event(struct perf_event *event)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void xscale2pmu_disable_event(struct perf_event *event)
 {
-	unsigned long flags, ien, evtsel, of_flags;
-	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	unsigned long ien, evtsel, of_flags;
 	struct hw_perf_event *hwc = &event->hw;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 	int idx = hwc->idx;
 
 	ien = xscale2pmu_read_int_enable();
@@ -638,11 +618,9 @@ static void xscale2pmu_disable_event(struct perf_event *event)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
 	xscale2pmu_write_overflow_flags(of_flags);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -663,26 +641,20 @@ out:
 
 static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
 {
-	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	unsigned long val;
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
 {
-	unsigned long flags, val;
-	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+	unsigned long val;
 
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u64 xscale2pmu_read_counter(struct perf_event *event)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index fe99b3dab6ce..3d9467ff73bc 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -267,9 +267,8 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-	u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
+	u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
 
-	val &= ARMV8_PMU_PMCR_N_MASK;
 	if (val == 0)
 		return BIT(ARMV8_PMU_CYCLE_IDX);
 	else
@@ -1136,8 +1135,7 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
  */
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 {
-	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
-		   ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
 
-	return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+	return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 4735e1b37fb3..ff45d688bd7d 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -877,7 +877,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 	u64 pmcr, val;
 
 	pmcr = kvm_vcpu_read_pmcr(vcpu);
-	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
 		kvm_inject_undefined(vcpu);
 		return false;
@@ -1143,7 +1143,7 @@ static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 		    u64 val)
 {
-	u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
 	struct kvm *kvm = vcpu->kvm;
 
 	mutex_lock(&kvm->arch.config_lock);
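[Editor's note] The KVM hunks above swap the PMCR_N shift/mask constant pair for FIELD_GET() and u64_replace_bits() against a single combined field mask. A self-contained sketch checking that the two formulations agree follows; replace_bits() is a hypothetical userspace reimplementation of the kernel helper, and the register values are invented for the test.

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define ARMV8_PMU_PMCR_N	GENMASK(15, 11)
#define ARMV8_PMU_PMCR_N_SHIFT	11	/* the pre-series shift constant */
#define ARMV8_PMU_PMCR_N_MASK	0x1f	/* the pre-series (unshifted) mask */

/* Hypothetical stand-in for the kernel's u64_replace_bits(). */
static uint64_t replace_bits(uint64_t old, uint64_t val, uint64_t mask)
{
	return (old & ~mask) | ((val << __builtin_ctzll(mask)) & mask);
}

int main(void)
{
	uint64_t pmcr = 0xdeadbeef;	/* hypothetical saved PMCR_EL0 */
	uint64_t pmcr_n = 6;		/* hypothetical per-VM counter count */

	/* Pre-series: clear N with the shifted mask, then OR in the new value. */
	uint64_t old_way = (pmcr & ~((uint64_t)ARMV8_PMU_PMCR_N_MASK
				     << ARMV8_PMU_PMCR_N_SHIFT))
			   | (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);

	/* Post-series: one call against the combined field mask. */
	uint64_t new_way = replace_bits(pmcr, pmcr_n, ARMV8_PMU_PMCR_N);

	assert(old_way == new_way);	/* both rewrite only bits [15:11] */
	return 0;
}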