author    Linus Torvalds <torvalds@linux-foundation.org>  2020-10-12 20:00:51 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-10-12 20:00:51 +0300
commit    6734e20e39207556e17d72b5c4950d8f3a4f2de2 (patch)
tree      d11d83bdfd4a5d0584e4a1d1150a7b9e0070f2f0 /drivers/perf/arm_pmu.c
parent    d04a248f1f6cb4bcd8e38b6894bd4f9dc64b6aa8 (diff)
parent    d13027bb35e089bc1bb9f19c4976decf32a09b97 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "There's quite a lot of code here, but much of it is due to the addition of a new PMU driver as well as some arm64-specific selftests, which is an area where we've traditionally been lagging a bit.

 In terms of exciting features, this includes support for the Memory Tagging Extension which narrowly missed 5.9, hopefully allowing userspace to run with use-after-free detection in production on CPUs that support it. Work is ongoing to integrate the feature with KASAN for 5.11.

 Another change that I'm excited about (assuming they get the hardware right) is preparing the ASID allocator for sharing the CPU page-table with the SMMU. Those changes will also come in via Joerg with the IOMMU pull.

 We do stray outside of our usual directories in a few places, mostly due to core changes required by MTE. Although much of this has been Acked, there were a couple of places where we unfortunately didn't get any review feedback.

 Other than that, we ran into a handful of minor conflicts in -next, but nothing that should pose any issues.

 Summary:

  - Userspace support for the Memory Tagging Extension introduced by Armv8.5. Kernel support (via KASAN) is likely to follow in 5.11.

  - Selftests for MTE, Pointer Authentication and FPSIMD/SVE context switching.

  - Fix and subsequent rewrite of our Spectre mitigations, including the addition of support for PR_SPEC_DISABLE_NOEXEC.

  - Support for the Armv8.3 Pointer Authentication enhancements.

  - Support for ASID pinning, which is required when sharing page-tables with the SMMU.

  - MM updates, including treating flush_tlb_fix_spurious_fault() as a no-op.

  - Perf/PMU driver updates, including addition of the ARM CMN PMU driver and also support to handle CPU PMU IRQs as NMIs.

  - Allow prefetchable PCI BARs to be exposed to userspace using normal non-cacheable mappings.

  - Implementation of ARCH_STACKWALK for unwinding.

  - Improve reporting of unexpected kernel traps due to BPF JIT failure.

  - Improve robustness of user-visible HWCAP strings and their corresponding numerical constants.

  - Removal of TEXT_OFFSET.

  - Removal of some unused functions, parameters and prototypes.

  - Removal of MPIDR-based topology detection in favour of firmware description.

  - Cleanups to handling of SVE and FPSIMD register state in preparation for potential future optimisation of handling across syscalls.

  - Cleanups to the SDEI driver in preparation for support in KVM.
  - Miscellaneous cleanups and refactoring work"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (148 commits)
  Revert "arm64: initialize per-cpu offsets earlier"
  arm64: random: Remove no longer needed prototypes
  arm64: initialize per-cpu offsets earlier
  kselftest/arm64: Check mte tagged user address in kernel
  kselftest/arm64: Verify KSM page merge for MTE pages
  kselftest/arm64: Verify all different mmap MTE options
  kselftest/arm64: Check forked child mte memory accessibility
  kselftest/arm64: Verify mte tag inclusion via prctl
  kselftest/arm64: Add utilities and a test to validate mte memory
  perf: arm-cmn: Fix conversion specifiers for node type
  perf: arm-cmn: Fix unsigned comparison to less than zero
  arm64: dbm: Invalidate local TLB when setting TCR_EL1.HD
  arm64: mm: Make flush_tlb_fix_spurious_fault() a no-op
  arm64: Add support for PR_SPEC_DISABLE_NOEXEC prctl() option
  arm64: Pull in task_stack_page() to Spectre-v4 mitigation code
  KVM: arm64: Allow patching EL2 vectors even with KASLR is not enabled
  arm64: Get rid of arm64_ssbd_state
  KVM: arm64: Convert ARCH_WORKAROUND_2 to arm64_get_spectre_v4_state()
  KVM: arm64: Get rid of kvm_arm_have_ssbd()
  KVM: arm64: Simplify handling of ARCH_WORKAROUND_2
  ...
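As context for the MTE item in the summary above, the sketch below shows roughly what the new userspace interface looks like: a prctl() to opt in to the tagged-address ABI with synchronous tag-check faults, then a PROT_MTE mapping. It follows the example in Documentation/arm64/memory-tagging-extension.rst from this series; it is illustrative rather than code from the pull, and the fallback #defines are only needed with pre-5.10 UAPI headers.

/*
 * Illustrative userspace sketch (not part of this pull) of the new MTE
 * interface: opt in to synchronous tag-check faults via prctl(), then
 * create a tag-checked mapping with PROT_MTE.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_MTE_TCF_SYNC
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#define PR_MTE_TCF_SYNC		(1UL << 1)
#define PR_MTE_TAG_SHIFT	3
#endif
#ifndef PROT_MTE
#define PROT_MTE	0x20	/* arm64-specific mmap() protection bit */
#endif

int main(void)
{
	/*
	 * Enable the tagged-address ABI with synchronous MTE faults and
	 * allow tags 1-15 to be generated by the IRG instruction.
	 */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL,
		  PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
		  (0xfffeUL << PR_MTE_TAG_SHIFT), 0, 0, 0)) {
		perror("prctl (no MTE support?)");
		return 1;
	}

	/* Tag checking only applies to PROT_MTE mappings. */
	void *p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE | PROT_MTE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("MTE-enabled mapping at %p\n", p);
	return 0;
}

On CPUs or kernels without MTE the prctl() simply fails, so a program written this way degrades gracefully.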
Diffstat (limited to 'drivers/perf/arm_pmu.c')
-rw-r--r--  drivers/perf/arm_pmu.c | 155
1 file changed, 134 insertions(+), 21 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df352b334ea7..cb2f55f450e4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -26,8 +26,84 @@
#include <asm/irq_regs.h>
+static int armpmu_count_irq_users(const int irq);
+
+struct pmu_irq_ops {
+ void (*enable_pmuirq)(unsigned int irq);
+ void (*disable_pmuirq)(unsigned int irq);
+ void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
+};
+
+static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
+{
+ free_irq(irq, per_cpu_ptr(devid, cpu));
+}
+
+static const struct pmu_irq_ops pmuirq_ops = {
+ .enable_pmuirq = enable_irq,
+ .disable_pmuirq = disable_irq_nosync,
+ .free_pmuirq = armpmu_free_pmuirq
+};
+
+static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
+{
+ free_nmi(irq, per_cpu_ptr(devid, cpu));
+}
+
+static const struct pmu_irq_ops pmunmi_ops = {
+ .enable_pmuirq = enable_nmi,
+ .disable_pmuirq = disable_nmi_nosync,
+ .free_pmuirq = armpmu_free_pmunmi
+};
+
+static void armpmu_enable_percpu_pmuirq(unsigned int irq)
+{
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
+ void __percpu *devid)
+{
+ if (armpmu_count_irq_users(irq) == 1)
+ free_percpu_irq(irq, devid);
+}
+
+static const struct pmu_irq_ops percpu_pmuirq_ops = {
+ .enable_pmuirq = armpmu_enable_percpu_pmuirq,
+ .disable_pmuirq = disable_percpu_irq,
+ .free_pmuirq = armpmu_free_percpu_pmuirq
+};
+
+static void armpmu_enable_percpu_pmunmi(unsigned int irq)
+{
+ if (!prepare_percpu_nmi(irq))
+ enable_percpu_nmi(irq, IRQ_TYPE_NONE);
+}
+
+static void armpmu_disable_percpu_pmunmi(unsigned int irq)
+{
+ disable_percpu_nmi(irq);
+ teardown_percpu_nmi(irq);
+}
+
+static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
+ void __percpu *devid)
+{
+ if (armpmu_count_irq_users(irq) == 1)
+ free_percpu_nmi(irq, devid);
+}
+
+static const struct pmu_irq_ops percpu_pmunmi_ops = {
+ .enable_pmuirq = armpmu_enable_percpu_pmunmi,
+ .disable_pmuirq = armpmu_disable_percpu_pmunmi,
+ .free_pmuirq = armpmu_free_percpu_pmunmi
+};
+
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
+static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
+
+static bool has_nmi;
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
@@ -544,6 +620,23 @@ static int armpmu_count_irq_users(const int irq)
return count;
}
+static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+{
+ const struct pmu_irq_ops *ops = NULL;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(cpu_irq, cpu) != irq)
+ continue;
+
+ ops = per_cpu(cpu_irq_ops, cpu);
+ if (ops)
+ break;
+ }
+
+ return ops;
+}
+
void armpmu_free_irq(int irq, int cpu)
{
if (per_cpu(cpu_irq, cpu) == 0)
return;
@@ -551,18 +644,18 @@ void armpmu_free_irq(int irq, int cpu)
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
return;
- if (!irq_is_percpu_devid(irq))
- free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
- else if (armpmu_count_irq_users(irq) == 1)
- free_percpu_irq(irq, &cpu_armpmu);
+ per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
per_cpu(cpu_irq, cpu) = 0;
+ per_cpu(cpu_irq_ops, cpu) = NULL;
}
int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
const irq_handler_t handler = armpmu_dispatch_irq;
+ const struct pmu_irq_ops *irq_ops;
+
if (!irq)
return 0;
@@ -582,17 +675,44 @@ int armpmu_request_irq(int irq, int cpu)
IRQF_NO_THREAD;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
- err = request_irq(irq, handler, irq_flags, "arm-pmu",
+
+ err = request_nmi(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&cpu_armpmu, cpu));
+
+ /* If cannot get an NMI, get a normal interrupt */
+ if (err) {
+ err = request_irq(irq, handler, irq_flags, "arm-pmu",
+ per_cpu_ptr(&cpu_armpmu, cpu));
+ irq_ops = &pmuirq_ops;
+ } else {
+ has_nmi = true;
+ irq_ops = &pmunmi_ops;
+ }
} else if (armpmu_count_irq_users(irq) == 0) {
- err = request_percpu_irq(irq, handler, "arm-pmu",
- &cpu_armpmu);
+ err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);
+
+ /* If cannot get an NMI, get a normal interrupt */
+ if (err) {
+ err = request_percpu_irq(irq, handler, "arm-pmu",
+ &cpu_armpmu);
+ irq_ops = &percpu_pmuirq_ops;
+ } else {
+ has_nmi = true;
+ irq_ops = &percpu_pmunmi_ops;
+ }
+ } else {
+ /* Per cpudevid irq was already requested by another CPU */
+ irq_ops = armpmu_find_irq_ops(irq);
+
+ if (WARN_ON(!irq_ops))
+ err = -EINVAL;
}
if (err)
goto err_out;
per_cpu(cpu_irq, cpu) = irq;
+ per_cpu(cpu_irq_ops, cpu) = irq_ops;
return 0;
err_out:
@@ -625,12 +745,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
per_cpu(cpu_armpmu, cpu) = pmu;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq) {
- if (irq_is_percpu_devid(irq))
- enable_percpu_irq(irq, IRQ_TYPE_NONE);
- else
- enable_irq(irq);
- }
+ if (irq)
+ per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
return 0;
}
@@ -644,12 +760,8 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq) {
- if (irq_is_percpu_devid(irq))
- disable_percpu_irq(irq);
- else
- disable_irq_nosync(irq);
- }
+ if (irq)
+ per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
per_cpu(cpu_armpmu, cpu) = NULL;
@@ -870,8 +982,9 @@ int armpmu_register(struct arm_pmu *pmu)
if (!__oprofile_cpu_pmu)
__oprofile_cpu_pmu = pmu;
- pr_info("enabled with %s PMU driver, %d counters available\n",
- pmu->name, pmu->num_events);
+ pr_info("enabled with %s PMU driver, %d counters available%s\n",
+ pmu->name, pmu->num_events,
+ has_nmi ? ", using NMIs" : "");
return 0;
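The per-CPU pmu_irq_ops indirection introduced above means each CPU records how its PMU line was requested (NMI, plain IRQ, or their per-CPU variants) and enables, disables, and frees it through the matching ops table. From userspace the result is visible as the ", using NMIs" suffix in the boot log and as better sampling coverage, since an NMI-backed PMU interrupt can fire even in irqs-off kernel regions. The following generic perf_event_open(2) sketch (again, not code from this patch) exercises the PMU this driver manages; perf_event_open() has no glibc wrapper, hence the syscall helper.

/*
 * Generic sketch: count CPU cycles for the current thread via
 * perf_event_open(2), which on arm64 is ultimately serviced by the
 * arm_pmu driver shown in the diff above.
 */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	/* Measure this thread (pid 0) on any CPU (cpu -1). */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile long i = 0; i < 1000000; i++)
		;	/* some work to count */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);
	close(fd);
	return 0;
}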