Diffstat (limited to 'drivers')
-rw-r--r--   drivers/perf/Makefile            |   2
-rw-r--r--   drivers/perf/arm_pmu.c           | 226
-rw-r--r--   drivers/perf/arm_pmu_platform.c  | 235
3 files changed, 240 insertions, 223 deletions
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index ef0c6b210345..925cd3903029 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
 obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
 obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f387d6153099..b3bedfa512eb 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -16,7 +16,6 @@
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/of_device.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -25,7 +24,6 @@
 #include <linux/irq.h>
 #include <linux/irqdesc.h>

-#include <asm/cputype.h>
 #include <asm/irq_regs.h>

 static int
@@ -544,7 +542,7 @@ static void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
 	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 }

-static void armpmu_free_irqs(struct arm_pmu *armpmu)
+void armpmu_free_irqs(struct arm_pmu *armpmu)
 {
 	int cpu;

@@ -589,7 +587,7 @@ static int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
 	return 0;
 }

-static int armpmu_request_irqs(struct arm_pmu *armpmu)
+int armpmu_request_irqs(struct arm_pmu *armpmu)
 {
 	int cpu, err;

@@ -783,161 +781,7 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 				    &cpu_pmu->node);
 }

-/*
- * CPU PMU identification and probing.
- */
-static int probe_current_pmu(struct arm_pmu *pmu,
-			     const struct pmu_probe_info *info)
-{
-	int cpu = get_cpu();
-	unsigned int cpuid = read_cpuid_id();
-	int ret = -ENODEV;
-
-	pr_info("probing PMU on CPU %d\n", cpu);
-
-	for (; info->init != NULL; info++) {
-		if ((cpuid & info->mask) != info->cpuid)
-			continue;
-		ret = info->init(pmu);
-		break;
-	}
-
-	put_cpu();
-	return ret;
-}
-
-static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
-{
-	int cpu, ret;
-	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
-
-	ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
-	if (ret)
-		return ret;
-
-	for_each_cpu(cpu, &pmu->supported_cpus)
-		per_cpu(hw_events->irq, cpu) = irq;
-
-	return 0;
-}
-
-static bool pmu_has_irq_affinity(struct device_node *node)
-{
-	return !!of_find_property(node, "interrupt-affinity", NULL);
-}
-
-static int pmu_parse_irq_affinity(struct device_node *node, int i)
-{
-	struct device_node *dn;
-	int cpu;
-
-	/*
-	 * If we don't have an interrupt-affinity property, we guess irq
-	 * affinity matches our logical CPU order, as we used to assume.
-	 * This is fragile, so we'll warn in pmu_parse_irqs().
-	 */
-	if (!pmu_has_irq_affinity(node))
-		return i;
-
-	dn = of_parse_phandle(node, "interrupt-affinity", i);
-	if (!dn) {
-		pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
-			i, node->name);
-		return -EINVAL;
-	}
-
-	/* Now look up the logical CPU number */
-	for_each_possible_cpu(cpu) {
-		struct device_node *cpu_dn;
-
-		cpu_dn = of_cpu_device_node_get(cpu);
-		of_node_put(cpu_dn);
-
-		if (dn == cpu_dn)
-			break;
-	}
-
-	if (cpu >= nr_cpu_ids) {
-		pr_warn("failed to find logical CPU for %s\n", dn->name);
-	}
-
-	of_node_put(dn);
-
-	return cpu;
-}
-
-static int pmu_parse_irqs(struct arm_pmu *pmu)
-{
-	int i = 0, irqs;
-	struct platform_device *pdev = pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
-
-	irqs = platform_irq_count(pdev);
-	if (irqs < 0) {
-		pr_err("unable to count PMU IRQs\n");
-		return irqs;
-	}
-
-	/*
-	 * In this case we have no idea which CPUs are covered by the PMU.
-	 * To match our prior behaviour, we assume all CPUs in this case.
-	 */
-	if (irqs == 0) {
-		pr_warn("no irqs for PMU, sampling events not supported\n");
-		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-		cpumask_setall(&pmu->supported_cpus);
-		return 0;
-	}
-
-	if (irqs == 1) {
-		int irq = platform_get_irq(pdev, 0);
-		if (irq && irq_is_percpu(irq))
-			return pmu_parse_percpu_irq(pmu, irq);
-	}
-
-	if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
-		pr_warn("no interrupt-affinity property for %s, guessing.\n",
-			of_node_full_name(pdev->dev.of_node));
-	}
-
-	/*
-	 * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
-	 * special platdata function that attempts to demux them.
-	 */
-	if (dev_get_platdata(&pdev->dev))
-		cpumask_setall(&pmu->supported_cpus);
-
-	for (i = 0; i < irqs; i++) {
-		int cpu, irq;
-
-		irq = platform_get_irq(pdev, i);
-		if (WARN_ON(irq <= 0))
-			continue;
-
-		if (irq_is_percpu(irq)) {
-			pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
-			return -EINVAL;
-		}
-
-		cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
-		if (cpu < 0)
-			return cpu;
-		if (cpu >= nr_cpu_ids)
-			continue;
-
-		if (per_cpu(hw_events->irq, cpu)) {
-			pr_warn("multiple PMU IRQs for the same CPU detected\n");
-			return -EINVAL;
-		}
-
-		per_cpu(hw_events->irq, cpu) = irq;
-		cpumask_set_cpu(cpu, &pmu->supported_cpus);
-	}
-
-	return 0;
-}
-
-static struct arm_pmu *armpmu_alloc(void)
+struct arm_pmu *armpmu_alloc(void)
 {
 	struct arm_pmu *pmu;
 	int cpu;
@@ -994,7 +838,7 @@ out:
 	return NULL;
 }

-static void armpmu_free(struct arm_pmu *pmu)
+void armpmu_free(struct arm_pmu *pmu)
 {
 	free_percpu(pmu->hw_events);
 	kfree(pmu);
 }
@@ -1025,68 +869,6 @@ out_destroy:
 	return ret;
 }

-int arm_pmu_device_probe(struct platform_device *pdev,
-			 const struct of_device_id *of_table,
-			 const struct pmu_probe_info *probe_table)
-{
-	const struct of_device_id *of_id;
-	armpmu_init_fn init_fn;
-	struct device_node *node = pdev->dev.of_node;
-	struct arm_pmu *pmu;
-	int ret = -ENODEV;
-
-	pmu = armpmu_alloc();
-	if (!pmu)
-		return -ENOMEM;
-
-	pmu->plat_device = pdev;
-
-	ret = pmu_parse_irqs(pmu);
-	if (ret)
-		goto out_free;
-
-	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
-		init_fn = of_id->data;
-
-		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
-							   "secure-reg-access");
-
-		/* arm64 systems boot only as non-secure */
-		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
-			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
-			pmu->secure_access = false;
-		}
-
-		ret = init_fn(pmu);
-	} else if (probe_table) {
-		cpumask_setall(&pmu->supported_cpus);
-		ret = probe_current_pmu(pmu, probe_table);
-	}
-
-	if (ret) {
-		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
-		goto out_free;
-	}
-
-	ret = armpmu_request_irqs(pmu);
-	if (ret)
-		goto out_free_irqs;
-
-	ret = armpmu_register(pmu);
-	if (ret)
-		goto out_free;
-
-	return 0;
-
-out_free_irqs:
-	armpmu_free_irqs(pmu);
-out_free:
-	pr_info("%s: failed to register PMU devices!\n",
-		of_node_full_name(node));
-	armpmu_free(pmu);
-	return ret;
-}
-
 static int arm_pmu_hp_init(void)
 {
 	int ret;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
new file mode 100644
index 000000000000..69255f53057a
--- /dev/null
+++ b/drivers/perf/arm_pmu_platform.c
@@ -0,0 +1,235 @@
+/*
+ * platform_device probing code for ARM performance counters.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/kconfig.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/percpu.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+static int probe_current_pmu(struct arm_pmu *pmu,
+			     const struct pmu_probe_info *info)
+{
+	int cpu = get_cpu();
+	unsigned int cpuid = read_cpuid_id();
+	int ret = -ENODEV;
+
+	pr_info("probing PMU on CPU %d\n", cpu);
+
+	for (; info->init != NULL; info++) {
+		if ((cpuid & info->mask) != info->cpuid)
+			continue;
+		ret = info->init(pmu);
+		break;
+	}
+
+	put_cpu();
+	return ret;
+}
+
+static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
+{
+	int cpu, ret;
+	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+
+	ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
+	if (ret)
+		return ret;
+
+	for_each_cpu(cpu, &pmu->supported_cpus)
+		per_cpu(hw_events->irq, cpu) = irq;
+
+	return 0;
+}
+
+static bool pmu_has_irq_affinity(struct device_node *node)
+{
+	return !!of_find_property(node, "interrupt-affinity", NULL);
+}
+
+static int pmu_parse_irq_affinity(struct device_node *node, int i)
+{
+	struct device_node *dn;
+	int cpu;
+
+	/*
+	 * If we don't have an interrupt-affinity property, we guess irq
+	 * affinity matches our logical CPU order, as we used to assume.
+	 * This is fragile, so we'll warn in pmu_parse_irqs().
+	 */
+	if (!pmu_has_irq_affinity(node))
+		return i;
+
+	dn = of_parse_phandle(node, "interrupt-affinity", i);
+	if (!dn) {
+		pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
+			i, node->name);
+		return -EINVAL;
+	}
+
+	/* Now look up the logical CPU number */
+	for_each_possible_cpu(cpu) {
+		struct device_node *cpu_dn;
+
+		cpu_dn = of_cpu_device_node_get(cpu);
+		of_node_put(cpu_dn);
+
+		if (dn == cpu_dn)
+			break;
+	}
+
+	if (cpu >= nr_cpu_ids) {
+		pr_warn("failed to find logical CPU for %s\n", dn->name);
+	}
+
+	of_node_put(dn);
+
+	return cpu;
+}
+
+static int pmu_parse_irqs(struct arm_pmu *pmu)
+{
+	int i = 0, num_irqs;
+	struct platform_device *pdev = pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+
+	num_irqs = platform_irq_count(pdev);
+	if (num_irqs < 0) {
+		pr_err("unable to count PMU IRQs\n");
+		return num_irqs;
+	}
+
+	/*
+	 * In this case we have no idea which CPUs are covered by the PMU.
+	 * To match our prior behaviour, we assume all CPUs in this case.
+	 */
+	if (num_irqs == 0) {
+		pr_warn("no irqs for PMU, sampling events not supported\n");
+		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+		cpumask_setall(&pmu->supported_cpus);
+		return 0;
+	}
+
+	if (num_irqs == 1) {
+		int irq = platform_get_irq(pdev, 0);
+		if (irq && irq_is_percpu(irq))
+			return pmu_parse_percpu_irq(pmu, irq);
+	}
+
+	if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
+		pr_warn("no interrupt-affinity property for %s, guessing.\n",
+			of_node_full_name(pdev->dev.of_node));
+	}
+
+	/*
+	 * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
+	 * special platdata function that attempts to demux them.
+	 */
+	if (dev_get_platdata(&pdev->dev))
+		cpumask_setall(&pmu->supported_cpus);
+
+	for (i = 0; i < num_irqs; i++) {
+		int cpu, irq;
+
+		irq = platform_get_irq(pdev, i);
+		if (WARN_ON(irq <= 0))
+			continue;
+
+		if (irq_is_percpu(irq)) {
+			pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
+			return -EINVAL;
+		}
+
+		cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
+		if (cpu < 0)
+			return cpu;
+		if (cpu >= nr_cpu_ids)
+			continue;
+
+		if (per_cpu(hw_events->irq, cpu)) {
+			pr_warn("multiple PMU IRQs for the same CPU detected\n");
+			return -EINVAL;
+		}
+
+		per_cpu(hw_events->irq, cpu) = irq;
+		cpumask_set_cpu(cpu, &pmu->supported_cpus);
+	}
+
+	return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table)
+{
+	const struct of_device_id *of_id;
+	armpmu_init_fn init_fn;
+	struct device_node *node = pdev->dev.of_node;
+	struct arm_pmu *pmu;
+	int ret = -ENODEV;
+
+	pmu = armpmu_alloc();
+	if (!pmu)
+		return -ENOMEM;
+
+	pmu->plat_device = pdev;
+
+	ret = pmu_parse_irqs(pmu);
+	if (ret)
+		goto out_free;
+
+	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+		init_fn = of_id->data;
+
+		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+							   "secure-reg-access");
+
+		/* arm64 systems boot only as non-secure */
+		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
+			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+			pmu->secure_access = false;
+		}
+
+		ret = init_fn(pmu);
+	} else if (probe_table) {
+		cpumask_setall(&pmu->supported_cpus);
+		ret = probe_current_pmu(pmu, probe_table);
+	}
+
+	if (ret) {
+		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+		goto out_free;
+	}
+
+	ret = armpmu_request_irqs(pmu);
+	if (ret)
+		goto out_free_irqs;
+
+	ret = armpmu_register(pmu);
+	if (ret)
+		goto out_free;
+
+	return 0;
+
+out_free_irqs:
+	armpmu_free_irqs(pmu);
+out_free:
+	pr_info("%s: failed to register PMU devices!\n",
+		of_node_full_name(node));
+	armpmu_free(pmu);
+	return ret;
+}
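
For context, the probe path that now lives in arm_pmu_platform.c is consumed by individual PMU drivers through arm_pmu_device_probe(): the driver passes an of_device_id table whose .data members are armpmu_init_fn callbacks, plus an optional pmu_probe_info table for DT-less systems (matched by probe_current_pmu() against the ID register of the CPU it runs on). The following is only a minimal sketch of such a caller; the "example" driver name, compatible string and init callback are hypothetical and are not part of this patch.

/* Hypothetical example driver; names and compatible string are illustrative only. */
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

static int example_pmu_init(struct arm_pmu *cpu_pmu)
{
	/* A real driver fills in the arm_pmu callbacks (map_event, enable, ...) here. */
	cpu_pmu->name = "example_pmu";
	return 0;
}

static const struct of_device_id example_pmu_of_ids[] = {
	{ .compatible = "example,pmu", .data = example_pmu_init },
	{ /* sentinel */ },
};

static int example_pmu_probe(struct platform_device *pdev)
{
	/* IRQ parsing, allocation and perf registration are delegated to arm_pmu_platform.c */
	return arm_pmu_device_probe(pdev, example_pmu_of_ids, NULL);
}

static struct platform_driver example_pmu_driver = {
	.driver = {
		.name		= "example-pmu",
		.of_match_table	= example_pmu_of_ids,
	},
	.probe = example_pmu_probe,
};
builtin_platform_driver(example_pmu_driver);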