Diffstat (limited to 'drivers/perf/arm_pmu.c')
-rw-r--r--  drivers/perf/arm_pmu.c  78
1 file changed, 46 insertions(+), 32 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f5e1008a223d..b37b57294566 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -534,6 +534,24 @@ static int armpmu_filter_match(struct perf_event *event)
return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}
+static ssize_t armpmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
+ return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
+}
+
+static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);
+
+static struct attribute *armpmu_common_attrs[] = {
+ &dev_attr_cpus.attr,
+ NULL,
+};
+
+static struct attribute_group armpmu_common_attr_group = {
+ .attrs = armpmu_common_attrs,
+};
+
static void armpmu_init(struct arm_pmu *armpmu)
{
atomic_set(&armpmu->active_events, 0);
@@ -549,7 +567,10 @@ static void armpmu_init(struct arm_pmu *armpmu)
.stop = armpmu_stop,
.read = armpmu_read,
.filter_match = armpmu_filter_match,
+ .attr_groups = armpmu->attr_groups,
};
+ armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
+ &armpmu_common_attr_group;
}
/* Set at runtime when we know what CPU type we are. */
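Note: the two hunks above expose the PMU's supported-CPU mask to userspace. The first adds a read-only "cpus" attribute backed by cpumap_print_to_pagebuf(); the second wires the driver's attribute groups into struct pmu so the perf core registers them at pmu_register time. Since perf PMUs surface their attributes under /sys/bus/event_source/devices/<name>/, the mask should be readable from there. A minimal userspace sketch; the PMU directory name "armv8_pmuv3" is an assumption, adjust it for the platform:

    /* Sketch: read the cpumask exposed by the hunks above.
     * The sysfs path is an assumption; adjust the PMU name. */
    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/bus/event_source/devices/armv8_pmuv3/cpus";
            char buf[64];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("supported cpus: %s", buf); /* e.g. "0-3" */
            fclose(f);
            return 0;
    }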
@@ -602,7 +623,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
irqs = min(pmu_device->num_resources, num_possible_cpus());
irq = platform_get_irq(pmu_device, 0);
- if (irq >= 0 && irq_is_percpu(irq)) {
+ if (irq > 0 && irq_is_percpu(irq)) {
on_each_cpu_mask(&cpu_pmu->supported_cpus,
cpu_pmu_disable_percpu_irq, &irq, 1);
free_percpu_irq(irq, &hw_events->percpu_pmu);
@@ -616,7 +637,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
+ if (irq > 0)
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
}
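Note: this hunk and the similar ones below tighten the platform_get_irq() checks from >= 0 to > 0. The call can return 0, and 0 is not a valid Linux IRQ number, so treating it as usable would be a bug. A sketch of the resulting idiom, as a hypothetical helper rather than code from this file:

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    /* Hypothetical helper illustrating the check this series settles
     * on: only strictly positive return values from platform_get_irq()
     * name a real interrupt; 0 and negatives mean "no usable IRQ". */
    static int example_get_pmu_irq(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq <= 0)
                    return irq ? irq : -ENXIO; /* propagate error, map 0 */
            return irq;
    }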
@@ -638,7 +659,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
}
irq = platform_get_irq(pmu_device, 0);
- if (irq >= 0 && irq_is_percpu(irq)) {
+ if (irq > 0 && irq_is_percpu(irq)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
if (err) {
@@ -688,28 +709,20 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}
-static DEFINE_SPINLOCK(arm_pmu_lock);
-static LIST_HEAD(arm_pmu_list);
-
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
* UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
* junk values out of them.
*/
-static int arm_perf_starting_cpu(unsigned int cpu)
+static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
- struct arm_pmu *pmu;
-
- spin_lock(&arm_pmu_lock);
- list_for_each_entry(pmu, &arm_pmu_list, entry) {
- if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
- continue;
- if (pmu->reset)
- pmu->reset(pmu);
- }
- spin_unlock(&arm_pmu_lock);
+ struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return 0;
+ if (pmu->reset)
+ pmu->reset(pmu);
return 0;
}
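Note: the hunk above is the heart of the hotplug conversion. Instead of walking a private, spinlock-protected global list of PMUs, the CPU hotplug core now invokes the callback once per registered instance and hands it that instance's hlist_node, which hlist_entry_safe() converts back to the containing arm_pmu. A minimal sketch of the same multi-instance pattern for a hypothetical driver:

    #include <linux/cpuhotplug.h>
    #include <linux/list.h>

    /* Hypothetical per-device state embedding the hotplug instance
     * node, mirroring struct arm_pmu::node above. */
    struct example_dev {
            struct hlist_node node;
            void (*reset)(struct example_dev *dev);
    };

    /* Called once per (cpu, instance) pair by the hotplug core, so no
     * driver-side list or lock is needed. */
    static int example_starting_cpu(unsigned int cpu,
                                    struct hlist_node *node)
    {
            struct example_dev *dev =
                    hlist_entry_safe(node, struct example_dev, node);

            if (dev->reset)
                    dev->reset(dev);
            return 0;
    }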
@@ -821,9 +834,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
if (!cpu_hw_events)
return -ENOMEM;
- spin_lock(&arm_pmu_lock);
- list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
- spin_unlock(&arm_pmu_lock);
+ err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
+ if (err)
+ goto out_free;
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
@@ -859,9 +873,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
out_unregister:
- spin_lock(&arm_pmu_lock);
- list_del(&cpu_pmu->entry);
- spin_unlock(&arm_pmu_lock);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
+out_free:
free_percpu(cpu_hw_events);
return err;
}
@@ -869,9 +883,8 @@ out_unregister:
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
cpu_pm_pmu_unregister(cpu_pmu);
- spin_lock(&arm_pmu_lock);
- list_del(&cpu_pmu->entry);
- spin_unlock(&arm_pmu_lock);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
free_percpu(cpu_pmu->hw_events);
}
@@ -919,7 +932,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
irq = platform_get_irq(pdev, i);
- if (irq >= 0) {
+ if (irq > 0) {
bool spi = !irq_is_percpu(irq);
if (i > 0 && spi != using_spi) {
@@ -970,7 +983,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
if (cpumask_weight(&pmu->supported_cpus) == 0) {
int irq = platform_get_irq(pdev, 0);
- if (irq >= 0 && irq_is_percpu(irq)) {
+ if (irq > 0 && irq_is_percpu(irq)) {
/* If using PPIs, check the affinity of the partition */
int ret;
@@ -1029,7 +1042,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
ret = of_pmu_irq_cfg(pmu);
if (!ret)
ret = init_fn(pmu);
- } else {
+ } else if (probe_table) {
cpumask_setall(&pmu->supported_cpus);
ret = probe_current_pmu(pmu, probe_table);
}
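Note: the new else-if guard matters on platforms that have neither a matching DT node nor a probe table: previously the else branch would have handed a NULL table to probe_current_pmu(), which walks it; now ret simply keeps its earlier error value and the probe fails cleanly.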
@@ -1039,6 +1052,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
goto out_free;
}
+
ret = cpu_pmu_init(pmu);
if (ret)
goto out_free;
@@ -1069,9 +1083,9 @@ static int arm_pmu_hp_init(void)
{
int ret;
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
- "AP_PERF_ARM_STARTING",
- arm_perf_starting_cpu, NULL);
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
+ "AP_PERF_ARM_STARTING",
+ arm_perf_starting_cpu, NULL);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
ret);
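Note: the final hunk registers the hotplug state with cpuhp_setup_state_multi() instead of cpuhp_setup_state_nocalls(), matching the per-instance add/remove calls in cpu_pmu_init() and cpu_pmu_destroy() above. Continuing the hypothetical example from the earlier sketch, the pairing looks like this; the _nocalls variants deliberately skip running the callback on already-online CPUs at add/remove time:

    #include <linux/cpuhotplug.h>

    static int __init example_hp_init(void)
    {
            /* Register the state and its callback once, with no
             * instances attached yet. */
            return cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
                                           "AP_PERF_ARM_STARTING",
                                           example_starting_cpu, NULL);
    }

    static int example_dev_register(struct example_dev *dev)
    {
            /* One instance per probed device. */
            return cpuhp_state_add_instance_nocalls(
                            CPUHP_AP_PERF_ARM_STARTING, &dev->node);
    }

    static void example_dev_unregister(struct example_dev *dev)
    {
            cpuhp_state_remove_instance_nocalls(
                            CPUHP_AP_PERF_ARM_STARTING, &dev->node);
    }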