From ae7c18380495ac5c14a614fdb6c452c3bf9148ac Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Tue, 7 Mar 2017 20:40:05 +0800 Subject: ACPI: platform-msi: retrieve devid from IORT For devices connected to an ITS, each device needs to identify itself through a devid; for platform devices, this devid is represented in the IORT table in the named component node [1], so this patch adds code that scans the IORT table to retrieve a device's devid. Add an IORT interface to retrieve the ITS devid of a device, so that platform device MSI mappings can be carried out with IORT tables. [1]: https://static.docs.arm.com/den0049/b/DEN0049B_IO_Remapping_Table.pdf Signed-off-by: Hanjun Guo [lorenzo.pieralisi@arm.com: rewrote commit log/dropped ITS changes] Signed-off-by: Lorenzo Pieralisi Tested-by: Ming Lei Tested-by: Wei Xu Tested-by: Sinan Kaya Cc: Marc Zyngier Cc: Lorenzo Pieralisi Cc: Tomasz Nowicki Cc: Thomas Gleixner --- include/linux/acpi_iort.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 77e08099e554..fd8b9698e1d1 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -34,6 +34,7 @@ void acpi_iort_init(void); bool iort_node_match(u8 type); u32 iort_msi_map_rid(struct device *dev, u32 req_id); struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); +int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); /* IOMMU interface */ void iort_set_dma_mask(struct device *dev); const struct iommu_ops *iort_iommu_configure(struct device *dev); -- cgit v1.2.3 From d4f54a186667ffd19eac8e3f48c51d940a9b9784 Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Tue, 7 Mar 2017 20:40:06 +0800 Subject: ACPI: platform: setup MSI domain for ACPI based platform device By allowing a platform MSI domain to be created on ACPI platforms, a platform device MSI domain can be set up when the device is probed. In order to do that, the MSI domain the platform device connects to should be retrieved, so iort_get_platform_device_domain() is introduced to retrieve the domain from the IORT kernel layer. With the domain retrieved, we need a proper way to set the domain for the platform device. Given that some platform devices (irqchips) require the MSI irqdomain to be their interrupt parent domain, the MSI irqdomain should be determined before the platform device is probed but after it is allocated, which means that the code setting up the MSI irqdomain, i.e. acpi_configure_pmsi_domain(), should be called in acpi_platform_notify() (which is triggered after adding a device but before the respective driver is probed) for the platform MSI domain set-up path to work properly. Acked-by: Rafael J.
Wysocki [for glue.c] Signed-off-by: Hanjun Guo [lorenzo.pieralisi@arm.com: rewrote commit log] Signed-off-by: Lorenzo Pieralisi Tested-by: Ming Lei Tested-by: Wei Xu Tested-by: Sinan Kaya Cc: Marc Zyngier Cc: Lorenzo Pieralisi Cc: Tomasz Nowicki --- drivers/acpi/arm64/iort.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/acpi/glue.c | 6 ++++++ include/linux/acpi_iort.h | 2 ++ 3 files changed, 58 insertions(+) (limited to 'include') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index fb95ceb3840d..22e08d272db7 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -548,6 +548,56 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); } +/** + * iort_get_platform_device_domain() - Find MSI domain related to a + * platform device + * @dev: the dev pointer associated with the platform device + * + * Returns: the MSI domain for this device, NULL otherwise + */ +static struct irq_domain *iort_get_platform_device_domain(struct device *dev) +{ + struct acpi_iort_node *node, *msi_parent; + struct fwnode_handle *iort_fwnode; + struct acpi_iort_its_group *its; + int i; + + /* find its associated iort node */ + node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, + iort_match_node_callback, dev); + if (!node) + return NULL; + + /* then find its msi parent node */ + for (i = 0; i < node->mapping_count; i++) { + msi_parent = iort_node_map_platform_id(node, NULL, + IORT_MSI_TYPE, i); + if (msi_parent) + break; + } + + if (!msi_parent) + return NULL; + + /* Move to ITS specific data */ + its = (struct acpi_iort_its_group *)msi_parent->node_data; + + iort_fwnode = iort_find_domain_token(its->identifiers[0]); + if (!iort_fwnode) + return NULL; + + return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); +} + +void acpi_configure_pmsi_domain(struct device *dev) +{ + struct irq_domain *msi_domain; + + msi_domain = iort_get_platform_device_domain(dev); + if (msi_domain) + dev_set_msi_domain(dev, msi_domain); +} + static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) { u32 *rid = data; diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index fb19e1cdb641..ec31b439b4c8 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -6,6 +6,8 @@ * * This file is released under the GPLv2. 
*/ + +#include #include #include #include @@ -14,6 +16,7 @@ #include #include #include +#include #include "internal.h" @@ -322,6 +325,9 @@ static int acpi_platform_notify(struct device *dev) if (!adev) goto out; + if (dev->bus == &platform_bus_type) + acpi_configure_pmsi_domain(dev); + if (type && type->setup) type->setup(dev); else if (adev->handler && adev->handler->bind) diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index fd8b9698e1d1..26e25d85eb3e 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -34,6 +34,7 @@ void acpi_iort_init(void); bool iort_node_match(u8 type); u32 iort_msi_map_rid(struct device *dev, u32 req_id); struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); +void acpi_configure_pmsi_domain(struct device *dev); int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); /* IOMMU interface */ void iort_set_dma_mask(struct device *dev); @@ -46,6 +47,7 @@ static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) static inline struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) { return NULL; } +static inline void acpi_configure_pmsi_domain(struct device *dev) { } /* IOMMU interface */ static inline void iort_set_dma_mask(struct device *dev) { } static inline -- cgit v1.2.3 From 7ed98e0168bd23d8ea3294e95254cc5b4000c948 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 10 Mar 2017 10:46:14 +0000 Subject: drivers/perf: arm_pmu: manage interrupts per-cpu When requesting or freeing interrupts, we use platform_get_irq() to find relevant irqs, backing this up with additional information in an optional irq_affinity table. This means that our irq request and free paths are tied to a platform_device, and our request path must jump through a number of hoops in order to determine the required affinity of each interrupt. Given that the affinity must be static, we can compute the affinity once up-front at probe time, simplifying the irq request and free paths. By recording interrupts in a per-cpu data structure, we simplify a few paths, and permit a subsequent rework of the request and free paths. 
Signed-off-by: Mark Rutland [will: rename local nr_irqs variable to avoid conflict with global] Signed-off-by: Will Deacon --- drivers/perf/arm_pmu.c | 314 ++++++++++++++++++++++--------------------- include/linux/perf/arm_pmu.h | 3 +- 2 files changed, 166 insertions(+), 151 deletions(-) (limited to 'include') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index ad60e966f174..e984653b93aa 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -617,94 +617,76 @@ static void cpu_pmu_disable_percpu_irq(void *data) static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) { - int i, irq, irqs; - struct platform_device *pmu_device = cpu_pmu->plat_device; + int cpu; struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; - irqs = min(pmu_device->num_resources, num_possible_cpus()); - - irq = platform_get_irq(pmu_device, 0); - if (irq > 0 && irq_is_percpu(irq)) { - on_each_cpu_mask(&cpu_pmu->supported_cpus, - cpu_pmu_disable_percpu_irq, &irq, 1); - free_percpu_irq(irq, &hw_events->percpu_pmu); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - if (cpu_pmu->irq_affinity) - cpu = cpu_pmu->irq_affinity[i]; - - if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq > 0) - free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); + for_each_cpu(cpu, &cpu_pmu->supported_cpus) { + int irq = per_cpu(hw_events->irq, cpu); + if (!irq) + continue; + + if (irq_is_percpu(irq)) { + on_each_cpu_mask(&cpu_pmu->supported_cpus, + cpu_pmu_disable_percpu_irq, &irq, 1); + free_percpu_irq(irq, &hw_events->percpu_pmu); + + break; } + + if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) + continue; + + free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); } } static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) { - int i, err, irq, irqs; - struct platform_device *pmu_device = cpu_pmu->plat_device; + int cpu, err; struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; - if (!pmu_device) - return -ENODEV; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (irqs < 1) { - pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n"); - return 0; - } - - irq = platform_get_irq(pmu_device, 0); - if (irq > 0 && irq_is_percpu(irq)) { - err = request_percpu_irq(irq, handler, "arm-pmu", - &hw_events->percpu_pmu); - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - return err; - } - - on_each_cpu_mask(&cpu_pmu->supported_cpus, - cpu_pmu_enable_percpu_irq, &irq, 1); - } else { - for (i = 0; i < irqs; ++i) { - int cpu = i; - - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq < 0) - continue; - - if (cpu_pmu->irq_affinity) - cpu = cpu_pmu->irq_affinity[i]; - - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. 
- */ - if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { - pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, cpu); - continue; - } + for_each_cpu(cpu, &cpu_pmu->supported_cpus) { + int irq = per_cpu(hw_events->irq, cpu); + if (!irq) + continue; - err = request_irq(irq, handler, - IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", - per_cpu_ptr(&hw_events->percpu_pmu, cpu)); + if (irq_is_percpu(irq)) { + err = request_percpu_irq(irq, handler, "arm-pmu", + &hw_events->percpu_pmu); if (err) { pr_err("unable to request IRQ%d for ARM PMU counters\n", irq); return err; } - cpumask_set_cpu(cpu, &cpu_pmu->active_irqs); + on_each_cpu_mask(&cpu_pmu->supported_cpus, + cpu_pmu_enable_percpu_irq, &irq, 1); + + break; } + + /* + * If we have a single PMU interrupt that we can't shift, + * assume that we're running on a uniprocessor machine and + * continue. Otherwise, continue without this interrupt. + */ + if (irq_set_affinity(irq, cpumask_of(cpu)) && + num_possible_cpus() > 1) { + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + continue; + } + + err = request_irq(irq, handler, + IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", + per_cpu_ptr(&hw_events->percpu_pmu, cpu)); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", + irq); + return err; + } + + cpumask_set_cpu(cpu, &cpu_pmu->active_irqs); } return 0; @@ -846,10 +828,6 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset, cpu_pmu, 1); - /* If no interrupts available, set the corresponding capability flag */ - if (!platform_get_irq(cpu_pmu->plat_device, 0)) - cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; - /* * This is a CPU PMU potentially in a heterogeneous configuration (e.g. * big.LITTLE). This is not an uncore PMU, and we have taken ctx @@ -897,98 +875,133 @@ static int probe_current_pmu(struct arm_pmu *pmu, return ret; } -static int of_pmu_irq_cfg(struct arm_pmu *pmu) +static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq) { - int *irqs, i = 0; - bool using_spi = false; - struct platform_device *pdev = pmu->plat_device; + int cpu, ret; + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; - irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); - if (!irqs) - return -ENOMEM; + ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); + if (ret) + return ret; - do { - struct device_node *dn; - int cpu, irq; + for_each_cpu(cpu, &pmu->supported_cpus) + per_cpu(hw_events->irq, cpu) = irq; - /* See if we have an affinity entry */ - dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i); - if (!dn) - break; + return 0; +} - /* Check the IRQ type and prohibit a mix of PPIs and SPIs */ - irq = platform_get_irq(pdev, i); - if (irq > 0) { - bool spi = !irq_is_percpu(irq); - - if (i > 0 && spi != using_spi) { - pr_err("PPI/SPI IRQ type mismatch for %s!\n", - dn->name); - of_node_put(dn); - kfree(irqs); - return -EINVAL; - } +static bool pmu_has_irq_affinity(struct device_node *node) +{ + return !!of_find_property(node, "interrupt-affinity", NULL); +} - using_spi = spi; - } +static int pmu_parse_irq_affinity(struct device_node *node, int i) +{ + struct device_node *dn; + int cpu; - /* Now look up the logical CPU number */ - for_each_possible_cpu(cpu) { - struct device_node *cpu_dn; + /* + * If we don't have an interrupt-affinity property, we guess irq + * affinity matches our logical CPU order, as we used to assume. + * This is fragile, so we'll warn in pmu_parse_irqs(). 
+ */ + if (!pmu_has_irq_affinity(node)) + return i; - cpu_dn = of_cpu_device_node_get(cpu); - of_node_put(cpu_dn); + dn = of_parse_phandle(node, "interrupt-affinity", i); + if (!dn) { + pr_warn("failed to parse interrupt-affinity[%d] for %s\n", + i, node->name); + return -EINVAL; + } - if (dn == cpu_dn) - break; - } + /* Now look up the logical CPU number */ + for_each_possible_cpu(cpu) { + struct device_node *cpu_dn; + + cpu_dn = of_cpu_device_node_get(cpu); + of_node_put(cpu_dn); - if (cpu >= nr_cpu_ids) { - pr_warn("Failed to find logical CPU for %s\n", - dn->name); - of_node_put(dn); - cpumask_setall(&pmu->supported_cpus); + if (dn == cpu_dn) break; - } - of_node_put(dn); + } - /* For SPIs, we need to track the affinity per IRQ */ - if (using_spi) { - if (i >= pdev->num_resources) - break; + if (cpu >= nr_cpu_ids) { + pr_warn("failed to find logical CPU for %s\n", dn->name); + } - irqs[i] = cpu; - } + of_node_put(dn); - /* Keep track of the CPUs containing this PMU type */ - cpumask_set_cpu(cpu, &pmu->supported_cpus); - i++; - } while (1); + return cpu; +} + +static int pmu_parse_irqs(struct arm_pmu *pmu) +{ + int i = 0, irqs; + struct platform_device *pdev = pmu->plat_device; + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; + + irqs = platform_irq_count(pdev); + if (irqs < 0) { + pr_err("unable to count PMU IRQs\n"); + return irqs; + } + + /* + * In this case we have no idea which CPUs are covered by the PMU. + * To match our prior behaviour, we assume all CPUs in this case. + */ + if (irqs == 0) { + pr_warn("no irqs for PMU, sampling events not supported\n"); + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + cpumask_setall(&pmu->supported_cpus); + return 0; + } - /* If we didn't manage to parse anything, try the interrupt affinity */ - if (cpumask_weight(&pmu->supported_cpus) == 0) { + if (irqs == 1) { int irq = platform_get_irq(pdev, 0); + if (irq && irq_is_percpu(irq)) + return pmu_parse_percpu_irq(pmu, irq); + } - if (irq > 0 && irq_is_percpu(irq)) { - /* If using PPIs, check the affinity of the partition */ - int ret; + if (!pmu_has_irq_affinity(pdev->dev.of_node)) { + pr_warn("no interrupt-affinity property for %s, guessing.\n", + of_node_full_name(pdev->dev.of_node)); + } - ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); - if (ret) { - kfree(irqs); - return ret; - } - } else { - /* Otherwise default to all CPUs */ - cpumask_setall(&pmu->supported_cpus); + /* + * Some platforms have all PMU IRQs OR'd into a single IRQ, with a + * special platdata function that attempts to demux them. 
+ */ + if (dev_get_platdata(&pdev->dev)) + cpumask_setall(&pmu->supported_cpus); + + for (i = 0; i < irqs; i++) { + int cpu, irq; + + irq = platform_get_irq(pdev, i); + if (WARN_ON(irq <= 0)) + continue; + + if (irq_is_percpu(irq)) { + pr_warn("multiple PPIs or mismatched SPI/PPI detected\n"); + return -EINVAL; } - } - /* If we matched up the IRQ affinities, use them to route the SPIs */ - if (using_spi && i == pdev->num_resources) - pmu->irq_affinity = irqs; - else - kfree(irqs); + cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i); + if (cpu < 0) + return cpu; + if (cpu >= nr_cpu_ids) + continue; + + if (per_cpu(hw_events->irq, cpu)) { + pr_warn("multiple PMU IRQs for the same CPU detected\n"); + return -EINVAL; + } + + per_cpu(hw_events->irq, cpu) = irq; + cpumask_set_cpu(cpu, &pmu->supported_cpus); + } return 0; } @@ -1050,6 +1063,10 @@ int arm_pmu_device_probe(struct platform_device *pdev, pmu->plat_device = pdev; + ret = pmu_parse_irqs(pmu); + if (ret) + goto out_free; + if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { init_fn = of_id->data; @@ -1062,9 +1079,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, pmu->secure_access = false; } - ret = of_pmu_irq_cfg(pmu); - if (!ret) - ret = init_fn(pmu); + ret = init_fn(pmu); } else if (probe_table) { cpumask_setall(&pmu->supported_cpus); ret = probe_current_pmu(pmu, probe_table); @@ -1097,7 +1112,6 @@ out_destroy: out_free: pr_info("%s: failed to register PMU devices!\n", of_node_full_name(node)); - kfree(pmu->irq_affinity); armpmu_free(pmu); return ret; } diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 8462da266089..05a3eb447fc8 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -75,6 +75,8 @@ struct pmu_hw_events { * already have to allocate this struct per cpu. */ struct arm_pmu *percpu_pmu; + + int irq; }; enum armpmu_attr_groups { @@ -88,7 +90,6 @@ struct arm_pmu { struct pmu pmu; cpumask_t active_irqs; cpumask_t supported_cpus; - int *irq_affinity; char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); void (*enable)(struct perf_event *event); -- cgit v1.2.3 From c09adab01e4aeecfa3dfae0946409844400c5901 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 10 Mar 2017 10:46:15 +0000 Subject: drivers/perf: arm_pmu: split irq request from enable For historical reasons, we lazily request and free interrupts in the arm pmu driver. This requires us to refcount use of the pmu (by way of counting the active events) in order to request/free interrupts at the correct times, which complicates the driver somewhat. The existing logic is flawed, as it only considers currently online CPUs when requesting, freeing, or managing the affinity of interrupts. Intervening hotplug events can result in erroneous IRQ affinity, online CPUs for which interrupts have not been requested, or offline CPUs whose interrupts are still requested. To fix this, this patch splits the requesting of interrupts from any per-cpu management (i.e. per-cpu enable/disable, and configuration of cpu affinity). We now request all interrupts up-front at probe time (and never free them, since we never unregister PMUs). The management of affinity, and per-cpu enable/disable now happens in our cpu hotplug callback, ensuring it occurs consistently. This means that we must now invoke the CPU hotplug callback at boot time in order to configure IRQs, and since the callback also resets the PMU hardware, we can remove the duplicate reset in the probe path. 
This rework renders our event refcounting unnecessary, so this is removed. Signed-off-by: Mark Rutland [will: make armpmu_get_cpu_irq static] Signed-off-by: Will Deacon --- drivers/perf/arm_pmu.c | 153 ++++++++++++++----------------------------- include/linux/perf/arm_pmu.h | 4 -- 2 files changed, 50 insertions(+), 107 deletions(-) (limited to 'include') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index e984653b93aa..a1dfe895cb1d 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -352,37 +352,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) return ret; } -static void -armpmu_release_hardware(struct arm_pmu *armpmu) -{ - armpmu->free_irq(armpmu); -} - -static int -armpmu_reserve_hardware(struct arm_pmu *armpmu) -{ - int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); - if (err) { - armpmu_release_hardware(armpmu); - return err; - } - - return 0; -} - -static void -hw_perf_event_destroy(struct perf_event *event) -{ - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - atomic_t *active_events = &armpmu->active_events; - struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; - - if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { - armpmu_release_hardware(armpmu); - mutex_unlock(pmu_reserve_mutex); - } -} - static int event_requires_mode_exclusion(struct perf_event_attr *attr) { @@ -455,8 +424,6 @@ __hw_perf_event_init(struct perf_event *event) static int armpmu_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - int err = 0; - atomic_t *active_events = &armpmu->active_events; /* * Reject CPU-affine events for CPUs that are of a different class to @@ -476,26 +443,7 @@ static int armpmu_event_init(struct perf_event *event) if (armpmu->map_event(event) == -ENOENT) return -ENOENT; - event->destroy = hw_perf_event_destroy; - - if (!atomic_inc_not_zero(active_events)) { - mutex_lock(&armpmu->reserve_mutex); - if (atomic_read(active_events) == 0) - err = armpmu_reserve_hardware(armpmu); - - if (!err) - atomic_inc(active_events); - mutex_unlock(&armpmu->reserve_mutex); - } - - if (err) - return err; - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err; + return __hw_perf_event_init(event); } static void armpmu_enable(struct pmu *pmu) @@ -555,9 +503,6 @@ static struct attribute_group armpmu_common_attr_group = { static void armpmu_init(struct arm_pmu *armpmu) { - atomic_set(&armpmu->active_events, 0); - mutex_init(&armpmu->reserve_mutex); - armpmu->pmu = (struct pmu) { .pmu_enable = armpmu_enable, .pmu_disable = armpmu_disable, @@ -601,21 +546,7 @@ int perf_num_counters(void) } EXPORT_SYMBOL_GPL(perf_num_counters); -static void cpu_pmu_enable_percpu_irq(void *data) -{ - int irq = *(int *)data; - - enable_percpu_irq(irq, IRQ_TYPE_NONE); -} - -static void cpu_pmu_disable_percpu_irq(void *data) -{ - int irq = *(int *)data; - - disable_percpu_irq(irq); -} - -static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) +static void cpu_pmu_free_irqs(struct arm_pmu *cpu_pmu) { int cpu; struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; @@ -626,10 +557,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) continue; if (irq_is_percpu(irq)) { - on_each_cpu_mask(&cpu_pmu->supported_cpus, - cpu_pmu_disable_percpu_irq, &irq, 1); free_percpu_irq(irq, &hw_events->percpu_pmu); - break; } @@ -640,7 +568,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) } } -static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) +static 
int cpu_pmu_request_irqs(struct arm_pmu *cpu_pmu, irq_handler_t handler) { int cpu, err; struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; @@ -656,25 +584,9 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) if (err) { pr_err("unable to request IRQ%d for ARM PMU counters\n", irq); - return err; } - on_each_cpu_mask(&cpu_pmu->supported_cpus, - cpu_pmu_enable_percpu_irq, &irq, 1); - - break; - } - - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. - */ - if (irq_set_affinity(irq, cpumask_of(cpu)) && - num_possible_cpus() > 1) { - pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, cpu); - continue; + return err; } err = request_irq(irq, handler, @@ -692,6 +604,12 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) return 0; } +static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) +{ + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; + return per_cpu(hw_events->irq, cpu); +} + /* * PMU hardware loses all context when a CPU goes offline. * When a CPU is hotplugged back in, since some hardware registers are @@ -701,11 +619,42 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) { struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); + int irq; if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) return 0; if (pmu->reset) pmu->reset(pmu); + + irq = armpmu_get_cpu_irq(pmu, cpu); + if (irq) { + if (irq_is_percpu(irq)) { + enable_percpu_irq(irq, IRQ_TYPE_NONE); + return 0; + } + + if (irq_force_affinity(irq, cpumask_of(cpu)) && + num_possible_cpus() > 1) { + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + } + } + + return 0; +} + +static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); + int irq; + + if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) + return 0; + + irq = armpmu_get_cpu_irq(pmu, cpu); + if (irq && irq_is_percpu(irq)) + disable_percpu_irq(irq); + return 0; } @@ -811,8 +760,12 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) { int err; - err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING, - &cpu_pmu->node); + err = cpu_pmu_request_irqs(cpu_pmu, armpmu_dispatch_irq); + if (err) + goto out; + + err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING, + &cpu_pmu->node); if (err) goto out; @@ -820,14 +773,6 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) if (err) goto out_unregister; - cpu_pmu->request_irq = cpu_pmu_request_irq; - cpu_pmu->free_irq = cpu_pmu_free_irq; - - /* Ensure the PMU has sane values out of reset. */ - if (cpu_pmu->reset) - on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset, - cpu_pmu, 1); - /* * This is a CPU PMU potentially in a heterogeneous configuration (e.g. * big.LITTLE). 
This is not an uncore PMU, and we have taken ctx @@ -842,6 +787,7 @@ out_unregister: cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING, &cpu_pmu->node); out: + cpu_pmu_free_irqs(cpu_pmu); return err; } @@ -1122,7 +1068,8 @@ static int arm_pmu_hp_init(void) ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING, "perf/arm/pmu:starting", - arm_perf_starting_cpu, NULL); + arm_perf_starting_cpu, + arm_perf_teardown_cpu); if (ret) pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", ret); diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 05a3eb447fc8..44f43fcf2524 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -105,12 +105,8 @@ struct arm_pmu { void (*start)(struct arm_pmu *); void (*stop)(struct arm_pmu *); void (*reset)(void *); - int (*request_irq)(struct arm_pmu *, irq_handler_t handler); - void (*free_irq)(struct arm_pmu *); int (*map_event)(struct perf_event *event); int num_events; - atomic_t active_events; - struct mutex reserve_mutex; u64 max_period; bool secure_access; /* 32-bit ARM only */ #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 -- cgit v1.2.3 From 3071f13d75f627ed8648535815a0506d50cbc6ed Mon Sep 17 00:00:00 2001 From: Agustin Vega-Frias Date: Fri, 31 Mar 2017 14:13:43 -0400 Subject: perf: qcom: Add L3 cache PMU driver This adds a new dynamic PMU to the Perf Events framework to program and control the L3 cache PMUs in some Qualcomm Technologies SoCs. The driver supports a distributed cache architecture where the overall cache for a socket is composed of multiple slices, each with its own PMU. Access to each individual PMU is provided even though all CPUs share all the slices. User space needs to aggregate the individual counts to provide a global picture. The driver exports formatting and event information to sysfs so it can be used by the perf user space tools with syntaxes such as: perf stat -a -e l3cache_0_0/read-miss/ perf stat -a -e l3cache_0_0/event=0x21/ Acked-by: Mark Rutland Signed-off-by: Agustin Vega-Frias [will: fixed sparse issues] Signed-off-by: Will Deacon --- Documentation/perf/qcom_l3_pmu.txt | 25 ++ drivers/perf/Kconfig | 10 + drivers/perf/Makefile | 1 + drivers/perf/qcom_l3_pmu.c | 849 +++++++++++++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 5 files changed, 886 insertions(+) create mode 100644 Documentation/perf/qcom_l3_pmu.txt create mode 100644 drivers/perf/qcom_l3_pmu.c (limited to 'include') diff --git a/Documentation/perf/qcom_l3_pmu.txt b/Documentation/perf/qcom_l3_pmu.txt new file mode 100644 index 000000000000..96b3a9444a0d --- /dev/null +++ b/Documentation/perf/qcom_l3_pmu.txt @@ -0,0 +1,25 @@ +Qualcomm Datacenter Technologies L3 Cache Performance Monitoring Unit (PMU) +=========================================================================== + +This driver supports the L3 cache PMUs found in Qualcomm Datacenter Technologies +Centriq SoCs. The L3 cache on these SoCs is composed of multiple slices, shared +by all cores within a socket. Each slice is exposed as a separate uncore perf +PMU with device name l3cache_<socket>_<instance>. User space is responsible +for aggregating across slices. + +The driver provides a description of its available events and configuration +options in sysfs, see /sys/devices/l3cache*. Given that these are uncore PMUs +the driver also exposes a "cpumask" sysfs attribute which contains a mask +consisting of one CPU per socket which will be used to handle all the PMU +events on that socket.
+ +The hardware implements 32bit event counters and has a flat 8bit event space +exposed via the "event" format attribute. In addition to the 32bit physical +counters the driver supports virtual 64bit hardware counters by using hardware +counter chaining. This feature is exposed via the "lc" (long counter) format +flag. E.g.: + + perf stat -e l3cache_0_0/read-miss,lc/ + +Given that these are uncore PMUs the driver does not support sampling, therefore +"perf record" will not work. Per-task perf sessions are not supported. diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 93651907874f..c436e0d303e7 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -21,6 +21,16 @@ config QCOM_L2_PMU Adds the L2 cache PMU into the perf events subsystem for monitoring L2 cache events. +config QCOM_L3_PMU + bool "Qualcomm Technologies L3-cache PMU" + depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI + select QCOM_IRQ_COMBINER + help + Provides support for the L3 cache performance monitor unit (PMU) + in Qualcomm Technologies processors. + Adds the L3 cache PMU into the perf events subsystem for + monitoring L3 cache events. + config XGENE_PMU depends on PERF_EVENTS && ARCH_XGENE bool "APM X-Gene SoC PMU" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index ef24833c94a8..ef0c6b210345 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_ARM_PMU) += arm_pmu.o obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o +obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c new file mode 100644 index 000000000000..7f6b62b29e9d --- /dev/null +++ b/drivers/perf/qcom_l3_pmu.c @@ -0,0 +1,849 @@ +/* + * Driver for the L3 cache PMUs in Qualcomm Technologies chips. + * + * The driver supports a distributed cache architecture where the overall + * cache for a socket is comprised of multiple slices each with its own PMU. + * Access to each individual PMU is provided even though all CPUs share all + * the slices. User space needs to aggregate to individual counts to provide + * a global picture. + * + * See Documentation/perf/qcom_l3_pmu.txt for more details. + * + * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * General constants + */ + +/* Number of counters on each PMU */ +#define L3_NUM_COUNTERS 8 +/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */ +#define L3_EVTYPE_MASK 0xFF +/* + * Bit position of the 'long counter' flag within perf_event_attr.config. + * Reserve some space between the event type and this flag to allow expansion + * in the event type field. 
+ */ +#define L3_EVENT_LC_BIT 32 + +/* + * Register offsets + */ + +/* Perfmon registers */ +#define L3_HML3_PM_CR 0x000 +#define L3_HML3_PM_EVCNTR(__cntr) (0x420 + ((__cntr) & 0x7) * 8) +#define L3_HML3_PM_CNTCTL(__cntr) (0x120 + ((__cntr) & 0x7) * 8) +#define L3_HML3_PM_EVTYPE(__cntr) (0x220 + ((__cntr) & 0x7) * 8) +#define L3_HML3_PM_FILTRA 0x300 +#define L3_HML3_PM_FILTRB 0x308 +#define L3_HML3_PM_FILTRC 0x310 +#define L3_HML3_PM_FILTRAM 0x304 +#define L3_HML3_PM_FILTRBM 0x30C +#define L3_HML3_PM_FILTRCM 0x314 + +/* Basic counter registers */ +#define L3_M_BC_CR 0x500 +#define L3_M_BC_SATROLL_CR 0x504 +#define L3_M_BC_CNTENSET 0x508 +#define L3_M_BC_CNTENCLR 0x50C +#define L3_M_BC_INTENSET 0x510 +#define L3_M_BC_INTENCLR 0x514 +#define L3_M_BC_GANG 0x718 +#define L3_M_BC_OVSR 0x740 +#define L3_M_BC_IRQCTL 0x96C + +/* + * Bit field definitions + */ + +/* L3_HML3_PM_CR */ +#define PM_CR_RESET (0) + +/* L3_HML3_PM_XCNTCTL/L3_HML3_PM_CNTCTLx */ +#define PMCNT_RESET (0) + +/* L3_HML3_PM_EVTYPEx */ +#define EVSEL(__val) ((__val) & L3_EVTYPE_MASK) + +/* Reset value for all the filter registers */ +#define PM_FLTR_RESET (0) + +/* L3_M_BC_CR */ +#define BC_RESET (1UL << 1) +#define BC_ENABLE (1UL << 0) + +/* L3_M_BC_SATROLL_CR */ +#define BC_SATROLL_CR_RESET (0) + +/* L3_M_BC_CNTENSET */ +#define PMCNTENSET(__cntr) (1UL << ((__cntr) & 0x7)) + +/* L3_M_BC_CNTENCLR */ +#define PMCNTENCLR(__cntr) (1UL << ((__cntr) & 0x7)) +#define BC_CNTENCLR_RESET (0xFF) + +/* L3_M_BC_INTENSET */ +#define PMINTENSET(__cntr) (1UL << ((__cntr) & 0x7)) + +/* L3_M_BC_INTENCLR */ +#define PMINTENCLR(__cntr) (1UL << ((__cntr) & 0x7)) +#define BC_INTENCLR_RESET (0xFF) + +/* L3_M_BC_GANG */ +#define GANG_EN(__cntr) (1UL << ((__cntr) & 0x7)) +#define BC_GANG_RESET (0) + +/* L3_M_BC_OVSR */ +#define PMOVSRCLR(__cntr) (1UL << ((__cntr) & 0x7)) +#define PMOVSRCLR_RESET (0xFF) + +/* L3_M_BC_IRQCTL */ +#define PMIRQONMSBEN(__cntr) (1UL << ((__cntr) & 0x7)) +#define BC_IRQCTL_RESET (0x0) + +/* + * Events + */ + +#define L3_EVENT_CYCLES 0x01 +#define L3_EVENT_READ_HIT 0x20 +#define L3_EVENT_READ_MISS 0x21 +#define L3_EVENT_READ_HIT_D 0x22 +#define L3_EVENT_READ_MISS_D 0x23 +#define L3_EVENT_WRITE_HIT 0x24 +#define L3_EVENT_WRITE_MISS 0x25 + +/* + * Decoding of settings from perf_event_attr + * + * The config format for perf events is: + * - config: bits 0-7: event type + * bit 32: HW counter size requested, 0: 32 bits, 1: 64 bits + */ + +static inline u32 get_event_type(struct perf_event *event) +{ + return (event->attr.config) & L3_EVTYPE_MASK; +} + +static inline bool event_uses_long_counter(struct perf_event *event) +{ + return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT)); +} + +static inline int event_num_counters(struct perf_event *event) +{ + return event_uses_long_counter(event) ? 2 : 1; +} + +/* + * Main PMU, inherits from the core perf PMU type + */ +struct l3cache_pmu { + struct pmu pmu; + struct hlist_node node; + void __iomem *regs; + struct perf_event *events[L3_NUM_COUNTERS]; + unsigned long used_mask[BITS_TO_LONGS(L3_NUM_COUNTERS)]; + cpumask_t cpumask; +}; + +#define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu)) + +/* + * Type used to group hardware counter operations + * + * Used to implement two types of hardware counters, standard (32bits) and + * long (64bits). The hardware supports counter chaining which we use to + * implement long counters. This support is exposed via the 'lc' flag field + * in perf_event_attr.config. 
+ */ +struct l3cache_event_ops { + /* Called to start event monitoring */ + void (*start)(struct perf_event *event); + /* Called to stop event monitoring */ + void (*stop)(struct perf_event *event, int flags); + /* Called to update the perf_event */ + void (*update)(struct perf_event *event); +}; + +/* + * Implementation of long counter operations + * + * 64bit counters are implemented by chaining two of the 32bit physical + * counters. The PMU only supports chaining of adjacent even/odd pairs + * and for simplicity the driver always configures the odd counter to + * count the overflows of the lower-numbered even counter. Note that since + * the resulting hardware counter is 64bits no IRQs are required to maintain + * the software counter which is also 64bits. + */ + +static void qcom_l3_cache__64bit_counter_start(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 evsel = get_event_type(event); + u32 gang; + + /* Set the odd counter to count the overflows of the even counter */ + gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG); + gang |= GANG_EN(idx + 1); + writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG); + + /* Initialize the hardware counters and reset prev_count*/ + local64_set(&event->hw.prev_count, 0); + writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)); + writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); + + /* + * Set the event types, the upper half must use zero and the lower + * half the actual event type + */ + writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1)); + writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx)); + + /* Finally, enable the counters */ + writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1)); + writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET); + writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx)); + writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET); +} + +static void qcom_l3_cache__64bit_counter_stop(struct perf_event *event, + int flags) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG); + + /* Disable the counters */ + writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR); + writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR); + + /* Disable chaining */ + writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG); +} + +static void qcom_l3_cache__64bit_counter_update(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 hi, lo; + u64 prev, new; + + do { + prev = local64_read(&event->hw.prev_count); + do { + hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)); + lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); + } while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1))); + new = ((u64)hi << 32) | lo; + } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); + + local64_add(new - prev, &event->count); +} + +static const struct l3cache_event_ops event_ops_long = { + .start = qcom_l3_cache__64bit_counter_start, + .stop = qcom_l3_cache__64bit_counter_stop, + .update = qcom_l3_cache__64bit_counter_update, +}; + +/* + * Implementation of standard counter operations + * + * 32bit counters use a single physical counter and a hardware feature that + * asserts the overflow IRQ on the toggling of the most significant bit in + * the counter. 
This feature allows the counters to be left free-running + * without needing the usual reprogramming required to properly handle races + * during concurrent calls to update. + */ + +static void qcom_l3_cache__32bit_counter_start(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 evsel = get_event_type(event); + u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL); + + /* Set the counter to assert the overflow IRQ on MSB toggling */ + writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL); + + /* Initialize the hardware counter and reset prev_count*/ + local64_set(&event->hw.prev_count, 0); + writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); + + /* Set the event type */ + writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx)); + + /* Enable interrupt generation by this counter */ + writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET); + + /* Finally, enable the counter */ + writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx)); + writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET); +} + +static void qcom_l3_cache__32bit_counter_stop(struct perf_event *event, + int flags) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL); + + /* Disable the counter */ + writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR); + + /* Disable interrupt generation by this counter */ + writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR); + + /* Set the counter to not assert the overflow IRQ on MSB toggling */ + writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL); +} + +static void qcom_l3_cache__32bit_counter_update(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 prev, new; + + do { + prev = local64_read(&event->hw.prev_count); + new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); + } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); + + local64_add(new - prev, &event->count); +} + +static const struct l3cache_event_ops event_ops_std = { + .start = qcom_l3_cache__32bit_counter_start, + .stop = qcom_l3_cache__32bit_counter_stop, + .update = qcom_l3_cache__32bit_counter_update, +}; + +/* Retrieve the appropriate operations for the given event */ +static +const struct l3cache_event_ops *l3cache_event_get_ops(struct perf_event *event) +{ + if (event_uses_long_counter(event)) + return &event_ops_long; + else + return &event_ops_std; +} + +/* + * Top level PMU functions. 
+ */ + +static inline void qcom_l3_cache__init(struct l3cache_pmu *l3pmu) +{ + int i; + + writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR); + + /* + * Use writel for the first programming command to ensure the basic + * counter unit is stopped before proceeding + */ + writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR); + + writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR); + writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR); + writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR); + writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG); + writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL); + writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR); + + for (i = 0; i < L3_NUM_COUNTERS; ++i) { + writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i)); + writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i)); + } + + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM); + + /* + * Use writel here to ensure all programming commands are done + * before proceeding + */ + writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR); +} + +static irqreturn_t qcom_l3_cache__handle_irq(int irq_num, void *data) +{ + struct l3cache_pmu *l3pmu = data; + /* Read the overflow status register */ + long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR); + int idx; + + if (status == 0) + return IRQ_NONE; + + /* Clear the bits we read on the overflow status register */ + writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR); + + for_each_set_bit(idx, &status, L3_NUM_COUNTERS) { + struct perf_event *event; + const struct l3cache_event_ops *ops; + + event = l3pmu->events[idx]; + if (!event) + continue; + + /* + * Since the IRQ is not enabled for events using long counters + * we should never see one of those here, however, be consistent + * and use the ops indirections like in the other operations. + */ + + ops = l3cache_event_get_ops(event); + ops->update(event); + } + + return IRQ_HANDLED; +} + +/* + * Implementation of abstract pmu functionality required by + * the core perf events code. + */ + +static void qcom_l3_cache__pmu_enable(struct pmu *pmu) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu); + + /* Ensure the other programming commands are observed before enabling */ + wmb(); + + writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR); +} + +static void qcom_l3_cache__pmu_disable(struct pmu *pmu) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu); + + writel_relaxed(0, l3pmu->regs + L3_M_BC_CR); + + /* Ensure the basic counter unit is stopped before proceeding */ + wmb(); +} + +/* + * We must NOT create groups containing events from multiple hardware PMUs, + * although mixing different software and hardware PMUs is allowed. 
+ */ +static bool qcom_l3_cache__validate_event_group(struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct perf_event *sibling; + int counters = 0; + + if (leader->pmu != event->pmu && !is_software_event(leader)) + return false; + + counters = event_num_counters(event); + counters += event_num_counters(leader); + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (is_software_event(sibling)) + continue; + if (sibling->pmu != event->pmu) + return false; + counters += event_num_counters(sibling); + } + + /* + * If the group requires more counters than the HW has, it + * cannot ever be scheduled. + */ + return counters <= L3_NUM_COUNTERS; +} + +static int qcom_l3_cache__event_init(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * Is the event for this PMU? + */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * There are no per-counter mode filters in the PMU. + */ + if (event->attr.exclude_user || event->attr.exclude_kernel || + event->attr.exclude_hv || event->attr.exclude_idle) + return -EINVAL; + + /* + * Sampling not supported since these events are not core-attributable. + */ + if (hwc->sample_period) + return -EINVAL; + + /* + * Task mode not available, we run the counters as socket counters, + * not attributable to any CPU and therefore cannot attribute per-task. + */ + if (event->cpu < 0) + return -EINVAL; + + /* Validate the group */ + if (!qcom_l3_cache__validate_event_group(event)) + return -EINVAL; + + hwc->idx = -1; + + /* + * Many perf core operations (eg. events rotation) operate on a + * single CPU context. This is obvious for CPU PMUs, where one + * expects the same sets of events being observed on all CPUs, + * but can lead to issues for off-core PMUs, like this one, where + * each event could be theoretically assigned to a different CPU. + * To mitigate this, we enforce CPU assignment to one designated + * processor (the one described in the "cpumask" attribute exported + * by the PMU device). perf user space tools honor this and avoid + * opening more than one copy of the events. + */ + event->cpu = cpumask_first(&l3pmu->cpumask); + + return 0; +} + +static void qcom_l3_cache__event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); + + hwc->state = 0; + ops->start(event); +} + +static void qcom_l3_cache__event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); + + if (hwc->state & PERF_HES_STOPPED) + return; + + ops->stop(event, flags); + if (flags & PERF_EF_UPDATE) + ops->update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int qcom_l3_cache__event_add(struct perf_event *event, int flags) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int order = event_uses_long_counter(event) ? 1 : 0; + int idx; + + /* + * Try to allocate a counter. + */ + idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order); + if (idx < 0) + /* The counters are all in use. */ + return -EAGAIN; + + hwc->idx = idx; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + l3pmu->events[idx] = event; + + if (flags & PERF_EF_START) + qcom_l3_cache__event_start(event, 0); + + /* Propagate changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + + return 0; +} + +static void qcom_l3_cache__event_del(struct perf_event *event, int flags) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int order = event_uses_long_counter(event) ? 1 : 0; + + /* Stop and clean up */ + qcom_l3_cache__event_stop(event, flags | PERF_EF_UPDATE); + l3pmu->events[hwc->idx] = NULL; + bitmap_release_region(l3pmu->used_mask, hwc->idx, order); + + /* Propagate changes to the userspace mapping. */ + perf_event_update_userpage(event); +} + +static void qcom_l3_cache__event_read(struct perf_event *event) +{ + const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); + + ops->update(event); +} + +/* + * Add sysfs attributes + * + * We export: + * - formats, used by perf user space and other tools to configure events + * - events, used by perf user space and other tools to create events + * symbolically, e.g.: + * perf stat -a -e l3cache_0_0/event=read-miss/ ls + * perf stat -a -e l3cache_0_0/event=0x21/ ls + * - cpumask, used by perf user space and other tools to know on which CPUs + * to open the events + */ + +/* formats */ + +static ssize_t l3cache_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + return sprintf(buf, "%s\n", (char *) eattr->var); +} + +#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \ + (&((struct dev_ext_attribute[]) { \ + { .attr = __ATTR(_name, 0444, l3cache_pmu_format_show, NULL), \ + .var = (void *) _config, } \ + })[0].attr.attr) + +static struct attribute *qcom_l3_cache_pmu_formats[] = { + L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"), + L3CACHE_PMU_FORMAT_ATTR(lc, "config:" __stringify(L3_EVENT_LC_BIT)), + NULL, +}; + +static struct attribute_group qcom_l3_cache_pmu_format_group = { + .name = "format", + .attrs = qcom_l3_cache_pmu_formats, +}; + +/* events */ + +static ssize_t l3cache_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define L3CACHE_EVENT_ATTR(_name, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, l3cache_pmu_event_show, NULL), \ + .id = _id, } \ + })[0].attr.attr) + +static struct attribute *qcom_l3_cache_pmu_events[] = { + L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES), + L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT), + L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS), + L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D), + L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D), + L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT), + L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS), + NULL +}; + +static struct attribute_group qcom_l3_cache_pmu_events_group = { + .name = "events", + .attrs = qcom_l3_cache_pmu_events, +}; + +/* cpumask */ + +static ssize_t qcom_l3_cache_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask); +} + +static DEVICE_ATTR(cpumask, 0444, qcom_l3_cache_pmu_cpumask_show, NULL); + +static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = { + 
.attrs = qcom_l3_cache_pmu_cpumask_attrs, +}; + +/* + * Per PMU device attribute groups + */ +static const struct attribute_group *qcom_l3_cache_pmu_attr_grps[] = { + &qcom_l3_cache_pmu_format_group, + &qcom_l3_cache_pmu_events_group, + &qcom_l3_cache_pmu_cpumask_attr_group, + NULL, +}; + +/* + * Probing functions and data. + */ + +static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node); + + /* If there is not a CPU/PMU association pick this CPU */ + if (cpumask_empty(&l3pmu->cpumask)) + cpumask_set_cpu(cpu, &l3pmu->cpumask); + + return 0; +} + +static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node); + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask)) + return 0; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + perf_pmu_migrate_context(&l3pmu->pmu, cpu, target); + cpumask_set_cpu(target, &l3pmu->cpumask); + return 0; +} + +static int qcom_l3_cache_pmu_probe(struct platform_device *pdev) +{ + struct l3cache_pmu *l3pmu; + struct acpi_device *acpi_dev; + struct resource *memrc; + int ret; + char *name; + + /* Initialize the PMU data structures */ + + acpi_dev = ACPI_COMPANION(&pdev->dev); + if (!acpi_dev) + return -ENODEV; + + l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s", + acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id); + if (!l3pmu || !name) + return -ENOMEM; + + l3pmu->pmu = (struct pmu) { + .task_ctx_nr = perf_invalid_context, + + .pmu_enable = qcom_l3_cache__pmu_enable, + .pmu_disable = qcom_l3_cache__pmu_disable, + .event_init = qcom_l3_cache__event_init, + .add = qcom_l3_cache__event_add, + .del = qcom_l3_cache__event_del, + .start = qcom_l3_cache__event_start, + .stop = qcom_l3_cache__event_stop, + .read = qcom_l3_cache__event_read, + + .attr_groups = qcom_l3_cache_pmu_attr_grps, + }; + + memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0); + l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc); + if (IS_ERR(l3pmu->regs)) { + dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start); + return PTR_ERR(l3pmu->regs); + } + + qcom_l3_cache__init(l3pmu); + + ret = platform_get_irq(pdev, 0); + if (ret <= 0) + return ret; + + ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0, + name, l3pmu); + if (ret) { + dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n", + &memrc->start); + return ret; + } + + /* Add this instance to the list used by the offline callback */ + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug", ret); + return ret; + } + + ret = perf_pmu_register(&l3pmu->pmu, name, -1); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret); + return ret; + } + + dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type); + + return 0; +} + +static const struct acpi_device_id qcom_l3_cache_pmu_acpi_match[] = { + { "QCOM8081", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, qcom_l3_cache_pmu_acpi_match); + +static struct platform_driver qcom_l3_cache_pmu_driver = { + .driver = { + .name = "qcom-l3cache-pmu", + .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match), + }, + .probe = qcom_l3_cache_pmu_probe, +}; + +static int __init 
register_qcom_l3_cache_pmu_driver(void) +{ + int ret; + + /* Install a hook to update the reader CPU in case it goes offline */ + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, + "perf/qcom/l3cache:online", + qcom_l3_cache_pmu_online_cpu, + qcom_l3_cache_pmu_offline_cpu); + if (ret) + return ret; + + return platform_driver_register(&qcom_l3_cache_pmu_driver); +} +device_initcall(register_qcom_l3_cache_pmu_driver); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 62d240e962f0..cfcfab37d9c4 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -137,6 +137,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_CCN_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_ONLINE_DYN, -- cgit v1.2.3 From 65c2e69b3ccaa359032cfc35c4dbb8d235f63e5b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 23 Mar 2017 19:00:44 +0000 Subject: include: pe.h: allow for use in assembly Some of the definitions in include/linux/pe.h would be useful for the EFI stub headers, where values are currently open-coded. Unfortunately they cannot be used as some structures are also defined in pe.h without !__ASSEMBLY__ guards. This patch moves the structure definitions into an #ifndef __ASSEMBLY__ block, so that the common value definitions can be used from assembly. Signed-off-by: Mark Rutland Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- include/linux/pe.h | 174 +++++++++++++++++++++++++++-------------------------- 1 file changed, 89 insertions(+), 85 deletions(-) (limited to 'include') diff --git a/include/linux/pe.h b/include/linux/pe.h index e170b95e763b..a8a594117df3 100644 --- a/include/linux/pe.h +++ b/include/linux/pe.h @@ -23,34 +23,6 @@ #define MZ_MAGIC 0x5a4d /* "MZ" */ -struct mz_hdr { - uint16_t magic; /* MZ_MAGIC */ - uint16_t lbsize; /* size of last used block */ - uint16_t blocks; /* pages in file, 0x3 */ - uint16_t relocs; /* relocations */ - uint16_t hdrsize; /* header size in "paragraphs" */ - uint16_t min_extra_pps; /* .bss */ - uint16_t max_extra_pps; /* runtime limit for the arena size */ - uint16_t ss; /* relative stack segment */ - uint16_t sp; /* initial %sp register */ - uint16_t checksum; /* word checksum */ - uint16_t ip; /* initial %ip register */ - uint16_t cs; /* initial %cs relative to load segment */ - uint16_t reloc_table_offset; /* offset of the first relocation */ - uint16_t overlay_num; /* overlay number. set to 0.
*/ - uint16_t reserved0[4]; /* reserved */ - uint16_t oem_id; /* oem identifier */ - uint16_t oem_info; /* oem specific */ - uint16_t reserved1[10]; /* reserved */ - uint32_t peaddr; /* address of pe header */ - char message[64]; /* message to print */ -}; - -struct mz_reloc { - uint16_t offset; - uint16_t segment; -}; - #define PE_MAGIC 0x00004550 /* "PE\0\0" */ #define PE_OPT_MAGIC_PE32 0x010b #define PE_OPT_MAGIC_PE32_ROM 0x0107 @@ -98,17 +70,6 @@ struct mz_reloc { #define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 #define IMAGE_FILE_BYTES_REVERSED_HI 0x8000 -struct pe_hdr { - uint32_t magic; /* PE magic */ - uint16_t machine; /* machine type */ - uint16_t sections; /* number of sections */ - uint32_t timestamp; /* time_t */ - uint32_t symbol_table; /* symbol table offset */ - uint32_t symbols; /* number of symbols */ - uint16_t opt_hdr_size; /* size of optional header */ - uint16_t flags; /* flags */ -}; - #define IMAGE_FILE_OPT_ROM_MAGIC 0x107 #define IMAGE_FILE_OPT_PE32_MAGIC 0x10b #define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b @@ -134,6 +95,93 @@ struct pe_hdr { #define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000 #define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000 +/* they actually defined 0x00000000 as well, but I think we'll skip that one. */ +#define IMAGE_SCN_RESERVED_0 0x00000001 +#define IMAGE_SCN_RESERVED_1 0x00000002 +#define IMAGE_SCN_RESERVED_2 0x00000004 +#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */ +#define IMAGE_SCN_RESERVED_3 0x00000010 +#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */ +#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */ +#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */ +#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */ +#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */ +#define IMAGE_SCN_RESERVED_4 0x00000400 +#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/ +#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */ +#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */ +#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */ +#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */ +/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */ +#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */ +#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */ +#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */ +#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */ +/* and here they just stuck a 1-byte integer in the middle of a bitfield */ +#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */ +#define IMAGE_SCN_ALIGN_2BYTES 0x00200000 +#define IMAGE_SCN_ALIGN_4BYTES 0x00300000 +#define IMAGE_SCN_ALIGN_8BYTES 0x00400000 +#define IMAGE_SCN_ALIGN_16BYTES 0x00500000 +#define IMAGE_SCN_ALIGN_32BYTES 0x00600000 +#define IMAGE_SCN_ALIGN_64BYTES 0x00700000 +#define IMAGE_SCN_ALIGN_128BYTES 0x00800000 +#define IMAGE_SCN_ALIGN_256BYTES 0x00900000 +#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000 +#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000 +#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000 +#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000 +#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000 +#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */ +#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */ +#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */ +#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */ 
+#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */ +#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */ +#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */ +#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */ + +#ifndef __ASSEMBLY__ + +struct mz_hdr { + uint16_t magic; /* MZ_MAGIC */ + uint16_t lbsize; /* size of last used block */ + uint16_t blocks; /* pages in file, 0x3 */ + uint16_t relocs; /* relocations */ + uint16_t hdrsize; /* header size in "paragraphs" */ + uint16_t min_extra_pps; /* .bss */ + uint16_t max_extra_pps; /* runtime limit for the arena size */ + uint16_t ss; /* relative stack segment */ + uint16_t sp; /* initial %sp register */ + uint16_t checksum; /* word checksum */ + uint16_t ip; /* initial %ip register */ + uint16_t cs; /* initial %cs relative to load segment */ + uint16_t reloc_table_offset; /* offset of the first relocation */ + uint16_t overlay_num; /* overlay number. set to 0. */ + uint16_t reserved0[4]; /* reserved */ + uint16_t oem_id; /* oem identifier */ + uint16_t oem_info; /* oem specific */ + uint16_t reserved1[10]; /* reserved */ + uint32_t peaddr; /* address of pe header */ + char message[64]; /* message to print */ +}; + +struct mz_reloc { + uint16_t offset; + uint16_t segment; +}; + +struct pe_hdr { + uint32_t magic; /* PE magic */ + uint16_t machine; /* machine type */ + uint16_t sections; /* number of sections */ + uint32_t timestamp; /* time_t */ + uint32_t symbol_table; /* symbol table offset */ + uint32_t symbols; /* number of symbols */ + uint16_t opt_hdr_size; /* size of optional header */ + uint16_t flags; /* flags */ +}; + /* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't * work right. vomit. */ struct pe32_opt_hdr { @@ -243,52 +291,6 @@ struct section_header { uint32_t flags; }; -/* they actually defined 0x00000000 as well, but I think we'll skip that one. 
*/ -#define IMAGE_SCN_RESERVED_0 0x00000001 -#define IMAGE_SCN_RESERVED_1 0x00000002 -#define IMAGE_SCN_RESERVED_2 0x00000004 -#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */ -#define IMAGE_SCN_RESERVED_3 0x00000010 -#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */ -#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */ -#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */ -#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */ -#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */ -#define IMAGE_SCN_RESERVED_4 0x00000400 -#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/ -#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */ -#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */ -#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */ -#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */ -/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */ -#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */ -#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */ -#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */ -#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */ -/* and here they just stuck a 1-byte integer in the middle of a bitfield */ -#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */ -#define IMAGE_SCN_ALIGN_2BYTES 0x00200000 -#define IMAGE_SCN_ALIGN_4BYTES 0x00300000 -#define IMAGE_SCN_ALIGN_8BYTES 0x00400000 -#define IMAGE_SCN_ALIGN_16BYTES 0x00500000 -#define IMAGE_SCN_ALIGN_32BYTES 0x00600000 -#define IMAGE_SCN_ALIGN_64BYTES 0x00700000 -#define IMAGE_SCN_ALIGN_128BYTES 0x00800000 -#define IMAGE_SCN_ALIGN_256BYTES 0x00900000 -#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000 -#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000 -#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000 -#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000 -#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000 -#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */ -#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */ -#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */ -#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */ -#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */ -#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */ -#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */ -#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */ - enum x64_coff_reloc_type { IMAGE_REL_AMD64_ABSOLUTE = 0, IMAGE_REL_AMD64_ADDR64, @@ -445,4 +447,6 @@ struct win_certificate { uint16_t cert_type; }; +#endif /* !__ASSEMBLY__ */ + #endif /* __LINUX_PE_H */ -- cgit v1.2.3 From 6f5541ba0eed842445a99b411d0f34103bcbbea1 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 23 Mar 2017 19:00:45 +0000 Subject: include: pe.h: add some missing definitions Add the missing IMAGE_FILE_MACHINE_ARM64 and IMAGE_DEBUG_TYPE_CODEVIEW definitions. We'll need them for the arm64 EFI stub... 
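As a sketch of the intended use (assuming a preprocessed .S file; the fragment below is illustrative, not the stub's actual header layout), an assembly source can now include pe.h and refer to the value macros directly instead of open-coding the numbers:

#include <linux/pe.h>

	.long	PE_MAGIC			/* "PE\0\0" */
	.short	IMAGE_FILE_MACHINE_ARM64	/* COFF machine type */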
Signed-off-by: Mark Rutland [ardb: add IMAGE_DEBUG_TYPE_CODEVIEW as well] Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- include/linux/pe.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/pe.h b/include/linux/pe.h index a8a594117df3..143ce75be5f0 100644 --- a/include/linux/pe.h +++ b/include/linux/pe.h @@ -34,6 +34,7 @@ #define IMAGE_FILE_MACHINE_AMD64 0x8664 #define IMAGE_FILE_MACHINE_ARM 0x01c0 #define IMAGE_FILE_MACHINE_ARMV7 0x01c4 +#define IMAGE_FILE_MACHINE_ARM64 0xaa64 #define IMAGE_FILE_MACHINE_EBC 0x0ebc #define IMAGE_FILE_MACHINE_I386 0x014c #define IMAGE_FILE_MACHINE_IA64 0x0200 @@ -141,6 +142,8 @@ #define IMAGE_SCN_MEM_READ 0x40000000 /* readable */ #define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */ +#define IMAGE_DEBUG_TYPE_CODEVIEW 2 + #ifndef __ASSEMBLY__ struct mz_hdr { -- cgit v1.2.3 From 4c546b8a34690ca858e50f2017b8bb6e358365d1 Mon Sep 17 00:00:00 2001 From: AKASHI Takahiro Date: Mon, 3 Apr 2017 11:23:54 +0900 Subject: memblock: add memblock_clear_nomap() This function, in combination with memblock_mark_nomap(), will be used in a later kdump patch for arm64 when it temporarily isolates some range of memory from the other memory blocks in order to create a specific kernel mapping at boot time. Signed-off-by: AKASHI Takahiro Reviewed-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- include/linux/memblock.h | 1 + mm/memblock.c | 12 ++++++++++++ 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index bdfc65af4152..e82daffcfc44 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -93,6 +93,7 @@ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); +int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); ulong choose_memblock_flags(void); /* Low level functions */ diff --git a/mm/memblock.c b/mm/memblock.c index 696f06d17c4e..2f4ca8104ea4 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -804,6 +804,18 @@ int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP); } +/** + * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region. + * @base: the base phys addr of the region + * @size: the size of the region + * + * Return 0 on success, -errno on failure. + */ +int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size) +{ + return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP); +} + /** * __next_reserved_mem_region - next function for for_each_reserved_region() * @idx: pointer to u64 loop variable -- cgit v1.2.3 From c9ca9b4e2198a4dbeb83739460d4a7ff9ffed24f Mon Sep 17 00:00:00 2001 From: AKASHI Takahiro Date: Mon, 3 Apr 2017 11:23:55 +0900 Subject: memblock: add memblock_cap_memory_range() Add memblock_cap_memory_range() which will remove all the memblock regions except the memory range specified in the arguments. In addition, rework is done on memblock_mem_limit_remove_map() to re-implement it using memblock_cap_memory_range(). This function, like memblock_mem_limit_remove_map(), will not remove memblocks with MEMBLOCK_NOMAP attribute as they may be mapped and accessed later as "device memory." See the commit a571d4eb55d8 ("mm/memblock.c: add new infrastructure to address the mem limit issue").
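As a sketch of the resulting semantics (the base/size names below are illustrative, not taken from this series): memory outside the requested range is removed unless it is marked NOMAP, and reserved regions outside the range are truncated:

	/* hypothetical boot-time sequence on a crash dump kernel */
	memblock_mark_nomap(fw_base, fw_size);		/* keep firmware data visible */
	memblock_cap_memory_range(crash_base, crash_size);
	/* System RAM is now capped to [crash_base, crash_base + crash_size),
	 * while the NOMAP region above survives in memblock.memory */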
This function is used, in a succeeding patch in the arm64 kdump support series, to limit the range of usable memory, or System RAM, on the crash dump kernel. (Please note that the "mem=" parameter is of little use for this purpose.) Signed-off-by: AKASHI Takahiro Reviewed-by: Will Deacon Acked-by: Catalin Marinas Acked-by: Dennis Chen Cc: linux-mm@kvack.org Cc: Andrew Morton Reviewed-by: Ard Biesheuvel Signed-off-by: Catalin Marinas --- include/linux/memblock.h | 1 + mm/memblock.c | 44 +++++++++++++++++++++++++++++--------------- 2 files changed, 30 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index e82daffcfc44..4ce24a376262 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -336,6 +336,7 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn); phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); +void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); void memblock_mem_limit_remove_map(phys_addr_t limit); bool memblock_is_memory(phys_addr_t addr); int memblock_is_map_memory(phys_addr_t addr); diff --git a/mm/memblock.c b/mm/memblock.c index 2f4ca8104ea4..b049c9b2dba8 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1543,11 +1543,37 @@ void __init memblock_enforce_memory_limit(phys_addr_t limit) (phys_addr_t)ULLONG_MAX); } +void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) +{ + int start_rgn, end_rgn; + int i, ret; + + if (!size) + return; + + ret = memblock_isolate_range(&memblock.memory, base, size, + &start_rgn, &end_rgn); + if (ret) + return; + + /* remove all the MAP regions */ + for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) + if (!memblock_is_nomap(&memblock.memory.regions[i])) + memblock_remove_region(&memblock.memory, i); + + for (i = start_rgn - 1; i >= 0; i--) + if (!memblock_is_nomap(&memblock.memory.regions[i])) + memblock_remove_region(&memblock.memory, i); + + /* truncate the reserved regions */ + memblock_remove_range(&memblock.reserved, 0, base); + memblock_remove_range(&memblock.reserved, + base + size, (phys_addr_t)ULLONG_MAX); +} + void __init memblock_mem_limit_remove_map(phys_addr_t limit) { - struct memblock_type *type = &memblock.memory; phys_addr_t max_addr; - int i, ret, start_rgn, end_rgn; if (!limit) return; @@ -1558,19 +1584,7 @@ void __init memblock_mem_limit_remove_map(phys_addr_t limit) if (max_addr == (phys_addr_t)ULLONG_MAX) return; - ret = memblock_isolate_range(type, max_addr, (phys_addr_t)ULLONG_MAX, - &start_rgn, &end_rgn); - if (ret) - return; - - /* remove all the MAP regions above the limit */ - for (i = end_rgn - 1; i >= start_rgn; i--) { - if (!memblock_is_nomap(&type->regions[i])) - memblock_remove_region(type, i); - } - /* truncate the reserved regions */ - memblock_remove_range(&memblock.reserved, max_addr, - (phys_addr_t)ULLONG_MAX); + memblock_cap_memory_range(0, max_addr); } static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) -- cgit v1.2.3 From 083c52144a19c69b7956aa53c913ba621f7c5ae2 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 11 Apr 2017 09:39:45 +0100 Subject: drivers/perf: arm_pmu: define armpmu_init_fn We expect an ARM PMU's init function to have a particular prototype, which we open-code in a few places. This is less than ideal, considering that we cast a void value to this type in one location, and a mismatch could easily be missed.
Add a typedef so that we can ensure this is consistent. Signed-off-by: Mark Rutland Tested-by: Jeremy Linton Cc: Will Deacon Signed-off-by: Will Deacon --- drivers/perf/arm_pmu.c | 2 +- include/linux/perf/arm_pmu.h | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 316c4dcc9856..71a825df47ed 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -989,7 +989,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, const struct pmu_probe_info *probe_table) { const struct of_device_id *of_id; - const int (*init_fn)(struct arm_pmu *); + armpmu_init_fn init_fn; struct device_node *node = pdev->dev.of_node; struct arm_pmu *pmu; int ret = -ENODEV; diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 44f43fcf2524..4249914315a4 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -132,10 +132,12 @@ int armpmu_map_event(struct perf_event *event, [PERF_COUNT_HW_CACHE_RESULT_MAX], u32 raw_event_mask); +typedef int (*armpmu_init_fn)(struct arm_pmu *); + struct pmu_probe_info { unsigned int cpuid; unsigned int mask; - int (*init)(struct arm_pmu *); + armpmu_init_fn init; }; #define PMU_PROBE(_cpuid, _mask, _fn) \ -- cgit v1.2.3 From 18bfcfe51b8f60b69ab012888dea8061a9cd3381 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 11 Apr 2017 09:39:53 +0100 Subject: drivers/perf: arm_pmu: split out platform device probe logic Now that we've split the pdev and DT probing logic from the runtime management, let's move the former into its own file. We gain a few lines due to the copyright header and includes, but this should keep the logic clearly separated and pave the way for adding ACPI support in a similar fashion.
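For orientation, the flow that moves into arm_pmu_platform.c reduces to the sequence below, expressed with the core helpers exported in this patch (a condensed sketch with error handling and DT details elided, not a verbatim excerpt):

	pmu = armpmu_alloc();		/* core: allocate arm_pmu and hw_events */
	pmu->plat_device = pdev;
	pmu_parse_irqs(pmu);		/* platform: map IRQs to CPUs */
	init_fn(pmu);			/* driver-supplied init, see armpmu_init_fn */
	armpmu_request_irqs(pmu);	/* core: claim the per-CPU IRQs */
	armpmu_register(pmu);		/* core: register with perf */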
Signed-off-by: Mark Rutland Tested-by: Jeremy Linton [will: rename nr_irqs to avoid conflict with global variable] Signed-off-by: Will Deacon --- drivers/perf/Makefile | 2 +- drivers/perf/arm_pmu.c | 226 +------------------------------------- drivers/perf/arm_pmu_platform.c | 235 ++++++++++++++++++++++++++++++++++++++++ include/linux/perf/arm_pmu.h | 7 ++ 4 files changed, 247 insertions(+), 223 deletions(-) create mode 100644 drivers/perf/arm_pmu_platform.c (limited to 'include') diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index ef0c6b210345..925cd3903029 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_ARM_PMU) += arm_pmu.o +obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index f387d6153099..b3bedfa512eb 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -25,7 +24,6 @@ #include #include -#include #include static int @@ -544,7 +542,7 @@ static void armpmu_free_irq(struct arm_pmu *armpmu, int cpu) free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); } -static void armpmu_free_irqs(struct arm_pmu *armpmu) +void armpmu_free_irqs(struct arm_pmu *armpmu) { int cpu; @@ -589,7 +587,7 @@ static int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) return 0; } -static int armpmu_request_irqs(struct arm_pmu *armpmu) +int armpmu_request_irqs(struct arm_pmu *armpmu) { int cpu, err; @@ -783,161 +781,7 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) &cpu_pmu->node); } -/* - * CPU PMU identification and probing. - */ -static int probe_current_pmu(struct arm_pmu *pmu, - const struct pmu_probe_info *info) -{ - int cpu = get_cpu(); - unsigned int cpuid = read_cpuid_id(); - int ret = -ENODEV; - - pr_info("probing PMU on CPU %d\n", cpu); - - for (; info->init != NULL; info++) { - if ((cpuid & info->mask) != info->cpuid) - continue; - ret = info->init(pmu); - break; - } - - put_cpu(); - return ret; -} - -static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq) -{ - int cpu, ret; - struct pmu_hw_events __percpu *hw_events = pmu->hw_events; - - ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); - if (ret) - return ret; - - for_each_cpu(cpu, &pmu->supported_cpus) - per_cpu(hw_events->irq, cpu) = irq; - - return 0; -} - -static bool pmu_has_irq_affinity(struct device_node *node) -{ - return !!of_find_property(node, "interrupt-affinity", NULL); -} - -static int pmu_parse_irq_affinity(struct device_node *node, int i) -{ - struct device_node *dn; - int cpu; - - /* - * If we don't have an interrupt-affinity property, we guess irq - * affinity matches our logical CPU order, as we used to assume. - * This is fragile, so we'll warn in pmu_parse_irqs(). 
- */ - if (!pmu_has_irq_affinity(node)) - return i; - - dn = of_parse_phandle(node, "interrupt-affinity", i); - if (!dn) { - pr_warn("failed to parse interrupt-affinity[%d] for %s\n", - i, node->name); - return -EINVAL; - } - - /* Now look up the logical CPU number */ - for_each_possible_cpu(cpu) { - struct device_node *cpu_dn; - - cpu_dn = of_cpu_device_node_get(cpu); - of_node_put(cpu_dn); - - if (dn == cpu_dn) - break; - } - - if (cpu >= nr_cpu_ids) { - pr_warn("failed to find logical CPU for %s\n", dn->name); - } - - of_node_put(dn); - - return cpu; -} - -static int pmu_parse_irqs(struct arm_pmu *pmu) -{ - int i = 0, irqs; - struct platform_device *pdev = pmu->plat_device; - struct pmu_hw_events __percpu *hw_events = pmu->hw_events; - - irqs = platform_irq_count(pdev); - if (irqs < 0) { - pr_err("unable to count PMU IRQs\n"); - return irqs; - } - - /* - * In this case we have no idea which CPUs are covered by the PMU. - * To match our prior behaviour, we assume all CPUs in this case. - */ - if (irqs == 0) { - pr_warn("no irqs for PMU, sampling events not supported\n"); - pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; - cpumask_setall(&pmu->supported_cpus); - return 0; - } - - if (irqs == 1) { - int irq = platform_get_irq(pdev, 0); - if (irq && irq_is_percpu(irq)) - return pmu_parse_percpu_irq(pmu, irq); - } - - if (!pmu_has_irq_affinity(pdev->dev.of_node)) { - pr_warn("no interrupt-affinity property for %s, guessing.\n", - of_node_full_name(pdev->dev.of_node)); - } - - /* - * Some platforms have all PMU IRQs OR'd into a single IRQ, with a - * special platdata function that attempts to demux them. - */ - if (dev_get_platdata(&pdev->dev)) - cpumask_setall(&pmu->supported_cpus); - - for (i = 0; i < irqs; i++) { - int cpu, irq; - - irq = platform_get_irq(pdev, i); - if (WARN_ON(irq <= 0)) - continue; - - if (irq_is_percpu(irq)) { - pr_warn("multiple PPIs or mismatched SPI/PPI detected\n"); - return -EINVAL; - } - - cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i); - if (cpu < 0) - return cpu; - if (cpu >= nr_cpu_ids) - continue; - - if (per_cpu(hw_events->irq, cpu)) { - pr_warn("multiple PMU IRQs for the same CPU detected\n"); - return -EINVAL; - } - - per_cpu(hw_events->irq, cpu) = irq; - cpumask_set_cpu(cpu, &pmu->supported_cpus); - } - - return 0; -} - -static struct arm_pmu *armpmu_alloc(void) +struct arm_pmu *armpmu_alloc(void) { struct arm_pmu *pmu; int cpu; @@ -994,7 +838,7 @@ out: return NULL; } -static void armpmu_free(struct arm_pmu *pmu) +void armpmu_free(struct arm_pmu *pmu) { free_percpu(pmu->hw_events); kfree(pmu); @@ -1025,68 +869,6 @@ out_destroy: return ret; } -int arm_pmu_device_probe(struct platform_device *pdev, - const struct of_device_id *of_table, - const struct pmu_probe_info *probe_table) -{ - const struct of_device_id *of_id; - armpmu_init_fn init_fn; - struct device_node *node = pdev->dev.of_node; - struct arm_pmu *pmu; - int ret = -ENODEV; - - pmu = armpmu_alloc(); - if (!pmu) - return -ENOMEM; - - pmu->plat_device = pdev; - - ret = pmu_parse_irqs(pmu); - if (ret) - goto out_free; - - if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { - init_fn = of_id->data; - - pmu->secure_access = of_property_read_bool(pdev->dev.of_node, - "secure-reg-access"); - - /* arm64 systems boot only as non-secure */ - if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) { - pr_warn("ignoring \"secure-reg-access\" property for arm64\n"); - pmu->secure_access = false; - } - - ret = init_fn(pmu); - } else if (probe_table) { - cpumask_setall(&pmu->supported_cpus); 
- ret = probe_current_pmu(pmu, probe_table); - } - - if (ret) { - pr_info("%s: failed to probe PMU!\n", of_node_full_name(node)); - goto out_free; - } - - ret = armpmu_request_irqs(pmu); - if (ret) - goto out_free_irqs; - - ret = armpmu_register(pmu); - if (ret) - goto out_free; - - return 0; - -out_free_irqs: - armpmu_free_irqs(pmu); -out_free: - pr_info("%s: failed to register PMU devices!\n", - of_node_full_name(node)); - armpmu_free(pmu); - return ret; -} - static int arm_pmu_hp_init(void) { int ret; diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c new file mode 100644 index 000000000000..69255f53057a --- /dev/null +++ b/drivers/perf/arm_pmu_platform.c @@ -0,0 +1,235 @@ +/* + * platform_device probing code for ARM performance counters. + * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * Copyright (C) 2010 ARM Ltd., Will Deacon + */ +#define pr_fmt(fmt) "hw perfevents: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int probe_current_pmu(struct arm_pmu *pmu, + const struct pmu_probe_info *info) +{ + int cpu = get_cpu(); + unsigned int cpuid = read_cpuid_id(); + int ret = -ENODEV; + + pr_info("probing PMU on CPU %d\n", cpu); + + for (; info->init != NULL; info++) { + if ((cpuid & info->mask) != info->cpuid) + continue; + ret = info->init(pmu); + break; + } + + put_cpu(); + return ret; +} + +static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq) +{ + int cpu, ret; + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; + + ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); + if (ret) + return ret; + + for_each_cpu(cpu, &pmu->supported_cpus) + per_cpu(hw_events->irq, cpu) = irq; + + return 0; +} + +static bool pmu_has_irq_affinity(struct device_node *node) +{ + return !!of_find_property(node, "interrupt-affinity", NULL); +} + +static int pmu_parse_irq_affinity(struct device_node *node, int i) +{ + struct device_node *dn; + int cpu; + + /* + * If we don't have an interrupt-affinity property, we guess irq + * affinity matches our logical CPU order, as we used to assume. + * This is fragile, so we'll warn in pmu_parse_irqs(). + */ + if (!pmu_has_irq_affinity(node)) + return i; + + dn = of_parse_phandle(node, "interrupt-affinity", i); + if (!dn) { + pr_warn("failed to parse interrupt-affinity[%d] for %s\n", + i, node->name); + return -EINVAL; + } + + /* Now look up the logical CPU number */ + for_each_possible_cpu(cpu) { + struct device_node *cpu_dn; + + cpu_dn = of_cpu_device_node_get(cpu); + of_node_put(cpu_dn); + + if (dn == cpu_dn) + break; + } + + if (cpu >= nr_cpu_ids) { + pr_warn("failed to find logical CPU for %s\n", dn->name); + } + + of_node_put(dn); + + return cpu; +} + +static int pmu_parse_irqs(struct arm_pmu *pmu) +{ + int i = 0, num_irqs; + struct platform_device *pdev = pmu->plat_device; + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; + + num_irqs = platform_irq_count(pdev); + if (num_irqs < 0) { + pr_err("unable to count PMU IRQs\n"); + return num_irqs; + } + + /* + * In this case we have no idea which CPUs are covered by the PMU. + * To match our prior behaviour, we assume all CPUs in this case. 
+ */ + if (num_irqs == 0) { + pr_warn("no irqs for PMU, sampling events not supported\n"); + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + cpumask_setall(&pmu->supported_cpus); + return 0; + } + + if (num_irqs == 1) { + int irq = platform_get_irq(pdev, 0); + if (irq && irq_is_percpu(irq)) + return pmu_parse_percpu_irq(pmu, irq); + } + + if (!pmu_has_irq_affinity(pdev->dev.of_node)) { + pr_warn("no interrupt-affinity property for %s, guessing.\n", + of_node_full_name(pdev->dev.of_node)); + } + + /* + * Some platforms have all PMU IRQs OR'd into a single IRQ, with a + * special platdata function that attempts to demux them. + */ + if (dev_get_platdata(&pdev->dev)) + cpumask_setall(&pmu->supported_cpus); + + for (i = 0; i < num_irqs; i++) { + int cpu, irq; + + irq = platform_get_irq(pdev, i); + if (WARN_ON(irq <= 0)) + continue; + + if (irq_is_percpu(irq)) { + pr_warn("multiple PPIs or mismatched SPI/PPI detected\n"); + return -EINVAL; + } + + cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i); + if (cpu < 0) + return cpu; + if (cpu >= nr_cpu_ids) + continue; + + if (per_cpu(hw_events->irq, cpu)) { + pr_warn("multiple PMU IRQs for the same CPU detected\n"); + return -EINVAL; + } + + per_cpu(hw_events->irq, cpu) = irq; + cpumask_set_cpu(cpu, &pmu->supported_cpus); + } + + return 0; +} + +int arm_pmu_device_probe(struct platform_device *pdev, + const struct of_device_id *of_table, + const struct pmu_probe_info *probe_table) +{ + const struct of_device_id *of_id; + armpmu_init_fn init_fn; + struct device_node *node = pdev->dev.of_node; + struct arm_pmu *pmu; + int ret = -ENODEV; + + pmu = armpmu_alloc(); + if (!pmu) + return -ENOMEM; + + pmu->plat_device = pdev; + + ret = pmu_parse_irqs(pmu); + if (ret) + goto out_free; + + if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { + init_fn = of_id->data; + + pmu->secure_access = of_property_read_bool(pdev->dev.of_node, + "secure-reg-access"); + + /* arm64 systems boot only as non-secure */ + if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) { + pr_warn("ignoring \"secure-reg-access\" property for arm64\n"); + pmu->secure_access = false; + } + + ret = init_fn(pmu); + } else if (probe_table) { + cpumask_setall(&pmu->supported_cpus); + ret = probe_current_pmu(pmu, probe_table); + } + + if (ret) { + pr_info("%s: failed to probe PMU!\n", of_node_full_name(node)); + goto out_free; + } + + ret = armpmu_request_irqs(pmu); + if (ret) + goto out_free_irqs; + + ret = armpmu_register(pmu); + if (ret) + goto out_free; + + return 0; + +out_free_irqs: + armpmu_free_irqs(pmu); +out_free: + pr_info("%s: failed to register PMU devices!\n", + of_node_full_name(node)); + armpmu_free(pmu); + return ret; +} diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 4249914315a4..25556ebb1c7b 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -159,6 +159,13 @@ int arm_pmu_device_probe(struct platform_device *pdev, const struct of_device_id *of_table, const struct pmu_probe_info *probe_table); +/* Internal functions only for core arm_pmu code */ +struct arm_pmu *armpmu_alloc(void); +void armpmu_free(struct arm_pmu *pmu); +int armpmu_register(struct arm_pmu *pmu); +int armpmu_request_irqs(struct arm_pmu *armpmu); +void armpmu_free_irqs(struct arm_pmu *armpmu); + #define ARMV8_PMU_PDEV_NAME "armv8-pmu" #endif /* CONFIG_ARM_PMU */ -- cgit v1.2.3 From 45736a72fb79b204c1fbdb08a1e1a2aa52c7281a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 11 Apr 2017 09:39:55 +0100 Subject: drivers/perf: arm_pmu: 
add ACPI framework This patch adds framework code to handle parsing PMU data out of the MADT, sanity checking this, and managing the association of CPUs (and their interrupts) with appropriate logical PMUs. For the time being, we expect that only one PMU driver (PMUv3) will make use of this, and we simply pass in a single probe function. This is based on an earlier patch from Jeremy Linton. Signed-off-by: Mark Rutland Tested-by: Jeremy Linton Cc: Will Deacon Signed-off-by: Will Deacon --- drivers/perf/Kconfig | 4 + drivers/perf/Makefile | 1 + drivers/perf/arm_pmu.c | 4 +- drivers/perf/arm_pmu_acpi.c | 256 +++++++++++++++++++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + include/linux/perf/arm_pmu.h | 11 ++ 6 files changed, 275 insertions(+), 2 deletions(-) create mode 100644 drivers/perf/arm_pmu_acpi.c (limited to 'include') diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index c436e0d303e7..aa587edaf9ea 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -12,6 +12,10 @@ config ARM_PMU Say y if you want to use CPU performance monitors on ARM-based systems. +config ARM_PMU_ACPI + depends on ARM_PMU && ACPI + def_bool y + config QCOM_L2_PMU bool "Qualcomm Technologies L2-cache PMU" depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 925cd3903029..6420bd4394d5 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o +obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index b3bedfa512eb..dc459eb1246b 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -525,7 +525,7 @@ int perf_num_counters(void) } EXPORT_SYMBOL_GPL(perf_num_counters); -static void armpmu_free_irq(struct arm_pmu *armpmu, int cpu) +void armpmu_free_irq(struct arm_pmu *armpmu, int cpu) { struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; int irq = per_cpu(hw_events->irq, cpu); @@ -550,7 +550,7 @@ void armpmu_free_irqs(struct arm_pmu *armpmu) armpmu_free_irq(armpmu, cpu); } -static int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) +int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) { int err = 0; struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c new file mode 100644 index 000000000000..34c862f213c7 --- /dev/null +++ b/drivers/perf/arm_pmu_acpi.c @@ -0,0 +1,256 @@ +/* + * ACPI probing code for ARM performance counters. + * + * Copyright (C) 2017 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#include + +static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus); +static DEFINE_PER_CPU(int, pmu_irqs); + +static int arm_pmu_acpi_register_irq(int cpu) +{ + struct acpi_madt_generic_interrupt *gicc; + int gsi, trigger; + + gicc = acpi_cpu_get_madt_gicc(cpu); + if (WARN_ON(!gicc)) + return -EINVAL; + + gsi = gicc->performance_interrupt; + if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) + trigger = ACPI_EDGE_SENSITIVE; + else + trigger = ACPI_LEVEL_SENSITIVE; + + /* + * Helpfully, the MADT GICC doesn't have a polarity flag for the + * "performance interrupt". 
Luckily, on compliant GICs the polarity is + * a fixed value in HW (for both SPIs and PPIs) that we cannot change + * from SW. + * + * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This + * may not match the real polarity, but that should not matter. + * + * Other interrupt controllers are not supported with ACPI. + */ + return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH); +} + +static void arm_pmu_acpi_unregister_irq(int cpu) +{ + struct acpi_madt_generic_interrupt *gicc; + int gsi; + + gicc = acpi_cpu_get_madt_gicc(cpu); + if (!gicc) + return; + + gsi = gicc->performance_interrupt; + acpi_unregister_gsi(gsi); +} + +static int arm_pmu_acpi_parse_irqs(void) +{ + int irq, cpu, irq_cpu, err; + + for_each_possible_cpu(cpu) { + irq = arm_pmu_acpi_register_irq(cpu); + if (irq < 0) { + err = irq; + pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n", + cpu, err); + goto out_err; + } else if (irq == 0) { + pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu); + } + + per_cpu(pmu_irqs, cpu) = irq; + } + + return 0; + +out_err: + for_each_possible_cpu(cpu) { + irq = per_cpu(pmu_irqs, cpu); + if (!irq) + continue; + + arm_pmu_acpi_unregister_irq(cpu); + + /* + * Blat all copies of the IRQ so that we only unregister the + * corresponding GSI once (e.g. when we have PPIs). + */ + for_each_possible_cpu(irq_cpu) { + if (per_cpu(pmu_irqs, irq_cpu) == irq) + per_cpu(pmu_irqs, irq_cpu) = 0; + } + } + + return err; +} + +static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void) +{ + unsigned long cpuid = read_cpuid_id(); + struct arm_pmu *pmu; + int cpu; + + for_each_possible_cpu(cpu) { + pmu = per_cpu(probed_pmus, cpu); + if (!pmu || pmu->acpi_cpuid != cpuid) + continue; + + return pmu; + } + + pmu = armpmu_alloc(); + if (!pmu) { + pr_warn("Unable to allocate PMU for CPU%d\n", + smp_processor_id()); + return NULL; + } + + pmu->acpi_cpuid = cpuid; + + return pmu; +} + +/* + * This must run before the common arm_pmu hotplug logic, so that we can + * associate a CPU and its interrupt before the common code tries to manage the + * affinity and so on. + * + * Note that hotplug events are serialized, so we cannot race with another CPU + * coming up. The perf core won't open events while a hotplug event is in + * progress. + */ +static int arm_pmu_acpi_cpu_starting(unsigned int cpu) +{ + struct arm_pmu *pmu; + struct pmu_hw_events __percpu *hw_events; + int irq; + + /* If we've already probed this CPU, we have nothing to do */ + if (per_cpu(probed_pmus, cpu)) + return 0; + + irq = per_cpu(pmu_irqs, cpu); + + pmu = arm_pmu_acpi_find_alloc_pmu(); + if (!pmu) + return -ENOMEM; + + cpumask_set_cpu(cpu, &pmu->supported_cpus); + + per_cpu(probed_pmus, cpu) = pmu; + + /* + * Log and request the IRQ so the core arm_pmu code can manage it. In + * some situations (e.g. mismatched PPIs), we may fail to request the + * IRQ. However, it may be too late for us to do anything about it. + * The common ARM PMU code will log a warning in this case. + */ + hw_events = pmu->hw_events; + per_cpu(hw_events->irq, cpu) = irq; + armpmu_request_irq(pmu, cpu); + + /* + * Ideally, we'd probe the PMU here when we find the first matching + * CPU. We can't do that for several reasons; see the comment in + * arm_pmu_acpi_init(). + * + * So for the time being, we're done. + */ + return 0; +} + +int arm_pmu_acpi_probe(armpmu_init_fn init_fn) +{ + int pmu_idx = 0; + int cpu, ret; + + if (acpi_disabled) + return 0; + + /* + * Initialise and register the set of PMUs which we know about right + * now. 
Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we + * could handle late hotplug, but this may lead to deadlock since we + * might try to register a hotplug notifier instance from within a + * hotplug notifier. + * + * There's also the problem of having access to the right init_fn, + * without tying this too deeply into the "real" PMU driver. + * + * For the moment, as with the platform/DT case, we need at least one + * of a PMU's CPUs to be online at probe time. + */ + for_each_possible_cpu(cpu) { + struct arm_pmu *pmu = per_cpu(probed_pmus, cpu); + char *base_name; + + if (!pmu || pmu->name) + continue; + + ret = init_fn(pmu); + if (ret == -ENODEV) { + /* PMU not handled by this driver, or not present */ + continue; + } else if (ret) { + pr_warn("Unable to initialise PMU for CPU%d\n", cpu); + return ret; + } + + base_name = pmu->name; + pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++); + if (!pmu->name) { + pr_warn("Unable to allocate PMU name for CPU%d\n", cpu); + return -ENOMEM; + } + + ret = armpmu_register(pmu); + if (ret) { + pr_warn("Failed to register PMU for CPU%d\n", cpu); + return ret; + } + } + + return 0; +} + +static int arm_pmu_acpi_init(void) +{ + int ret; + + if (acpi_disabled) + return 0; + + /* + * We can't request IRQs yet, since we don't know the cookie value + * until we know which CPUs share the same logical PMU. We'll handle + * that in arm_pmu_acpi_cpu_starting(). + */ + ret = arm_pmu_acpi_parse_irqs(); + if (ret) + return ret; + + ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING, + "perf/arm/pmu_acpi:starting", + arm_pmu_acpi_cpu_starting, NULL); + + return ret; +} +subsys_initcall(arm_pmu_acpi_init) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index cfcfab37d9c4..0f2a80377520 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -94,6 +94,7 @@ enum cpuhp_state { CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, + CPUHP_AP_PERF_ARM_ACPI_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 25556ebb1c7b..1360dd6d5e61 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -117,6 +117,9 @@ struct arm_pmu { struct notifier_block cpu_pm_nb; /* the attr_groups array must be NULL-terminated */ const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1]; + + /* Only to be used by ACPI probing code */ + unsigned long acpi_cpuid; }; #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) @@ -159,12 +162,20 @@ int arm_pmu_device_probe(struct platform_device *pdev, const struct of_device_id *of_table, const struct pmu_probe_info *probe_table); +#ifdef CONFIG_ACPI +int arm_pmu_acpi_probe(armpmu_init_fn init_fn); +#else +static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } +#endif + /* Internal functions only for core arm_pmu code */ struct arm_pmu *armpmu_alloc(void); void armpmu_free(struct arm_pmu *pmu); int armpmu_register(struct arm_pmu *pmu); int armpmu_request_irqs(struct arm_pmu *armpmu); void armpmu_free_irqs(struct arm_pmu *armpmu); +int armpmu_request_irq(struct arm_pmu *armpmu, int cpu); +void armpmu_free_irq(struct arm_pmu *armpmu, int cpu); #define ARMV8_PMU_PDEV_NAME "armv8-pmu" -- cgit v1.2.3
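To close the loop, a usage sketch of the new entry point (hedged: the driver-side hookup is not part of this excerpt, and armv8_pmu_driver/armv8_pmuv3_init are illustrative names): a PMU driver opts in to the ACPI framework from its own initcall, keeping the platform/DT path as the fallback when ACPI is disabled:

/* hypothetical caller: a PMUv3-style driver opting in to ACPI probing */
static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);	/* DT path */

	return arm_pmu_acpi_probe(armv8_pmuv3_init);	/* illustrative init_fn */
}
device_initcall(armv8_pmu_driver_init)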