From f7b3ea8cf72f3d6060fe08e461805181e7450a13 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 27 Dec 2022 10:29:04 +0800
Subject: genirq/affinity: Move group_cpus_evenly() into lib/

group_cpus_evenly() has become a generic function which can be used for
other subsystems than the interrupt subsystem, so move it into lib/.

Signed-off-by: Ming Lei
Signed-off-by: Thomas Gleixner
Reviewed-by: Christoph Hellwig
Reviewed-by: Jens Axboe
Link: https://lore.kernel.org/r/20221227022905.352674-6-ming.lei@redhat.com
---
 include/linux/group_cpus.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 include/linux/group_cpus.h

(limited to 'include/linux')

diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
new file mode 100644
index 000000000000..e42807ec61f6
--- /dev/null
+++ b/include/linux/group_cpus.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+
+#ifndef __LINUX_GROUP_CPUS_H
+#define __LINUX_GROUP_CPUS_H
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+
+struct cpumask *group_cpus_evenly(unsigned int numgrps);
+
+#endif
--
cgit v1.2.3
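Since the header above only declares the API, a minimal usage sketch may help. The function name and queue count below are illustrative and not part of the patch; the allocation contract (group_cpus_evenly() returns a kcalloc()'d array of numgrps cpumasks, or NULL, which the caller frees with kfree()) matches the lib/ implementation.

#include <linux/group_cpus.h>
#include <linux/slab.h>

/* Hypothetical consumer: spread nr_queues queues evenly across CPUs. */
static int example_spread_queues(unsigned int nr_queues)
{
	struct cpumask *masks;
	unsigned int i;

	masks = group_cpus_evenly(nr_queues);
	if (!masks)
		return -ENOMEM;

	/* masks[i] is the set of CPUs assigned to group (queue) i. */
	for (i = 0; i < nr_queues; i++)
		pr_info("queue %u -> CPUs %*pbl\n",
			i, cpumask_pr_args(&masks[i]));

	kfree(masks);
	return 0;
}

This is the pattern used by queue-to-CPU spreading code such as blk-mq and virtio, which is what motivated moving the function out of the interrupt subsystem.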
From 835a486cd9f55790dee9f6b67ce0057d49f15da5 Mon Sep 17 00:00:00 2001
From: Anup Patel
Date: Tue, 3 Jan 2023 19:42:15 +0530
Subject: genirq: Add mechanism to multiplex a single HW IPI

All RISC-V platforms have a single HW IPI provided by the INTC local
interrupt controller. The HW method to trigger INTC IPI can be through
external irqchip (e.g. RISC-V AIA), through platform specific device
(e.g. SiFive CLINT timer), or through firmware (e.g. SBI IPI call).

To support multiple IPIs on RISC-V, add a generic IPI multiplexing
mechanism which helps us create multiple virtual IPIs using a single
HW IPI. This generic IPI multiplexing is inspired by the Apple AIC
irqchip driver and it is shared by various RISC-V irqchip drivers.

Signed-off-by: Anup Patel
Reviewed-by: Hector Martin
Tested-by: Hector Martin
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230103141221.772261-4-apatel@ventanamicro.com
---
 include/linux/irq.h  |   3 +
 kernel/irq/Kconfig   |   5 ++
 kernel/irq/Makefile  |   1 +
 kernel/irq/ipi-mux.c | 207 +++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 216 insertions(+)
 create mode 100644 kernel/irq/ipi-mux.c

(limited to 'include/linux')

diff --git a/include/linux/irq.h b/include/linux/irq.h
index c3eb89606c2b..b1b28affb32a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1266,6 +1266,9 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
 int ipi_send_single(unsigned int virq, unsigned int cpu);
 int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
 
+void ipi_mux_process(void);
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu));
+
 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
 /*
  * Registers a generic IRQ handling function as the top-level IRQ handler in
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index b64c44ae4c25..2531f3496ab6 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -86,6 +86,11 @@ config GENERIC_IRQ_IPI
 	depends on SMP
 	select IRQ_DOMAIN_HIERARCHY
 
+# Generic IRQ IPI Mux support
+config GENERIC_IRQ_IPI_MUX
+	bool
+	depends on SMP
+
 # Generic MSI hierarchical interrupt domain support
 config GENERIC_MSI_IRQ
 	bool
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index b4f53717d143..f19d3080bf11 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
 obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
+obj-$(CONFIG_GENERIC_IRQ_IPI_MUX) += ipi-mux.o
 obj-$(CONFIG_SMP) += affinity.o
 obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
 obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o
diff --git a/kernel/irq/ipi-mux.c b/kernel/irq/ipi-mux.c
new file mode 100644
index 000000000000..3a403c3a785d
--- /dev/null
+++ b/kernel/irq/ipi-mux.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Multiplex several virtual IPIs over a single HW IPI.
+ *
+ * Copyright The Asahi Linux Contributors
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "ipi-mux: " fmt
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+struct ipi_mux_cpu {
+	atomic_t			enable;
+	atomic_t			bits;
+};
+
+static struct ipi_mux_cpu __percpu *ipi_mux_pcpu;
+static struct irq_domain *ipi_mux_domain;
+static void (*ipi_mux_send)(unsigned int cpu);
+
+static void ipi_mux_mask(struct irq_data *d)
+{
+	struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
+
+	atomic_andnot(BIT(irqd_to_hwirq(d)), &icpu->enable);
+}
+
+static void ipi_mux_unmask(struct irq_data *d)
+{
+	struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
+	u32 ibit = BIT(irqd_to_hwirq(d));
+
+	atomic_or(ibit, &icpu->enable);
+
+	/*
+	 * The atomic_or() above must complete before the atomic_read()
+	 * below to avoid racing ipi_mux_send_mask().
+	 */
+	smp_mb__after_atomic();
+
+	/* If a pending IPI was unmasked, raise a parent IPI immediately. */
+	if (atomic_read(&icpu->bits) & ibit)
+		ipi_mux_send(smp_processor_id());
+}
+
+static void ipi_mux_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+	struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
+	u32 ibit = BIT(irqd_to_hwirq(d));
+	unsigned long pending;
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		icpu = per_cpu_ptr(ipi_mux_pcpu, cpu);
+
+		/*
+		 * This sequence is the mirror of the one in ipi_mux_unmask();
+		 * see the comment there. Additionally, release semantics
+		 * ensure that the vIPI flag set is ordered after any shared
+		 * memory accesses that precede it. This therefore also pairs
+		 * with the atomic_fetch_andnot in ipi_mux_process().
+		 */
+		pending = atomic_fetch_or_release(ibit, &icpu->bits);
+
+		/*
+		 * The atomic_fetch_or_release() above must complete
+		 * before the atomic_read() below to avoid racing with
+		 * ipi_mux_unmask().
+		 */
+		smp_mb__after_atomic();
+
+		/*
+		 * The flag writes must complete before the physical IPI is
+		 * issued to another CPU. This is implied by the control
+		 * dependency on the result of atomic_read() below, which is
+		 * itself already ordered after the vIPI flag write.
+		 */
+		if (!(pending & ibit) && (atomic_read(&icpu->enable) & ibit))
+			ipi_mux_send(cpu);
+	}
+}
+
+static const struct irq_chip ipi_mux_chip = {
+	.name		= "IPI Mux",
+	.irq_mask	= ipi_mux_mask,
+	.irq_unmask	= ipi_mux_unmask,
+	.ipi_send_mask	= ipi_mux_send_mask,
+};
+
+static int ipi_mux_domain_alloc(struct irq_domain *d, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	int i;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_set_percpu_devid(virq + i);
+		irq_domain_set_info(d, virq + i, i, &ipi_mux_chip, NULL,
+				    handle_percpu_devid_irq, NULL, NULL);
+	}
+
+	return 0;
+}
+
+static const struct irq_domain_ops ipi_mux_domain_ops = {
+	.alloc		= ipi_mux_domain_alloc,
+	.free		= irq_domain_free_irqs_top,
+};
+
+/**
+ * ipi_mux_process - Process multiplexed virtual IPIs
+ */
+void ipi_mux_process(void)
+{
+	struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
+	irq_hw_number_t hwirq;
+	unsigned long ipis;
+	unsigned int en;
+
+	/*
+	 * Reading the enable mask does not need to be ordered as long as
+	 * this function is called from the interrupt handler, because only
+	 * the CPU itself can change its own enable mask.
+	 */
+	en = atomic_read(&icpu->enable);
+
+	/*
+	 * Clear the IPIs we are about to handle. This pairs with the
+	 * atomic_fetch_or_release() in ipi_mux_send_mask().
+	 */
+	ipis = atomic_fetch_andnot(en, &icpu->bits) & en;
+
+	for_each_set_bit(hwirq, &ipis, BITS_PER_TYPE(int))
+		generic_handle_domain_irq(ipi_mux_domain, hwirq);
+}
+/**
+ * ipi_mux_create - Create virtual IPIs multiplexed on top of a single
+ * parent IPI.
+ * @nr_ipi:	number of virtual IPIs to create. This should
+ *		be <= BITS_PER_TYPE(int)
+ * @mux_send:	callback to trigger parent IPI for a particular CPU
+ *
+ * Returns first virq of the newly created virtual IPIs upon success
+ * or <=0 upon failure
+ */
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu))
+{
+	struct fwnode_handle *fwnode;
+	struct irq_domain *domain;
+	int rc;
+
+	if (ipi_mux_domain)
+		return -EEXIST;
+
+	if (BITS_PER_TYPE(int) < nr_ipi || !mux_send)
+		return -EINVAL;
+
+	ipi_mux_pcpu = alloc_percpu(typeof(*ipi_mux_pcpu));
+	if (!ipi_mux_pcpu)
+		return -ENOMEM;
+
+	fwnode = irq_domain_alloc_named_fwnode("IPI-Mux");
+	if (!fwnode) {
+		pr_err("unable to create IPI Mux fwnode\n");
+		rc = -ENOMEM;
+		goto fail_free_cpu;
+	}
+
+	domain = irq_domain_create_linear(fwnode, nr_ipi,
+					  &ipi_mux_domain_ops, NULL);
+	if (!domain) {
+		pr_err("unable to add IPI Mux domain\n");
+		rc = -ENOMEM;
+		goto fail_free_fwnode;
+	}
+
+	domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
+	irq_domain_update_bus_token(domain, DOMAIN_BUS_IPI);
+
+	rc = __irq_domain_alloc_irqs(domain, -1, nr_ipi,
+				     NUMA_NO_NODE, NULL, false, NULL);
+	if (rc <= 0) {
+		pr_err("unable to alloc IRQs from IPI Mux domain\n");
+		goto fail_free_domain;
+	}
+
+	ipi_mux_domain = domain;
+	ipi_mux_send = mux_send;
+
+	return rc;
+
+fail_free_domain:
+	irq_domain_remove(domain);
+fail_free_fwnode:
+	irq_domain_free_fwnode(fwnode);
+fail_free_cpu:
+	free_percpu(ipi_mux_pcpu);
+	return rc;
+}
--
cgit v1.2.3
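The driver-side contract is easiest to see in a sketch. The following is loosely modelled on how the RISC-V irqchip drivers consume this API; the example_* names, the IPI count, and the init wiring are hypothetical, not part of the patch.

#include <linux/init.h>
#include <linux/irq.h>

/* Hypothetical: ask the platform to raise its single HW IPI on @cpu. */
static void example_hw_ipi_trigger(unsigned int cpu)
{
	/* e.g. an SBI IPI call, or a write to a CLINT/AIA register */
}

/* Hypothetical HW IPI entry point, called after clearing the HW IPI. */
static void example_hw_ipi_handle(void)
{
	/* Demultiplex and handle all pending virtual IPIs on this CPU. */
	ipi_mux_process();
}

static int __init example_ipi_init(void)
{
	int first_virq;

	/* Create eight virtual IPIs on top of the one HW IPI. */
	first_virq = ipi_mux_create(8, example_hw_ipi_trigger);
	if (first_virq <= 0)
		return first_virq < 0 ? first_virq : -ENODEV;

	/*
	 * The range first_virq .. first_virq + 7 can now be requested as
	 * per-CPU IRQs and raised via ipi_send_single()/ipi_send_mask(),
	 * declared alongside the mux API in <linux/irq.h> above.
	 */
	return 0;
}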
From 47d1932f37de99bae3345bb93f098ac8750ab0fb Mon Sep 17 00:00:00 2001
From: Johan Hovold
Date: Mon, 13 Feb 2023 11:42:50 +0100
Subject: irqdomain: Drop revmap mutex

The revmap mutex is essentially only used to maintain the integrity of
the radix tree during updates (lookups use RCU). As the global
irq_domain_mutex is now held in all paths that update the revmap
structures, there is strictly no longer any need for the dedicated
mutex, which can be removed.

Drop the revmap mutex and add lockdep assertions to the revmap helpers
to make sure that the global lock is always held when updating the
revmap.

Tested-by: Hsin-Yi Wang
Tested-by: Mark-PK Tsai
Signed-off-by: Johan Hovold
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230213104302.17307-9-johan+linaro@kernel.org
---
 include/linux/irqdomain.h |  2 --
 kernel/irq/irqdomain.c    | 13 ++++++-------
 2 files changed, 6 insertions(+), 9 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index a372086750ca..16399de00b48 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -143,7 +143,6 @@ struct irq_domain_chip_generic;
  * Revmap data, used internally by the irq domain code:
  * @revmap_size:	Size of the linear map table @revmap[]
  * @revmap_tree:	Radix map tree for hwirqs that don't fit in the linear map
- * @revmap_mutex:	Lock for the revmap
  * @revmap:		Linear table of irq_data pointers
  */
 struct irq_domain {
@@ -171,7 +170,6 @@ struct irq_domain {
 	irq_hw_number_t hwirq_max;
 	unsigned int revmap_size;
 	struct radix_tree_root revmap_tree;
-	struct mutex revmap_mutex;
 	struct irq_data __rcu *revmap[];
 };
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index a6d1b108b8f7..c7113e776543 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -206,7 +206,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
 
 	/* Fill structure */
 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
-	mutex_init(&domain->revmap_mutex);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->hwirq_max = hwirq_max;
@@ -526,30 +525,30 @@ static bool irq_domain_is_nomap(struct irq_domain *domain)
 static void irq_domain_clear_mapping(struct irq_domain *domain,
 				     irq_hw_number_t hwirq)
 {
+	lockdep_assert_held(&irq_domain_mutex);
+
 	if (irq_domain_is_nomap(domain))
 		return;
 
-	mutex_lock(&domain->revmap_mutex);
 	if (hwirq < domain->revmap_size)
 		rcu_assign_pointer(domain->revmap[hwirq], NULL);
 	else
 		radix_tree_delete(&domain->revmap_tree, hwirq);
-	mutex_unlock(&domain->revmap_mutex);
 }
 
 static void irq_domain_set_mapping(struct irq_domain *domain,
 				   irq_hw_number_t hwirq,
 				   struct irq_data *irq_data)
 {
+	lockdep_assert_held(&irq_domain_mutex);
+
 	if (irq_domain_is_nomap(domain))
 		return;
 
-	mutex_lock(&domain->revmap_mutex);
 	if (hwirq < domain->revmap_size)
 		rcu_assign_pointer(domain->revmap[hwirq], irq_data);
 	else
 		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-	mutex_unlock(&domain->revmap_mutex);
 }
 
 static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
@@ -1580,11 +1579,12 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 {
 	void __rcu **slot;
 
+	lockdep_assert_held(&irq_domain_mutex);
+
 	if (irq_domain_is_nomap(d->domain))
 		return;
 
 	/* Fix up the revmap. */
-	mutex_lock(&d->domain->revmap_mutex);
 	if (d->hwirq < d->domain->revmap_size) {
 		/* Not using radix tree */
 		rcu_assign_pointer(d->domain->revmap[d->hwirq], d);
@@ -1593,7 +1593,6 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 		if (slot)
 			radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
 	}
-	mutex_unlock(&d->domain->revmap_mutex);
 }
 
 /**
--
cgit v1.2.3
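The pattern this patch relies on, updates serialised by one mutex and checked with lockdep while lookups run under RCU only, can be illustrated with a simplified stand-in for the revmap. This sketch is not kernel code; the example_* names are invented for illustration.

#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(example_update_lock);
static struct irq_data __rcu *example_slot;

/* Updater: caller must hold example_update_lock; lockdep verifies it. */
static void example_set_mapping(struct irq_data *d)
{
	lockdep_assert_held(&example_update_lock);
	rcu_assign_pointer(example_slot, d);	/* publish to readers */
}

/* Reader: no mutex, just an RCU read-side critical section. */
static irq_hw_number_t example_lookup_hwirq(void)
{
	struct irq_data *d;
	irq_hw_number_t hwirq = 0;

	rcu_read_lock();
	d = rcu_dereference(example_slot);
	if (d)
		hwirq = d->hwirq;
	rcu_read_unlock();

	return hwirq;
}

Because readers never take the mutex, a second mutex around the updaters (the old revmap_mutex) adds nothing once all update paths already hold irq_domain_mutex; the lockdep assertions document and enforce that invariant.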
From 9dbb8e3452aba34e6fa4f63054b3adc66aceb7ec Mon Sep 17 00:00:00 2001
From: Johan Hovold
Date: Mon, 13 Feb 2023 11:43:02 +0100
Subject: irqdomain: Switch to per-domain locking

The IRQ domain structures are currently protected by the global
irq_domain_mutex. Switch to using more fine-grained per-domain locking,
which can speed up parallel probing by reducing lock contention.

On a recent arm64 laptop, the total time spent waiting for the locks
during boot drops from 160 to 40 ms on average, while the maximum
aggregate wait time drops from 550 to 90 ms over ten runs for example.

Note that the domain lock of the root domain (innermost domain) must be
used for hierarchical domains. For non-hierarchical domains (as for root
domains), the new root pointer is set to the domain itself so that
&domain->root->mutex always points to the right lock.

Also note that hierarchical domains should be constructed using
irq_domain_create_hierarchy() (or irq_domain_add_hierarchy()) to avoid
having racing allocations access a not fully initialised domain. As a
safeguard, the lockdep assertion in irq_domain_set_mapping() will catch
any offenders that also fail to set the root domain pointer.

Tested-by: Hsin-Yi Wang
Tested-by: Mark-PK Tsai
Signed-off-by: Johan Hovold
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230213104302.17307-21-johan+linaro@kernel.org
---
 include/linux/irqdomain.h |  4 ++++
 kernel/irq/irqdomain.c    | 59 +++++++++++++++++++++++++++++++----------------
 2 files changed, 43 insertions(+), 20 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 16399de00b48..d320d15d4fba 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -125,6 +125,8 @@ struct irq_domain_chip_generic;
  *		core code.
  * @flags:	Per irq_domain flags
  * @mapcount:	The number of mapped interrupts
+ * @mutex:	Domain lock, hierarchical domains use root domain's lock
+ * @root:	Pointer to root domain, or containing structure if non-hierarchical
  *
  * Optional elements:
  * @fwnode:	Pointer to firmware node associated with the irq_domain. Pretty easy
@@ -152,6 +154,8 @@ struct irq_domain {
 	void *host_data;
 	unsigned int flags;
 	unsigned int mapcount;
+	struct mutex mutex;
+	struct irq_domain *root;
 
 	/* Optional data */
 	struct fwnode_handle *fwnode;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 6d480dc6ab53..1983f1beeec7 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -215,6 +215,17 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
 
 	domain->revmap_size = size;
 
+	/*
+	 * Hierarchical domains use the domain lock of the root domain
+	 * (innermost domain).
+	 *
+	 * For non-hierarchical domains (as for root domains), the root
+	 * pointer is set to the domain itself so that &domain->root->mutex
+	 * always points to the right lock.
+	 */
+	mutex_init(&domain->mutex);
+	domain->root = domain;
+
 	irq_domain_check_hierarchy(domain);
 
 	return domain;
@@ -524,7 +535,7 @@ static bool irq_domain_is_nomap(struct irq_domain *domain)
 static void irq_domain_clear_mapping(struct irq_domain *domain,
 				     irq_hw_number_t hwirq)
 {
-	lockdep_assert_held(&irq_domain_mutex);
+	lockdep_assert_held(&domain->root->mutex);
 
 	if (irq_domain_is_nomap(domain))
 		return;
@@ -539,7 +550,11 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
 				   irq_hw_number_t hwirq,
 				   struct irq_data *irq_data)
 {
-	lockdep_assert_held(&irq_domain_mutex);
+	/*
+	 * This also makes sure that all domains point to the same root when
+	 * called from irq_domain_insert_irq() for each domain in a hierarchy.
+	 */
+	lockdep_assert_held(&domain->root->mutex);
 
 	if (irq_domain_is_nomap(domain))
 		return;
@@ -561,7 +576,7 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 
 	hwirq = irq_data->hwirq;
 
-	mutex_lock(&irq_domain_mutex);
+	mutex_lock(&domain->root->mutex);
 
 	irq_set_status_flags(irq, IRQ_NOREQUEST);
 
@@ -583,7 +598,7 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 	/* Clear reverse map for this hwirq */
 	irq_domain_clear_mapping(domain, hwirq);
 
-	mutex_unlock(&irq_domain_mutex);
+	mutex_unlock(&domain->root->mutex);
 }
 
 static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
@@ -633,9 +648,9 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 {
 	int ret;
 
-	mutex_lock(&irq_domain_mutex);
+	mutex_lock(&domain->root->mutex);
 	ret = irq_domain_associate_locked(domain, virq, hwirq);
-	mutex_unlock(&irq_domain_mutex);
+	mutex_unlock(&domain->root->mutex);
 
 	return ret;
 }
@@ -752,7 +767,7 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
 		return 0;
 	}
 
-	mutex_lock(&irq_domain_mutex);
+	mutex_lock(&domain->root->mutex);
 
 	/* Check if mapping already exists */
 	virq = irq_find_mapping(domain, hwirq);
@@ -763,7 +778,7 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
 
 	virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
 out:
-	mutex_unlock(&irq_domain_mutex);
+	mutex_unlock(&domain->root->mutex);
 
 	return virq;
 }
@@ -832,7 +847,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
 		type &= IRQ_TYPE_SENSE_MASK;
 
-	mutex_lock(&irq_domain_mutex);
+	mutex_lock(&domain->root->mutex);
 
 	/*
 	 * If we've already configured this interrupt,
@@ -892,7 +907,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 	/* Store trigger type */
 	irqd_set_trigger_type(irq_data, type);
 out:
-	mutex_unlock(&irq_domain_mutex);
+	mutex_unlock(&domain->root->mutex);
 
 	return virq;
 }
@@ -1157,6 +1172,7 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
 
 	domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
 	if (domain) {
+		domain->root = parent->root;
 		domain->parent = parent;
 		domain->flags |= flags;
 
@@ -1555,10 +1571,10 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
 		return -EINVAL;
 	}
 
-	mutex_lock(&irq_domain_mutex);
+	mutex_lock(&domain->root->mutex);
 	ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
 					   realloc, affinity);
-	mutex_unlock(&irq_domain_mutex);
+	mutex_unlock(&domain->root->mutex);
 
 	return ret;
 }
@@ -1569,7 +1585,7 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 {
 	void __rcu **slot;
 
-	lockdep_assert_held(&irq_domain_mutex);
+	lockdep_assert_held(&d->domain->root->mutex);
 
 	if (irq_domain_is_nomap(d->domain))
 		return;
@@ -1635,7 +1651,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
 	if (!parent_irq_data)
 		return -ENOMEM;
 
-	mutex_lock(&irq_domain_mutex);
+	mutex_lock(&domain->root->mutex);
 
 	/* Copy the original irq_data. */
 	*parent_irq_data = *irq_data;
 
@@ -1663,7 +1679,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
 	irq_domain_fix_revmap(parent_irq_data);
 	irq_domain_set_mapping(domain, irq_data->hwirq, irq_data);
 error:
-	mutex_unlock(&irq_domain_mutex);
+	mutex_unlock(&domain->root->mutex);
 
 	return rv;
 }
@@ -1718,7 +1734,7 @@ int irq_domain_pop_irq(struct irq_domain *domain, int virq)
 	if (WARN_ON(!parent_irq_data))
 		return -EINVAL;
 
-	mutex_lock(&irq_domain_mutex);
+	mutex_lock(&domain->root->mutex);
 
 	irq_data->parent_data = NULL;
 
@@ -1730,7 +1746,7 @@ int irq_domain_pop_irq(struct irq_domain *domain, int virq)
 
 	irq_domain_fix_revmap(irq_data);
 
-	mutex_unlock(&irq_domain_mutex);
+	mutex_unlock(&domain->root->mutex);
 
 	kfree(parent_irq_data);
 
@@ -1746,17 +1762,20 @@ EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
 void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
 {
 	struct irq_data *data = irq_get_irq_data(virq);
+	struct irq_domain *domain;
 	int i;
 
 	if (WARN(!data || !data->domain || !data->domain->ops->free,
 		 "NULL pointer, cannot free irq\n"))
 		return;
 
-	mutex_lock(&irq_domain_mutex);
+	domain = data->domain;
+
+	mutex_lock(&domain->root->mutex);
 	for (i = 0; i < nr_irqs; i++)
 		irq_domain_remove_irq(virq + i);
-	irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
-	mutex_unlock(&irq_domain_mutex);
+	irq_domain_free_irqs_hierarchy(domain, virq, nr_irqs);
+	mutex_unlock(&domain->root->mutex);
 
 	irq_domain_free_irq_data(virq, nr_irqs);
 	irq_free_descs(virq, nr_irqs);
--
cgit v1.2.3
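The effect of the root pointer is that a single expression picks the correct lock in both cases. A hypothetical helper makes this explicit; the patch itself open-codes mutex_lock(&domain->root->mutex) at each call site rather than introducing such a function.

#include <linux/irqdomain.h>
#include <linux/mutex.h>

/*
 * For a non-hierarchical domain, domain->root == domain, so this takes
 * the domain's own lock. For a hierarchical domain, domain->root points
 * at the innermost (root) domain, so every domain in the hierarchy
 * serialises on one and the same mutex.
 */
static inline void example_irq_domain_lock(struct irq_domain *domain)
{
	mutex_lock(&domain->root->mutex);
}

static inline void example_irq_domain_unlock(struct irq_domain *domain)
{
	mutex_unlock(&domain->root->mutex);
}

This is why irq_domain_create_hierarchy() must set domain->root = parent->root before the domain becomes visible: a child that kept the default self-pointing root would lock a different mutex than its hierarchy, which is exactly the case the lockdep assertion in irq_domain_set_mapping() is meant to catch.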