author     Linus Torvalds <torvalds@linux-foundation.org>  2019-03-05 23:21:47 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-05 23:21:47 +0300
commit     78f860135433a8bba406352fbdcea8e8980583bf (patch)
tree       0b7a9ba320e38b5d6eb0fb982bc2d9449aaf57f3 /drivers
parent     18483190e7a2a6761b67c6824a31adf5b2b7be51 (diff)
parent     a324ca9cad4736252c33c1e28cffe1d87f262d03 (diff)
download   linux-78f860135433a8bba406352fbdcea8e8980583bf.tar.xz
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
"The interrupt departement delivers this time:
- New infrastructure to manage NMIs on platforms which have a sane
NMI delivery, i.e. identifiable NMI vectors instead of a single
lump.
- Simplification of the interrupt affinity management so drivers
  don't have to implement ugly loops around the PCI/MSI enablement
  (a sketch of the new driver-side pattern follows this message)
- Speedup for interrupt statistics in /proc/stat
- Provide a function to retrieve the default irq domain
- A new interrupt controller for the Loongson LS1X platform
- Affinity support for the SiFive PLIC
- Better support for the iMX irqsteer driver
- NUMA aware memory allocations for GICv3
- The usual small fixes, improvements and cleanups all over the
place"
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
irqchip/imx-irqsteer: Add multi output interrupts support
irqchip/imx-irqsteer: Change to use reg_num instead of irq_group
dt-bindings: irq: imx-irqsteer: Add multi output interrupts support
dt-binding: irq: imx-irqsteer: Use irq number instead of group number
irqchip/brcmstb-l2: Use _irqsave locking variants in non-interrupt code
irqchip/gicv3-its: Use NUMA aware memory allocation for ITS tables
irqdomain: Allow the default irq domain to be retrieved
irqchip/sifive-plic: Implement irq_set_affinity() for SMP host
irqchip/sifive-plic: Differentiate between PLIC handler and context
irqchip/sifive-plic: Add warning in plic_init() if handler already present
irqchip/sifive-plic: Pre-compute context hart base and enable base
PCI/MSI: Remove obsolete sanity checks for multiple interrupt sets
genirq/affinity: Remove the leftovers of the original set support
nvme-pci: Simplify interrupt allocation
genirq/affinity: Add new callback for (re)calculating interrupt sets
genirq/affinity: Store interrupt sets size in struct irq_affinity
genirq/affinity: Code consolidation
irqchip/irq-sifive-plic: Check and continue in case of an invalid cpuid.
irqchip/i8259: Fix shutdown order by moving syscore_ops registration
dt-bindings: interrupt-controller: loongson ls1x intc
...
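One item in the list above that is not visible in the drivers/ diffstat: "irqdomain: Allow the default irq domain to be retrieved" pairs the long-standing irq_set_default_host() with a getter. A minimal, hedged sketch of the intended use on a legacy system with a single interrupt controller and no firmware node to look it up by; irq_get_default_host() is the getter that commit is understood to add, and the helper and hwirq number are invented for illustration:

#include <linux/irqdomain.h>

/* Sketch only: fall back to the default domain when no fwnode exists. */
static unsigned int map_legacy_timer_irq(void)
{
	struct irq_domain *domain = irq_get_default_host();

	if (!domain)
		return 0;

	/* Returns a Linux virq, or 0 if the mapping failed. */
	return irq_create_mapping(domain, 9);
}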
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/irqchip/Kconfig             |   9
-rw-r--r--   drivers/irqchip/Makefile            |   1
-rw-r--r--   drivers/irqchip/irq-brcmstb-l2.c    |  10
-rw-r--r--   drivers/irqchip/irq-gic-v3-its.c    |  28
-rw-r--r--   drivers/irqchip/irq-i8259.c         |   9
-rw-r--r--   drivers/irqchip/irq-imx-irqsteer.c  | 115
-rw-r--r--   drivers/irqchip/irq-ls1x.c          | 192
-rw-r--r--   drivers/irqchip/irq-sifive-plic.c   | 116
-rw-r--r--   drivers/nvme/host/pci.c             | 117
-rw-r--r--   drivers/pci/msi.c                   |  39
-rw-r--r--   drivers/scsi/be2iscsi/be_main.c     |   2
11 files changed, 443 insertions, 195 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3d1e60779078..5dcb5456cd14 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -406,6 +406,15 @@ config IMX_IRQSTEER
 	help
 	  Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
 
+config LS1X_IRQ
+	bool "Loongson-1 Interrupt Controller"
+	depends on MACH_LOONGSON32
+	default y
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_CHIP
+	help
+	  Support for the Loongson-1 platform Interrupt Controller.
+
 endmenu
 
 config SIFIVE_PLIC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c93713d24b86..7acd0e36d0b4 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_CSKY_APB_INTC)		+= irq-csky-apb-intc.o
 obj-$(CONFIG_SIFIVE_PLIC)		+= irq-sifive-plic.o
 obj-$(CONFIG_IMX_IRQSTEER)		+= irq-imx-irqsteer.o
 obj-$(CONFIG_MADERA_IRQ)		+= irq-madera.o
+obj-$(CONFIG_LS1X_IRQ)			+= irq-ls1x.o
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 0e65f609352e..83364fedbf0a 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	struct brcmstb_l2_intc_data *b = gc->private;
+	unsigned long flags;
 
-	irq_gc_lock(gc);
+	irq_gc_lock_irqsave(gc, flags);
 	/* Save the current mask */
 	b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
 
@@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
 		irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
 		irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
 	}
-	irq_gc_unlock(gc);
+	irq_gc_unlock_irqrestore(gc, flags);
 }
 
 static void brcmstb_l2_intc_resume(struct irq_data *d)
@@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	struct brcmstb_l2_intc_data *b = gc->private;
+	unsigned long flags;
 
-	irq_gc_lock(gc);
+	irq_gc_lock_irqsave(gc, flags);
 	if (ct->chip.irq_ack) {
 		/* Clear unmasked non-wakeup interrupts */
 		irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
@@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
 	/* Restore the saved mask */
 	irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
 	irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
-	irq_gc_unlock(gc);
+	irq_gc_unlock_irqrestore(gc, flags);
 }
 
 static int __init brcmstb_l2_intc_of_init(struct device_node *np,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c3aba3fc818d..2dd1ff0cf558 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1746,6 +1746,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
 	u64 type = GITS_BASER_TYPE(val);
 	u64 baser_phys, tmp;
 	u32 alloc_pages;
+	struct page *page;
 	void *base;
 
 retry_alloc_baser:
@@ -1758,10 +1759,11 @@ retry_alloc_baser:
 		order = get_order(GITS_BASER_PAGES_MAX * psz);
 	}
 
-	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!base)
+	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+	if (!page)
 		return -ENOMEM;
 
+	base = (void *)page_address(page);
 	baser_phys = virt_to_phys(base);
 
 	/* Check if the physical address of the memory is above 48bits */
@@ -1955,6 +1957,8 @@ static int its_alloc_tables(struct its_node *its)
 			indirect = its_parse_indirect_baser(its, baser,
 							    psz, &order,
 							    its->device_ids);
+			break;
+
 		case GITS_BASER_TYPE_VCPU:
 			indirect = its_parse_indirect_baser(its, baser,
 							    psz, &order,
@@ -2292,7 +2296,8 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
 	return NULL;
 }
 
-static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
+static bool its_alloc_table_entry(struct its_node *its,
+				  struct its_baser *baser, u32 id)
 {
 	struct page *page;
 	u32 esz, idx;
@@ -2312,7 +2317,8 @@ static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
 
 	/* Allocate memory for 2nd level table */
 	if (!table[idx]) {
-		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+					get_order(baser->psz));
 		if (!page)
 			return false;
 
@@ -2343,7 +2349,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
 	if (!baser)
 		return (ilog2(dev_id) < its->device_ids);
 
-	return its_alloc_table_entry(baser, dev_id);
+	return its_alloc_table_entry(its, baser, dev_id);
 }
 
 static bool its_alloc_vpe_table(u32 vpe_id)
@@ -2367,7 +2373,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
 		if (!baser)
 			return false;
 
-		if (!its_alloc_table_entry(baser, vpe_id))
+		if (!its_alloc_table_entry(its, baser, vpe_id))
 			return false;
 	}
 
@@ -2401,7 +2407,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 	nr_ites = max(2, nvecs);
 	sz = nr_ites * its->ite_size;
 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
-	itt = kzalloc(sz, GFP_KERNEL);
+	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
 	if (alloc_lpis) {
 		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
 		if (lpi_map)
@@ -3543,6 +3549,7 @@ static int __init its_probe_one(struct resource *res,
 	void __iomem *its_base;
 	u32 val, ctlr;
 	u64 baser, tmp, typer;
+	struct page *page;
 	int err;
 
 	its_base = ioremap(res->start, resource_size(res));
@@ -3599,12 +3606,13 @@ static int __init its_probe_one(struct resource *res,
 
 	its->numa_node = numa_node;
 
-	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						get_order(ITS_CMD_QUEUE_SZ));
-	if (!its->cmd_base) {
+	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+				get_order(ITS_CMD_QUEUE_SZ));
+	if (!page) {
 		err = -ENOMEM;
 		goto out_free_its;
 	}
+	its->cmd_base = (void *)page_address(page);
 	its->cmd_write = its->cmd_base;
 	its->fwnode_handle = handle;
 	its->get_msi_base = its_irq_get_msi_base;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index b0d4aab1a58c..d000870d9b6b 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -225,14 +225,6 @@ static struct syscore_ops i8259_syscore_ops = {
 	.shutdown = i8259A_shutdown,
 };
 
-static int __init i8259A_init_sysfs(void)
-{
-	register_syscore_ops(&i8259_syscore_ops);
-	return 0;
-}
-
-device_initcall(i8259A_init_sysfs);
-
 static void init_8259A(int auto_eoi)
 {
 	unsigned long flags;
@@ -332,6 +324,7 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
 		panic("Failed to add i8259 IRQ domain");
 
 	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+	register_syscore_ops(&i8259_syscore_ops);
 	return domain;
 }
 
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 5b3f1d735685..d1098f4da6a4 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -10,10 +10,11 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/spinlock.h>
 
-#define CTRL_STRIDE_OFF(_t, _r)	(_t * 8 * _r)
+#define CTRL_STRIDE_OFF(_t, _r)	(_t * 4 * _r)
 #define CHANCTRL		0x0
 #define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
 #define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
@@ -21,12 +22,15 @@
 #define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
 #define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)
 
+#define CHAN_MAX_OUTPUT_INT	0x8
+
 struct irqsteer_data {
 	void __iomem		*regs;
 	struct clk		*ipg_clk;
-	int			irq;
+	int			irq[CHAN_MAX_OUTPUT_INT];
+	int			irq_count;
 	raw_spinlock_t		lock;
-	int			irq_groups;
+	int			reg_num;
 	int			channel;
 	struct irq_domain	*domain;
 	u32			*saved_reg;
@@ -35,7 +39,7 @@ struct irqsteer_data {
 static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
 				      unsigned long irqnum)
 {
-	return (data->irq_groups * 2 - irqnum / 32 - 1);
+	return (data->reg_num - irqnum / 32 - 1);
 }
 
 static void imx_irqsteer_irq_unmask(struct irq_data *d)
@@ -46,9 +50,9 @@ static void imx_irqsteer_irq_unmask(struct irq_data *d)
 	u32 val;
 
 	raw_spin_lock_irqsave(&data->lock, flags);
-	val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
 	val |= BIT(d->hwirq % 32);
-	writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
 	raw_spin_unlock_irqrestore(&data->lock, flags);
 }
 
@@ -60,9 +64,9 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
 	u32 val;
 
 	raw_spin_lock_irqsave(&data->lock, flags);
-	val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
 	val &= ~BIT(d->hwirq % 32);
-	writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
 	raw_spin_unlock_irqrestore(&data->lock, flags);
 }
 
@@ -87,23 +91,47 @@ static const struct irq_domain_ops imx_irqsteer_domain_ops = {
 	.xlate		= irq_domain_xlate_onecell,
 };
 
+static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
+{
+	int i;
+
+	for (i = 0; i < data->irq_count; i++) {
+		if (data->irq[i] == irq)
+			return i * 64;
+	}
+
+	return -EINVAL;
+}
+
 static void imx_irqsteer_irq_handler(struct irq_desc *desc)
 {
 	struct irqsteer_data *data = irq_desc_get_handler_data(desc);
-	int i;
+	int hwirq;
+	int irq, i;
 
 	chained_irq_enter(irq_desc_get_chip(desc), desc);
 
-	for (i = 0; i < data->irq_groups * 64; i += 32) {
-		int idx = imx_irqsteer_get_reg_index(data, i);
+	irq = irq_desc_get_irq(desc);
+	hwirq = imx_irqsteer_get_hwirq_base(data, irq);
+	if (hwirq < 0) {
+		pr_warn("%s: unable to get hwirq base for irq %d\n",
+			__func__, irq);
+		return;
+	}
+
+	for (i = 0; i < 2; i++, hwirq += 32) {
+		int idx = imx_irqsteer_get_reg_index(data, hwirq);
 		unsigned long irqmap;
 		int pos, virq;
 
+		if (hwirq >= data->reg_num * 32)
+			break;
+
 		irqmap = readl_relaxed(data->regs +
-				       CHANSTATUS(idx, data->irq_groups));
+				       CHANSTATUS(idx, data->reg_num));
 
 		for_each_set_bit(pos, &irqmap, 32) {
-			virq = irq_find_mapping(data->domain, pos + i);
+			virq = irq_find_mapping(data->domain, pos + hwirq);
 			if (virq)
 				generic_handle_irq(virq);
 		}
@@ -117,7 +145,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 	struct device_node *np = pdev->dev.of_node;
 	struct irqsteer_data *data;
 	struct resource *res;
-	int ret;
+	u32 irqs_num;
+	int i, ret;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -130,12 +159,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 		return PTR_ERR(data->regs);
 	}
 
-	data->irq = platform_get_irq(pdev, 0);
-	if (data->irq <= 0) {
-		dev_err(&pdev->dev, "failed to get irq\n");
-		return -ENODEV;
-	}
-
 	data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
 	if (IS_ERR(data->ipg_clk)) {
 		ret = PTR_ERR(data->ipg_clk);
@@ -146,12 +169,19 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 
 	raw_spin_lock_init(&data->lock);
 
-	of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups);
+	of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
 	of_property_read_u32(np, "fsl,channel", &data->channel);
 
+	/*
+	 * There is one output irq for each group of 64 inputs.
+	 * One register bit map can represent 32 input interrupts.
+	 */
+	data->irq_count = DIV_ROUND_UP(irqs_num, 64);
+	data->reg_num = irqs_num / 32;
+
 	if (IS_ENABLED(CONFIG_PM_SLEEP)) {
 		data->saved_reg = devm_kzalloc(&pdev->dev,
-					sizeof(u32) * data->irq_groups * 2,
+					sizeof(u32) * data->reg_num,
 					GFP_KERNEL);
 		if (!data->saved_reg)
 			return -ENOMEM;
@@ -166,27 +196,48 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 	/* steer all IRQs into configured channel */
 	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
 
-	data->domain = irq_domain_add_linear(np, data->irq_groups * 64,
+	data->domain = irq_domain_add_linear(np, data->reg_num * 32,
 					     &imx_irqsteer_domain_ops, data);
 	if (!data->domain) {
 		dev_err(&pdev->dev, "failed to create IRQ domain\n");
-		clk_disable_unprepare(data->ipg_clk);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
+		ret = -EINVAL;
+		goto out;
 	}
 
-	irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler,
-					 data);
+	for (i = 0; i < data->irq_count; i++) {
+		data->irq[i] = irq_of_parse_and_map(np, i);
+		if (!data->irq[i]) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		irq_set_chained_handler_and_data(data->irq[i],
+						 imx_irqsteer_irq_handler,
+						 data);
+	}
 
 	platform_set_drvdata(pdev, data);
 
 	return 0;
+out:
+	clk_disable_unprepare(data->ipg_clk);
+	return ret;
 }
 
 static int imx_irqsteer_remove(struct platform_device *pdev)
 {
 	struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < irqsteer_data->irq_count; i++)
+		irq_set_chained_handler_and_data(irqsteer_data->irq[i],
+						 NULL, NULL);
 
-	irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL);
 	irq_domain_remove(irqsteer_data->domain);
 	clk_disable_unprepare(irqsteer_data->ipg_clk);
 
@@ -199,9 +250,9 @@ static void imx_irqsteer_save_regs(struct irqsteer_data *data)
 {
 	int i;
 
-	for (i = 0; i < data->irq_groups * 2; i++)
+	for (i = 0; i < data->reg_num; i++)
 		data->saved_reg[i] = readl_relaxed(data->regs +
-						CHANMASK(i, data->irq_groups));
+						   CHANMASK(i, data->reg_num));
 }
 
 static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
@@ -209,9 +260,9 @@ static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
 	int i;
 
 	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
-	for (i = 0; i < data->irq_groups * 2; i++)
+	for (i = 0; i < data->reg_num; i++)
 		writel_relaxed(data->saved_reg[i],
-			       data->regs + CHANMASK(i, data->irq_groups));
+			       data->regs + CHANMASK(i, data->reg_num));
 }
 
 static int imx_irqsteer_suspend(struct device *dev)
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
new file mode 100644
index 000000000000..86b72fbd3b45
--- /dev/null
+++ b/drivers/irqchip/irq-ls1x.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *  Loongson-1 platform IRQ support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define LS_REG_INTC_STATUS	0x00
+#define LS_REG_INTC_EN		0x04
+#define LS_REG_INTC_SET		0x08
+#define LS_REG_INTC_CLR		0x0c
+#define LS_REG_INTC_POL		0x10
+#define LS_REG_INTC_EDGE	0x14
+
+/**
+ * struct ls1x_intc_priv - private ls1x-intc data.
+ * @domain:	IRQ domain.
+ * @intc_base:	IO Base of intc registers.
+ */
+
+struct ls1x_intc_priv {
+	struct irq_domain	*domain;
+	void __iomem		*intc_base;
+};
+
+
+static void ls1x_chained_handle_irq(struct irq_desc *desc)
+{
+	struct ls1x_intc_priv *priv = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 pending;
+
+	chained_irq_enter(chip, desc);
+	pending = readl(priv->intc_base + LS_REG_INTC_STATUS) &
+			readl(priv->intc_base + LS_REG_INTC_EN);
+
+	if (!pending)
+		spurious_interrupt();
+
+	while (pending) {
+		int bit = __ffs(pending);
+
+		generic_handle_irq(irq_find_mapping(priv->domain, bit));
+		pending &= ~BIT(bit);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void ls_intc_set_bit(struct irq_chip_generic *gc,
+			    unsigned int offset,
+			    u32 mask, bool set)
+{
+	if (set)
+		writel(readl(gc->reg_base + offset) | mask,
+		gc->reg_base + offset);
+	else
+		writel(readl(gc->reg_base + offset) & ~mask,
+		gc->reg_base + offset);
+}
+
+static int ls_intc_set_type(struct irq_data *data, unsigned int type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	u32 mask = data->mask;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	irqd_set_trigger_type(data, type);
+	return irq_setup_alt_chip(data, type);
+}
+
+
+static int __init ls1x_intc_of_init(struct device_node *node,
+				    struct device_node *parent)
+{
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	struct ls1x_intc_priv *priv;
+	int parent_irq, err = 0;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->intc_base = of_iomap(node, 0);
+	if (!priv->intc_base) {
+		err = -ENODEV;
+		goto out_free_priv;
+	}
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq) {
+		pr_err("ls1x-irq: unable to get parent irq\n");
+		err = -ENODEV;
+		goto out_iounmap;
+	}
+
+	/* Set up an IRQ domain */
+	priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
+					     NULL);
+	if (!priv->domain) {
+		pr_err("ls1x-irq: cannot add IRQ domain\n");
+		goto out_iounmap;
+	}
+
+	err = irq_alloc_domain_generic_chips(priv->domain, 32, 2,
+		node->full_name, handle_level_irq,
+		IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0,
+		IRQ_GC_INIT_MASK_CACHE);
+	if (err) {
+		pr_err("ls1x-irq: unable to register IRQ domain\n");
+		goto out_free_domain;
+	}
+
+	/* Mask all irqs */
+	writel(0x0, priv->intc_base + LS_REG_INTC_EN);
+
+	/* Ack all irqs */
+	writel(0xffffffff, priv->intc_base + LS_REG_INTC_CLR);
+
+	/* Set all irqs to high level triggered */
+	writel(0xffffffff, priv->intc_base + LS_REG_INTC_POL);
+
+	gc = irq_get_domain_generic_chip(priv->domain, 0);
+
+	gc->reg_base = priv->intc_base;
+
+	ct = gc->chip_types;
+	ct[0].type = IRQ_TYPE_LEVEL_MASK;
+	ct[0].regs.mask = LS_REG_INTC_EN;
+	ct[0].regs.ack = LS_REG_INTC_CLR;
+	ct[0].chip.irq_unmask = irq_gc_mask_set_bit;
+	ct[0].chip.irq_mask = irq_gc_mask_clr_bit;
+	ct[0].chip.irq_ack = irq_gc_ack_set_bit;
+	ct[0].chip.irq_set_type = ls_intc_set_type;
+	ct[0].handler = handle_level_irq;
+
+	ct[1].type = IRQ_TYPE_EDGE_BOTH;
+	ct[1].regs.mask = LS_REG_INTC_EN;
+	ct[1].regs.ack = LS_REG_INTC_CLR;
+	ct[1].chip.irq_unmask = irq_gc_mask_set_bit;
+	ct[1].chip.irq_mask = irq_gc_mask_clr_bit;
+	ct[1].chip.irq_ack = irq_gc_ack_set_bit;
+	ct[1].chip.irq_set_type = ls_intc_set_type;
+	ct[1].handler = handle_edge_irq;
+
+	irq_set_chained_handler_and_data(parent_irq,
+		ls1x_chained_handle_irq, priv);
+
+	return 0;
+
+out_free_domain:
+	irq_domain_remove(priv->domain);
+out_iounmap:
+	iounmap(priv->intc_base);
+out_free_priv:
+	kfree(priv);
+
+	return err;
+}
+
+IRQCHIP_DECLARE(ls1x_intc, "loongson,ls1x-intc", ls1x_intc_of_init);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..cf755964f2f8 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,62 +59,83 @@ static void __iomem *plic_regs;
 
 struct plic_handler {
 	bool			present;
-	int			ctxid;
+	void __iomem		*hart_base;
+	/*
+	 * Protect mask operations on the registers given that we can't
+	 * assume atomic memory operations work on them.
+	 */
+	raw_spinlock_t		enable_lock;
+	void __iomem		*enable_base;
 };
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
-	return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
-	return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+				int hwirq, int enable)
 {
-	u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
 	u32 hwirq_mask = 1 << (hwirq % 32);
 
-	raw_spin_lock(&plic_toggle_lock);
+	raw_spin_lock(&handler->enable_lock);
 	if (enable)
 		writel(readl(reg) | hwirq_mask, reg);
 	else
 		writel(readl(reg) & ~hwirq_mask, reg);
-	raw_spin_unlock(&plic_toggle_lock);
+	raw_spin_unlock(&handler->enable_lock);
 }
 
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+				   int hwirq, int enable)
 {
 	int cpu;
 
-	writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
-	for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+	for_each_cpu(cpu, mask) {
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
 		if (handler->present)
-			plic_toggle(handler->ctxid, d->hwirq, enable);
+			plic_toggle(handler, hwirq, enable);
 	}
 }
 
 static void plic_irq_enable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 1);
+	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+					   cpu_online_mask);
+	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+		return;
+	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
 static void plic_irq_disable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 0);
+	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
 
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+			     const struct cpumask *mask_val, bool force)
+{
+	unsigned int cpu;
+
+	if (force)
+		cpu = cpumask_first(mask_val);
+	else
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (!irqd_irq_disabled(d)) {
+		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
 static struct irq_chip plic_chip = {
 	.name		= "SiFive PLIC",
 	/*
@@ -123,6 +144,9 @@ static struct irq_chip plic_chip = {
 	 */
 	.irq_enable	= plic_irq_enable,
 	.irq_disable	= plic_irq_disable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = plic_set_affinity,
+#endif
 };
 
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
@@ -150,7 +174,7 @@ static struct irq_domain *plic_irqdomain;
 static void plic_handle_irq(struct pt_regs *regs)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
 	irq_hw_number_t hwirq;
 
 	WARN_ON_ONCE(!handler->present);
@@ -186,7 +210,7 @@ static int plic_find_hart_id(struct device_node *node)
 static int __init plic_init(struct device_node *node,
 		struct device_node *parent)
 {
-	int error = 0, nr_handlers, nr_mapped = 0, i;
+	int error = 0, nr_contexts, nr_handlers = 0, i;
 	u32 nr_irqs;
 
 	if (plic_regs) {
@@ -203,10 +227,10 @@ static int __init plic_init(struct device_node *node,
 	if (WARN_ON(!nr_irqs))
 		goto out_iounmap;
 
-	nr_handlers = of_irq_count(node);
-	if (WARN_ON(!nr_handlers))
+	nr_contexts = of_irq_count(node);
+	if (WARN_ON(!nr_contexts))
 		goto out_iounmap;
-	if (WARN_ON(nr_handlers < num_possible_cpus()))
+	if (WARN_ON(nr_contexts < num_possible_cpus()))
 		goto out_iounmap;
 
 	error = -ENOMEM;
@@ -215,7 +239,7 @@ static int __init plic_init(struct device_node *node,
 	if (WARN_ON(!plic_irqdomain))
 		goto out_iounmap;
 
-	for (i = 0; i < nr_handlers; i++) {
+	for (i = 0; i < nr_contexts; i++) {
 		struct of_phandle_args parent;
 		struct plic_handler *handler;
 		irq_hw_number_t hwirq;
@@ -237,19 +261,33 @@ static int __init plic_init(struct device_node *node,
 		}
 
 		cpu = riscv_hartid_to_cpuid(hartid);
+		if (cpu < 0) {
+			pr_warn("Invalid cpuid for context %d\n", i);
+			continue;
+		}
+
 		handler = per_cpu_ptr(&plic_handlers, cpu);
+		if (handler->present) {
+			pr_warn("handler already present for context %d.\n", i);
+			continue;
+		}
+
 		handler->present = true;
-		handler->ctxid = i;
+		handler->hart_base =
+			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+		raw_spin_lock_init(&handler->enable_lock);
+		handler->enable_base =
+			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
 
 		/* priority must be > threshold to trigger an interrupt */
-		writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+		writel(0, handler->hart_base + CONTEXT_THRESHOLD);
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
-			plic_toggle(i, hwirq, 0);
-		nr_mapped++;
+			plic_toggle(handler, hwirq, 0);
+		nr_handlers++;
 	}
 
-	pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
-		nr_irqs, nr_mapped, nr_handlers);
+	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
+		nr_irqs, nr_handlers, nr_contexts);
 	set_handle_irq(plic_handle_irq);
 	return 0;
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7fee665ec45e..e905861186e3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2041,53 +2041,52 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 	return ret;
 }
 
-/* irq_queues covers admin queue */
-static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
+/*
+ * nirqs is the number of interrupts available for write and read
+ * queues. The core already reserved an interrupt for the admin queue.
+ */
+static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
 {
-	unsigned int this_w_queues = write_queues;
-
-	WARN_ON(!irq_queues);
-
-	/*
-	 * Setup read/write queue split, assign admin queue one independent
-	 * irq vector if irq_queues is > 1.
-	 */
-	if (irq_queues <= 2) {
-		dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
-		dev->io_queues[HCTX_TYPE_READ] = 0;
-		return;
-	}
-
-	/*
-	 * If 'write_queues' is set, ensure it leaves room for at least
-	 * one read queue and one admin queue
-	 */
-	if (this_w_queues >= irq_queues)
-		this_w_queues = irq_queues - 2;
+	struct nvme_dev *dev = affd->priv;
+	unsigned int nr_read_queues;
 
 	/*
-	 * If 'write_queues' is set to zero, reads and writes will share
-	 * a queue set.
+	 * If there is no interupt available for queues, ensure that
+	 * the default queue is set to 1. The affinity set size is
+	 * also set to one, but the irq core ignores it for this case.
+	 *
+	 * If only one interrupt is available or 'write_queue' == 0, combine
+	 * write and read queues.
+	 *
+	 * If 'write_queues' > 0, ensure it leaves room for at least one read
+	 * queue.
 	 */
-	if (!this_w_queues) {
-		dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
-		dev->io_queues[HCTX_TYPE_READ] = 0;
+	if (!nrirqs) {
+		nrirqs = 1;
+		nr_read_queues = 0;
+	} else if (nrirqs == 1 || !write_queues) {
+		nr_read_queues = 0;
+	} else if (write_queues >= nrirqs) {
+		nr_read_queues = 1;
 	} else {
-		dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
-		dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
+		nr_read_queues = nrirqs - write_queues;
 	}
+
+	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+	affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+	dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
+	affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
+	affd->nr_sets = nr_read_queues ? 2 : 1;
 }
 
 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	int irq_sets[2];
 	struct irq_affinity affd = {
-		.pre_vectors = 1,
-		.nr_sets = ARRAY_SIZE(irq_sets),
-		.sets = irq_sets,
+		.pre_vectors	= 1,
+		.calc_sets	= nvme_calc_irq_sets,
+		.priv		= dev,
 	};
-	int result = 0;
 	unsigned int irq_queues, this_p_queues;
 
 	/*
@@ -2103,51 +2102,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	}
 	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
 
-	/*
-	 * For irq sets, we have to ask for minvec == maxvec. This passes
-	 * any reduction back to us, so we can adjust our queue counts and
-	 * IRQ vector needs.
-	 */
-	do {
-		nvme_calc_io_queues(dev, irq_queues);
-		irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT];
-		irq_sets[1] = dev->io_queues[HCTX_TYPE_READ];
-		if (!irq_sets[1])
-			affd.nr_sets = 1;
-
-		/*
-		 * If we got a failure and we're down to asking for just
-		 * 1 + 1 queues, just ask for a single vector. We'll share
-		 * that between the single IO queue and the admin queue.
-		 * Otherwise, we assign one independent vector to admin queue.
-		 */
-		if (irq_queues > 1)
-			irq_queues = irq_sets[0] + irq_sets[1] + 1;
+	/* Initialize for the single interrupt case */
+	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
+	dev->io_queues[HCTX_TYPE_READ] = 0;
 
-		result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
-				irq_queues,
-				PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
-
-		/*
-		 * Need to reduce our vec counts. If we get ENOSPC, the
-		 * platform should support mulitple vecs, we just need
-		 * to decrease our ask. If we get EINVAL, the platform
-		 * likely does not. Back down to ask for just one vector.
-		 */
-		if (result == -ENOSPC) {
-			irq_queues--;
-			if (!irq_queues)
-				return result;
-			continue;
-		} else if (result == -EINVAL) {
-			irq_queues = 1;
-			continue;
-		} else if (result <= 0)
-			return -EIO;
-		break;
-	} while (1);
-
-	return result;
+	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
+			      PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
 }
 
 static void nvme_disable_io_queues(struct nvme_dev *dev)
@@ -3024,6 +2984,7 @@ static struct pci_driver nvme_driver = {
 
 static int __init nvme_init(void)
 {
+	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
 	return pci_register_driver(&nvme_driver);
 }
 
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4c0b47867258..73986825d221 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -532,7 +532,7 @@ error_attrs:
 }
 
 static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
+msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 {
 	struct irq_affinity_desc *masks = NULL;
 	struct msi_desc *entry;
@@ -597,7 +597,7 @@ static int msi_verify_entries(struct pci_dev *dev)
  * which could have been allocated.
  */
 static int msi_capability_init(struct pci_dev *dev, int nvec,
-			       const struct irq_affinity *affd)
+			       struct irq_affinity *affd)
 {
 	struct msi_desc *entry;
 	int ret;
@@ -669,7 +669,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
 
 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 			      struct msix_entry *entries, int nvec,
-			      const struct irq_affinity *affd)
+			      struct irq_affinity *affd)
 {
 	struct irq_affinity_desc *curmsk, *masks = NULL;
 	struct msi_desc *entry;
@@ -736,7 +736,7 @@ static void msix_program_entries(struct pci_dev *dev,
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
-				int nvec, const struct irq_affinity *affd)
+				int nvec, struct irq_affinity *affd)
 {
 	int ret;
 	u16 control;
@@ -932,7 +932,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_msix_vec_count);
 
 static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
-			     int nvec, const struct irq_affinity *affd)
+			     int nvec, struct irq_affinity *affd)
 {
 	int nr_entries;
 	int i, j;
@@ -1018,7 +1018,7 @@ int pci_msi_enabled(void)
 EXPORT_SYMBOL(pci_msi_enabled);
 
 static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
-				  const struct irq_affinity *affd)
+				  struct irq_affinity *affd)
 {
 	int nvec;
 	int rc;
@@ -1035,13 +1035,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 	if (maxvec < minvec)
 		return -ERANGE;
 
-	/*
-	 * If the caller is passing in sets, we can't support a range of
-	 * vectors. The caller needs to handle that.
-	 */
-	if (affd && affd->nr_sets && minvec != maxvec)
-		return -EINVAL;
-
 	if (WARN_ON_ONCE(dev->msi_enabled))
 		return -EINVAL;
 
@@ -1086,20 +1079,13 @@ EXPORT_SYMBOL(pci_enable_msi);
 
 static int __pci_enable_msix_range(struct pci_dev *dev,
 				   struct msix_entry *entries, int minvec,
-				   int maxvec, const struct irq_affinity *affd)
+				   int maxvec, struct irq_affinity *affd)
 {
 	int rc, nvec = maxvec;
 
 	if (maxvec < minvec)
 		return -ERANGE;
 
-	/*
-	 * If the caller is passing in sets, we can't support a range of
-	 * supported vectors. The caller needs to handle that.
-	 */
-	if (affd && affd->nr_sets && minvec != maxvec)
-		return -EINVAL;
-
 	if (WARN_ON_ONCE(dev->msix_enabled))
 		return -EINVAL;
 
@@ -1165,9 +1151,9 @@ EXPORT_SYMBOL(pci_enable_msix_range);
  */
 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 				   unsigned int max_vecs, unsigned int flags,
-				   const struct irq_affinity *affd)
+				   struct irq_affinity *affd)
 {
-	static const struct irq_affinity msi_default_affd;
+	struct irq_affinity msi_default_affd = {0};
 	int msix_vecs = -ENOSPC;
 	int msi_vecs = -ENOSPC;
 
@@ -1196,6 +1182,13 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 	/* use legacy irq if allowed */
 	if (flags & PCI_IRQ_LEGACY) {
 		if (min_vecs == 1 && dev->irq) {
+			/*
+			 * Invoke the affinity spreading logic to ensure that
+			 * the device driver can adjust queue configuration
+			 * for the single interrupt case.
+			 */
+			if (affd)
+				irq_create_affinity_masks(1, affd);
 			pci_intx(dev, 1);
 			return 1;
 		}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 74e260027c7d..76e49d902609 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3566,7 +3566,7 @@ static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
 
 	/* if eqid_count == 1 fall back to INTX */
 	if (enable_msix && nvec > 1) {
-		const struct irq_affinity desc = { .post_vectors = 1 };
+		struct irq_affinity desc = { .post_vectors = 1 };
 
 		if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
 			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
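A closing note on the allocation pattern that recurs through the GICv3 ITS hunks above: __get_free_pages() takes no node argument, so ITS tables ended up on whichever node happened to run the probe; alloc_pages_node() pins the allocation to the node the ITS actually sits on, and page_address() recovers the linear-map pointer the old call returned directly. A minimal sketch of the pattern follows; the helper name is invented and error handling is trimmed:

#include <linux/gfp.h>
#include <linux/mm.h>

/* NUMA-aware replacement for __get_free_pages(), as in the ITS hunks. */
static void *alloc_table_on_node(int numa_node, unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(numa_node, GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return NULL;

	/* Same linear-map address __get_free_pages() used to return. */
	return page_address(page);
}

Freeing is unchanged: the linear-map address can still be handed to free_pages(), which is why the existing error paths in callers such as its_setup_baser() did not need to be rewritten.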