author     Anup Patel <apatel@ventanamicro.com>    2023-03-28 06:52:19 +0300
committer  Marc Zyngier <maz@kernel.org>           2023-04-08 13:26:24 +0300
commit     832f15f42646812b096bc67c0eac439291a0db1f (patch)
tree       ef8ad0a6a4059884420af3c093f0f182ac6aff28 /arch/riscv/kernel
parent     0c60a31ce62ca3e93550868fd699dfc4dfc4e795 (diff)
download   linux-832f15f42646812b096bc67c0eac439291a0db1f.tar.xz
RISC-V: Treat IPIs as normal Linux IRQs
Currently, the RISC-V kernel provides arch specific hooks (i.e.
struct riscv_ipi_ops) for registering IPI handling methods, and IPI
stats gathering is likewise arch specific.

Other architectures (such as ARM, ARM64, and MIPS) have moved away
from custom arch specific IPI handling methods: their Linux irqchip
drivers provide a range of Linux IRQ numbers to be used as IPIs, and
IPIs are triggered through the generic IPI APIs. This approach lets
an architecture treat IPIs as normal Linux IRQs, with IPI stats
gathered by the generic Linux IRQ subsystem.

We extend the RISC-V IPI handling as per the above approach so that
the arch specific IPI handling methods (struct riscv_ipi_ops) can be
removed and IPI handling is done entirely through the Linux IRQ
subsystem.
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230328035223.1480939-4-apatel@ventanamicro.com
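As a rough sketch of the resulting flow (illustrative only: my_hw_send_ipi()
and my_ipi_init() are hypothetical names, while ipi_mux_create(),
riscv_ipi_set_virq_range() and __ipi_send_mask() are the interfaces the
patch actually uses, see sbi-ipi.c and smp.c in the diff below), an IPI
provider creates a block of muxed per-CPU Linux IRQs and registers that
range with the arch code:

/*
 * Illustrative sketch only; my_hw_send_ipi() and my_ipi_init() are
 * hypothetical. An IPI provider multiplexes several virtual IPIs
 * over its single HW IPI and hands the resulting Linux IRQ range
 * to the RISC-V arch code.
 */
#include <linux/bits.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/smp.h>

/* Raise the underlying hardware IPI for one target CPU. */
static void my_hw_send_ipi(unsigned int cpu)
{
	/* e.g. write the target hart id into a mailbox register */
}

static void __init my_ipi_init(void)
{
	/* Create a block of muxed per-CPU Linux IRQs for the IPIs. */
	int virq = ipi_mux_create(BITS_PER_BYTE, my_hw_send_ipi);

	if (virq <= 0)
		return;

	/*
	 * Hand the range to the arch code, which requests one per-CPU
	 * IRQ per IPI type; sending then goes through the generic
	 * __ipi_send_mask() and stats come from generic IRQ accounting.
	 */
	riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
}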
Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/Makefile       |   1
-rw-r--r--  arch/riscv/kernel/cpu-hotplug.c  |   3
-rw-r--r--  arch/riscv/kernel/irq.c          |   3
-rw-r--r--  arch/riscv/kernel/sbi-ipi.c      |  77
-rw-r--r--  arch/riscv/kernel/sbi.c          | 106
-rw-r--r--  arch/riscv/kernel/smp.c          | 160
-rw-r--r--  arch/riscv/kernel/smpboot.c      |   5
7 files changed, 179 insertions(+), 176 deletions(-)
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 4cf303a779ab..67f542be1bea 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
 obj-$(CONFIG_RISCV_SBI)		+= sbi.o
 ifeq ($(CONFIG_RISCV_SBI), y)
+obj-$(CONFIG_SMP)		+= sbi-ipi.o
 obj-$(CONFIG_SMP)		+= cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu-hotplug.o
diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c
index f7a832e3a1d1..39235cf50652 100644
--- a/arch/riscv/kernel/cpu-hotplug.c
+++ b/arch/riscv/kernel/cpu-hotplug.c
@@ -13,7 +13,7 @@
 #include <asm/irq.h>
 #include <asm/cpu_ops.h>
 #include <asm/numa.h>
-#include <asm/sbi.h>
+#include <asm/smp.h>
 
 bool cpu_has_hotplug(unsigned int cpu)
 {
@@ -43,6 +43,7 @@ int __cpu_disable(void)
 	remove_cpu_topology(cpu);
 	numa_remove_cpu(cpu);
 	set_cpu_online(cpu, false);
+	riscv_ipi_disable();
 	irq_migrate_all_off_this_cpu();
 
 	return ret;
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 96d3171f0ca1..eb9a68a539e6 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -10,7 +10,7 @@
 #include <linux/irqdomain.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
-#include <asm/smp.h>
+#include <asm/sbi.h>
 
 static struct fwnode_handle *(*__get_intc_node)(void);
 
@@ -39,4 +39,5 @@ void __init init_IRQ(void)
 	irqchip_init();
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
+	sbi_ipi_init();
 }
diff --git a/arch/riscv/kernel/sbi-ipi.c b/arch/riscv/kernel/sbi-ipi.c
new file mode 100644
index 000000000000..41981ab88493
--- /dev/null
+++ b/arch/riscv/kernel/sbi-ipi.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Multiplex several IPIs over a single HW IPI.
+ *
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv: " fmt
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <asm/sbi.h>
+
+static int sbi_ipi_virq;
+
+static void sbi_ipi_handle(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(chip, desc);
+
+	csr_clear(CSR_IP, IE_SIE);
+	ipi_mux_process();
+
+	chained_irq_exit(chip, desc);
+}
+
+static int sbi_ipi_starting_cpu(unsigned int cpu)
+{
+	enable_percpu_irq(sbi_ipi_virq, irq_get_trigger_type(sbi_ipi_virq));
+	return 0;
+}
+
+void __init sbi_ipi_init(void)
+{
+	int virq;
+	struct irq_domain *domain;
+
+	if (riscv_ipi_have_virq_range())
+		return;
+
+	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
+					  DOMAIN_BUS_ANY);
+	if (!domain) {
+		pr_err("unable to find INTC IRQ domain\n");
+		return;
+	}
+
+	sbi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT);
+	if (!sbi_ipi_virq) {
+		pr_err("unable to create INTC IRQ mapping\n");
+		return;
+	}
+
+	virq = ipi_mux_create(BITS_PER_BYTE, sbi_send_ipi);
+	if (virq <= 0) {
+		pr_err("unable to create muxed IPIs\n");
+		irq_dispose_mapping(sbi_ipi_virq);
+		return;
+	}
+
+	irq_set_chained_handler(sbi_ipi_virq, sbi_ipi_handle);
+
+	/*
+	 * Don't disable IPI when CPU goes offline because
+	 * the masking/unmasking of virtual IPIs is done
+	 * via generic IPI-Mux
+	 */
+	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+			  "irqchip/sbi-ipi:starting",
+			  sbi_ipi_starting_cpu, NULL);
+
+	riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
+	pr_info("providing IPIs using SBI IPI extension\n");
+}
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index ac99a70ead6a..92b9b759ab3d 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -17,7 +17,7 @@ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
 EXPORT_SYMBOL(sbi_spec_version);
 
 static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
-static int (*__sbi_send_ipi)(const struct cpumask *cpu_mask) __ro_after_init;
+static void (*__sbi_send_ipi)(unsigned int cpu) __ro_after_init;
 static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
 			   unsigned long start, unsigned long size,
 			   unsigned long arg4, unsigned long arg5) __ro_after_init;
@@ -131,17 +131,6 @@ void sbi_shutdown(void)
 EXPORT_SYMBOL(sbi_shutdown);
 
 /**
- * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
- *
- * Return: None
- */
-void sbi_clear_ipi(void)
-{
-	sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
-}
-EXPORT_SYMBOL(sbi_clear_ipi);
-
-/**
  * __sbi_set_timer_v01() - Program the timer for next timer event.
  * @stime_value: The value after which next timer event should fire.
  *
@@ -157,17 +146,12 @@ static void __sbi_set_timer_v01(uint64_t stime_value)
 #endif
 }
 
-static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
+static void __sbi_send_ipi_v01(unsigned int cpu)
 {
-	unsigned long hart_mask;
-
-	if (!cpu_mask || cpumask_empty(cpu_mask))
-		cpu_mask = cpu_online_mask;
-	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
-
+	unsigned long hart_mask =
+		__sbi_v01_cpumask_to_hartmask(cpumask_of(cpu));
 	sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
 		  0, 0, 0, 0, 0);
-	return 0;
 }
 
 static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
@@ -216,12 +200,10 @@ static void __sbi_set_timer_v01(uint64_t stime_value)
 		sbi_major_version(), sbi_minor_version());
 }
 
-static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
+static void __sbi_send_ipi_v01(unsigned int cpu)
 {
 	pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
 		sbi_major_version(), sbi_minor_version());
-
-	return 0;
 }
 
 static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
@@ -248,55 +230,18 @@ static void __sbi_set_timer_v02(uint64_t stime_value)
 #endif
 }
 
-static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
+static void __sbi_send_ipi_v02(unsigned int cpu)
 {
-	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
-	struct sbiret ret = {0};
 	int result;
+	struct sbiret ret = {0};
 
-	if (!cpu_mask || cpumask_empty(cpu_mask))
-		cpu_mask = cpu_online_mask;
-
-	for_each_cpu(cpuid, cpu_mask) {
-		hartid = cpuid_to_hartid_map(cpuid);
-		if (hmask) {
-			if (hartid + BITS_PER_LONG <= htop ||
-			    hbase + BITS_PER_LONG <= hartid) {
-				ret = sbi_ecall(SBI_EXT_IPI,
-						SBI_EXT_IPI_SEND_IPI, hmask,
-						hbase, 0, 0, 0, 0);
-				if (ret.error)
-					goto ecall_failed;
-				hmask = 0;
-			} else if (hartid < hbase) {
-				/* shift the mask to fit lower hartid */
-				hmask <<= hbase - hartid;
-				hbase = hartid;
-			}
-		}
-		if (!hmask) {
-			hbase = hartid;
-			htop = hartid;
-		} else if (hartid > htop) {
-			htop = hartid;
-		}
-		hmask |= BIT(hartid - hbase);
-	}
-
-	if (hmask) {
-		ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
-				hmask, hbase, 0, 0, 0, 0);
-		if (ret.error)
-			goto ecall_failed;
+	ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
+			1UL, cpuid_to_hartid_map(cpu), 0, 0, 0, 0);
+	if (ret.error) {
+		result = sbi_err_map_linux_errno(ret.error);
+		pr_err("%s: hbase = [%lu] failed (error [%d])\n",
+		       __func__, cpuid_to_hartid_map(cpu), result);
 	}
-
-	return 0;
-
-ecall_failed:
-	result = sbi_err_map_linux_errno(ret.error);
-	pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
-	       __func__, hbase, hmask, result);
-	return result;
 }
 
 static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask,
@@ -410,13 +355,11 @@ void sbi_set_timer(uint64_t stime_value)
 
 /**
  * sbi_send_ipi() - Send an IPI to any hart.
- * @cpu_mask: A cpu mask containing all the target harts.
- *
- * Return: 0 on success, appropriate linux error code otherwise.
+ * @cpu: Logical id of the target CPU.
  */
-int sbi_send_ipi(const struct cpumask *cpu_mask)
+void sbi_send_ipi(unsigned int cpu)
 {
-	return __sbi_send_ipi(cpu_mask);
+	__sbi_send_ipi(cpu);
 }
 EXPORT_SYMBOL(sbi_send_ipi);
 
@@ -641,21 +584,6 @@ long sbi_get_mimpid(void)
 }
 EXPORT_SYMBOL_GPL(sbi_get_mimpid);
 
-static void sbi_send_cpumask_ipi(const struct cpumask *target)
-{
-	sbi_send_ipi(target);
-}
-
-static void sbi_ipi_clear(void)
-{
-	csr_clear(CSR_IP, IE_SIE);
-}
-
-static const struct riscv_ipi_ops sbi_ipi_ops = {
-	.ipi_inject = sbi_send_cpumask_ipi,
-	.ipi_clear = sbi_ipi_clear
-};
-
 void __init sbi_init(void)
 {
 	int ret;
@@ -702,6 +630,4 @@ void __init sbi_init(void)
 		__sbi_send_ipi = __sbi_send_ipi_v01;
 		__sbi_rfence = __sbi_rfence_v01;
 	}
-
-	riscv_set_ipi_ops(&sbi_ipi_ops);
 }
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 8a12768c09ee..47e7ecfedb4d 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -13,14 +13,15 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
+#include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
+#include <linux/irq.h>
 #include <linux/irq_work.h>
 
-#include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -44,11 +45,10 @@ void __init smp_setup_processor_id(void)
 	cpuid_to_hartid_map(0) = boot_cpu_hartid;
 }
 
-/* A collection of single bit ipi messages. */
-static struct {
-	unsigned long stats[IPI_MAX] ____cacheline_aligned;
-	unsigned long bits ____cacheline_aligned;
-} ipi_data[NR_CPUS] __cacheline_aligned;
+static DEFINE_PER_CPU_READ_MOSTLY(int, ipi_dummy_dev);
+static int ipi_virq_base __ro_after_init;
+static int nr_ipi __ro_after_init = IPI_MAX;
+static struct irq_desc *ipi_desc[IPI_MAX] __read_mostly;
 
 int riscv_hartid_to_cpuid(unsigned long hartid)
 {
@@ -100,46 +100,14 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 }
 #endif
 
-static const struct riscv_ipi_ops *ipi_ops __ro_after_init;
-
-void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
-{
-	ipi_ops = ops;
-}
-EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
-
-void riscv_clear_ipi(void)
-{
-	if (ipi_ops && ipi_ops->ipi_clear)
-		ipi_ops->ipi_clear();
-}
-EXPORT_SYMBOL_GPL(riscv_clear_ipi);
-
 static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
 {
-	int cpu;
-
-	smp_mb__before_atomic();
-	for_each_cpu(cpu, mask)
-		set_bit(op, &ipi_data[cpu].bits);
-	smp_mb__after_atomic();
-
-	if (ipi_ops && ipi_ops->ipi_inject)
-		ipi_ops->ipi_inject(mask);
-	else
-		pr_warn("SMP: IPI inject method not available\n");
+	__ipi_send_mask(ipi_desc[op], mask);
 }
 
 static void send_ipi_single(int cpu, enum ipi_message_type op)
 {
-	smp_mb__before_atomic();
-	set_bit(op, &ipi_data[cpu].bits);
-	smp_mb__after_atomic();
-
-	if (ipi_ops && ipi_ops->ipi_inject)
-		ipi_ops->ipi_inject(cpumask_of(cpu));
-	else
-		pr_warn("SMP: IPI inject method not available\n");
+	__ipi_send_mask(ipi_desc[op], cpumask_of(cpu));
 }
 
 #ifdef CONFIG_IRQ_WORK
@@ -149,59 +117,89 @@ void arch_irq_work_raise(void)
 }
 #endif
 
-void handle_IPI(struct pt_regs *regs)
+static irqreturn_t handle_IPI(int irq, void *data)
+{
+	int ipi = irq - ipi_virq_base;
+
+	switch (ipi) {
+	case IPI_RESCHEDULE:
+		scheduler_ipi();
+		break;
+	case IPI_CALL_FUNC:
+		generic_smp_call_function_interrupt();
+		break;
+	case IPI_CPU_STOP:
+		ipi_stop();
+		break;
+	case IPI_CPU_CRASH_STOP:
+		ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs());
+		break;
+	case IPI_IRQ_WORK:
+		irq_work_run();
+		break;
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+	case IPI_TIMER:
+		tick_receive_broadcast();
+		break;
+#endif
+	default:
+		pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
+		break;
+	}
+
+	return IRQ_HANDLED;
+}
+
+void riscv_ipi_enable(void)
 {
-	unsigned int cpu = smp_processor_id();
-	unsigned long *pending_ipis = &ipi_data[cpu].bits;
-	unsigned long *stats = ipi_data[cpu].stats;
+	int i;
 
-	riscv_clear_ipi();
+	if (WARN_ON_ONCE(!ipi_virq_base))
+		return;
 
-	while (true) {
-		unsigned long ops;
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_virq_base + i, 0);
+}
 
-		/* Order bit clearing and data access. */
-		mb();
+void riscv_ipi_disable(void)
+{
+	int i;
 
-		ops = xchg(pending_ipis, 0);
-		if (ops == 0)
-			return;
+	if (WARN_ON_ONCE(!ipi_virq_base))
+		return;
 
-		if (ops & (1 << IPI_RESCHEDULE)) {
-			stats[IPI_RESCHEDULE]++;
-			scheduler_ipi();
-		}
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_virq_base + i);
+}
 
-		if (ops & (1 << IPI_CALL_FUNC)) {
-			stats[IPI_CALL_FUNC]++;
-			generic_smp_call_function_interrupt();
-		}
+bool riscv_ipi_have_virq_range(void)
+{
+	return (ipi_virq_base) ? true : false;
+}
 
-		if (ops & (1 << IPI_CPU_STOP)) {
-			stats[IPI_CPU_STOP]++;
-			ipi_stop();
-		}
+void riscv_ipi_set_virq_range(int virq, int nr)
+{
+	int i, err;
 
-		if (ops & (1 << IPI_CPU_CRASH_STOP)) {
-			ipi_cpu_crash_stop(cpu, get_irq_regs());
-		}
+	if (WARN_ON(ipi_virq_base))
+		return;
 
-		if (ops & (1 << IPI_IRQ_WORK)) {
-			stats[IPI_IRQ_WORK]++;
-			irq_work_run();
-		}
+	WARN_ON(nr < IPI_MAX);
+	nr_ipi = min(nr, IPI_MAX);
+	ipi_virq_base = virq;
 
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-		if (ops & (1 << IPI_TIMER)) {
-			stats[IPI_TIMER]++;
-			tick_receive_broadcast();
-		}
-#endif
-		BUG_ON((ops >> IPI_MAX) != 0);
+	/* Request IPIs */
+	for (i = 0; i < nr_ipi; i++) {
+		err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
+					 "IPI", &ipi_dummy_dev);
+		WARN_ON(err);
 
-		/* Order data access and bit testing. */
-		mb();
+		ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
+		irq_set_status_flags(ipi_virq_base + i, IRQ_HIDDEN);
 	}
+
+	/* Enabled IPIs for boot CPU immediately */
+	riscv_ipi_enable();
 }
 
 static const char * const ipi_names[] = {
@@ -221,7 +219,7 @@ void show_ipi_stats(struct seq_file *p, int prec)
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
+			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
 		seq_printf(p, " %s\n", ipi_names[i]);
 	}
 }
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index ddb2afba6d25..00b53913d4c6 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -30,7 +30,6 @@
 #include <asm/numa.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
-#include <asm/sbi.h>
 #include <asm/smp.h>
 
 #include "head.h"
@@ -158,12 +157,12 @@ asmlinkage __visible void smp_callin(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int curr_cpuid = smp_processor_id();
 
-	riscv_clear_ipi();
-
 	/* All kernel threads share the same mm context. */
 	mmgrab(mm);
 	current->active_mm = mm;
 
+	riscv_ipi_enable();
+
 	store_cpu_topology(curr_cpuid);
 	notify_cpu_starting(curr_cpuid);
 	numa_add_cpu(curr_cpuid);