author    Michael Ellerman <mpe@ellerman.id.au>  2018-05-02 16:07:27 +0300
committer Michael Ellerman <mpe@ellerman.id.au>  2018-06-03 13:43:43 +0300
commit    6ba55716a24f5f399ad4d37685e4bb721f8e6dd5 (patch)
tree      2197f84b380924e2e5190734c3de32d9ad146d01
parent    7b08729cb272b4cd5c657cd5ac0dddae15a593ff (diff)
download  linux-6ba55716a24f5f399ad4d37685e4bb721f8e6dd5.tar.xz
powerpc/nmi: Add an API for sending "safe" NMIs
Currently the options we have for sending NMIs are not necessarily safe, that is
they can potentially interrupt a CPU in a non-recoverable region of code,
meaning the kernel must then panic().

But we'd like to use smp_send_nmi_ipi() to do cross-CPU calls in situations
where we don't want to risk a panic(), because it doesn't have the requirement
that interrupts must be enabled like smp_call_function().

So add an API for the caller to indicate that it wants to use the NMI
infrastructure, but doesn't want to do anything "unsafe".

Currently that is implemented by not actually calling cause_nmi_ipi(), instead
falling back to an IPI. In future we can pass the safe parameter down to
cause_nmi_ipi() and the individual backends can potentially take it into
account before deciding what to do.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
-rw-r--r--  arch/powerpc/include/asm/smp.h |  1
-rw-r--r--  arch/powerpc/kernel/smp.c      | 20
2 files changed, 16 insertions(+), 5 deletions(-)
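For context, here is a minimal sketch of how a caller might use the new API. The callback and call site names are purely illustrative and not part of this patch; only the smp_send_safe_nmi_ipi() signature comes from the declaration added to asm/smp.h below, and the usual kernel headers (e.g. <linux/smp.h>, <linux/sched/debug.h>) are assumed to be included.

/* Illustrative sketch only -- not part of this patch. */

/* Hypothetical callback: runs on the target CPU with that CPU's pt_regs. */
static void dump_regs_on_cpu(struct pt_regs *regs)
{
	show_regs(regs);
}

/* Hypothetical call site. */
static void trigger_safe_dump(int cpu)
{
	/*
	 * The "safe" variant never calls cause_nmi_ipi(); it falls back to an
	 * ordinary IPI, so it won't interrupt the target in a non-recoverable
	 * region. Wait up to 1s for the target to enter the handler
	 * (delay_us == 0 would mean wait indefinitely).
	 */
	smp_send_safe_nmi_ipi(cpu, dump_regs_on_cpu, 1000000);
}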
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index cfecfee1194b..29ffaabdf75b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -58,6 +58,7 @@ struct smp_ops_t {
extern void smp_flush_nmi_ipi(u64 delay_us);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
+extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
extern void smp_generic_give_timebase(void);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index b009a562c76b..5eadfffabe35 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -430,9 +430,9 @@ out:
return ret;
}
-static void do_smp_send_nmi_ipi(int cpu)
+static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
- if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
+ if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
return;
if (cpu >= 0) {
@@ -472,7 +472,7 @@ void smp_flush_nmi_ipi(u64 delay_us)
* - delay_us > 0 is the delay before giving up waiting for targets to
* enter the handler, == 0 specifies indefinite delay.
*/
-int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
{
unsigned long flags;
int me = raw_smp_processor_id();
@@ -505,7 +505,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
nmi_ipi_busy_count++;
nmi_ipi_unlock();
- do_smp_send_nmi_ipi(cpu);
+ do_smp_send_nmi_ipi(cpu, safe);
while (!cpumask_empty(&nmi_ipi_pending_mask)) {
udelay(1);
@@ -527,6 +527,16 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
return ret;
}
+
+int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+{
+ return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
+}
+
+int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+{
+ return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
+}
#endif /* CONFIG_NMI_IPI */
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
@@ -570,7 +580,7 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
* entire NMI dance and waiting for
* cpus to clear pending mask, etc.
*/
- do_smp_send_nmi_ipi(cpu);
+ do_smp_send_nmi_ipi(cpu, false);
}
}
}