author     Nicholas Piggin <npiggin@gmail.com>      2018-04-25 08:17:59 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>    2018-07-24 15:03:14 +0300
commit     5b73151fff63fb019db8171cb81c6c978533844b (patch)
tree       61c17b513c864371636878b3d47aede38661632c /arch/powerpc/kernel/smp.c
parent     9b81c0211c249c1bc8caec2ddbc86e36c550ce0f (diff)
download   linux-5b73151fff63fb019db8171cb81c6c978533844b.tar.xz
powerpc: NMI IPI make NMI IPIs fully synchronous
There is an asynchronous aspect to smp_send_nmi_ipi. The caller waits
for all CPUs to call in to the handler, but it does not wait for
completion of the handler. This is a needless complication, so remove
it and always wait synchronously.

The synchronous wait allows the caller to easily time out and clear
the wait for completion (zero nmi_ipi_busy_count) in the case of badly
behaved handlers. This would have prevented the recent smp_send_stop
NMI IPI bug from causing the system to hang.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
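To make the pattern concrete, below is a minimal user-space sketch of the
synchronous wait this patch introduces. It is an illustration, not the kernel
code: C11 atomics stand in for nmi_ipi_lock()/nmi_ipi_unlock(), usleep()
stands in for udelay() (real NMI handlers cannot sleep), and the helpers
handler_done()/wait_for_handlers() are hypothetical names chosen to mirror
the patch.

/*
 * Sketch only: plain atomics replace the kernel's NMI IPI lock, and
 * the helper names are hypothetical, modeled on the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

/* 1 means only the caller holds the IPI machinery; >1 means target
 * handlers are still running. */
static atomic_uint nmi_ipi_busy_count = 1;

/* Target side: called at the end of the handler. Decrement only while
 * the count is still above the caller's own reference of 1, because
 * the caller may have timed out and already reset the count (the race
 * the patch comments on). */
static void handler_done(void)
{
	unsigned int cur = atomic_load(&nmi_ipi_busy_count);

	while (cur > 1 &&
	       !atomic_compare_exchange_weak(&nmi_ipi_busy_count, &cur, cur - 1))
		; /* a failed CAS reloads cur; retry */
}

/* Caller side: spin until every handler has finished (count back to 1)
 * or until delay_us expires; delay_us == 0 waits indefinitely, matching
 * the patch's convention. Returns false on timeout. */
static bool wait_for_handlers(unsigned long delay_us)
{
	while (atomic_load(&nmi_ipi_busy_count) > 1) {
		usleep(1);
		if (delay_us && !--delay_us) {
			/* Time out badly behaved handlers: clear the
			 * wait so the caller cannot hang. */
			atomic_store(&nmi_ipi_busy_count, 1);
			return false;
		}
	}
	return true;
}

The design point mirrored from the patch is that both sides tolerate the
timeout race: a handler only decrements while the count is above the caller's
own reference, so a caller that has timed out and reset the count cannot have
it driven below the reference by a late handler.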
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--  arch/powerpc/kernel/smp.c | 50 +++++++++++++++++++++++++++-----------------------
1 file changed, 27 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 4794d6b4f4d2..b19d832ef386 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -423,7 +423,8 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
 	fn(regs);
 
 	nmi_ipi_lock();
-	nmi_ipi_busy_count--;
+	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
+		nmi_ipi_busy_count--;
 out:
 	nmi_ipi_unlock_end(&flags);
 
@@ -448,29 +449,11 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
 	}
 }
 
-void smp_flush_nmi_ipi(u64 delay_us)
-{
-	unsigned long flags;
-
-	nmi_ipi_lock_start(&flags);
-	while (nmi_ipi_busy_count) {
-		nmi_ipi_unlock_end(&flags);
-		udelay(1);
-		if (delay_us) {
-			delay_us--;
-			if (!delay_us)
-				return;
-		}
-		nmi_ipi_lock_start(&flags);
-	}
-	nmi_ipi_unlock_end(&flags);
-}
-
 /*
  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
  * - fn is the target callback function.
  * - delay_us > 0 is the delay before giving up waiting for targets to
- *   enter the handler, == 0 specifies indefinite delay.
+ *   complete executing the handler, == 0 specifies indefinite delay.
  */
 int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 {
@@ -507,8 +490,23 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
 	do_smp_send_nmi_ipi(cpu, safe);
 
+	nmi_ipi_lock();
+	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
+		nmi_ipi_unlock();
 		udelay(1);
+		nmi_ipi_lock();
+		if (delay_us) {
+			delay_us--;
+			if (!delay_us)
+				break;
+		}
+	}
+
+	while (nmi_ipi_busy_count > 1) {
+		nmi_ipi_unlock();
+		udelay(1);
+		nmi_ipi_lock();
 		if (delay_us) {
 			delay_us--;
 			if (!delay_us)
@@ -516,12 +514,17 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
 		}
 	}
 
-	nmi_ipi_lock();
 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
-		/* Could not gather all CPUs */
+		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 		ret = 0;
 		cpumask_clear(&nmi_ipi_pending_mask);
 	}
+	if (nmi_ipi_busy_count > 1) {
+		/* Timeout waiting for CPUs to execute fn */
+		ret = 0;
+		nmi_ipi_busy_count = 1;
+	}
+
 	nmi_ipi_busy_count--;
 	nmi_ipi_unlock_end(&flags);
 
@@ -597,7 +600,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
 	 */
 	nmi_ipi_lock();
-	nmi_ipi_busy_count--;
+	if (nmi_ipi_busy_count > 1)
+		nmi_ipi_busy_count--;
 	nmi_ipi_unlock();
 
 	spin_begin();