author		Paul E. McKenney <paulmck@kernel.org>	2021-09-19 06:40:48 +0300
committer	Paul E. McKenney <paulmck@kernel.org>	2021-12-01 04:29:06 +0300
commit		f5dbc594b5bac1fa694174032b8d3d0249945fd3 (patch)
tree		b05097ac49921275a8264d51b5d14210cb8557bd /kernel/rcu/tasks.h
parent		8c0abfd6d2f6b0221194241ac2908751a2a0385f (diff)
download	linux-f5dbc594b5bac1fa694174032b8d3d0249945fd3.tar.xz
rcu-tasks: Don't remove tasks with pending IPIs from holdout list
Currently, the check_all_holdout_tasks_trace() function removes all tasks
marked with ->trc_reader_checked from the holdout list, including those with
IPIs pending. This means that the IPI handler might arrive at a task that has
already been removed from the list, which is at best an accident waiting to
happen.

This commit therefore avoids removing tasks with IPIs pending from the
holdout list. This in turn means that the "if" condition in the
for_each_online_cpu() loop in rcu_tasks_trace_postgp() should always
evaluate to false, so a WARN_ON_ONCE() is added to check that.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
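For the ordering this patch relies on: the new smp_load_acquire() of
->trc_ipi_to_cpu pairs with a release store performed by the IPI handler once
it has finished examining the task. Assuming (as the kernel's IPI handler
appears to do) that the handler first sets ->trc_reader_checked and then
resets ->trc_ipi_to_cpu to -1 with release semantics, an acquire load that
observes -1 is guaranteed to also observe the updated ->trc_reader_checked.
Below is a minimal userspace sketch of that pairing using C11 atomics in
place of smp_load_acquire()/smp_store_release(); the field names mirror the
kernel's, but the model itself is illustrative, not code from tasks.h:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <threads.h>

	struct task_model {
		atomic_int trc_ipi_to_cpu;	/* -1 once the modeled IPI handler is done */
		atomic_bool trc_reader_checked;
	};

	/* Models the IPI handler: mark the task checked, then publish completion. */
	static int ipi_handler(void *arg)
	{
		struct task_model *t = arg;

		atomic_store_explicit(&t->trc_reader_checked, true,
				      memory_order_relaxed);
		/* Release store: makes the line above visible to acquire loads of -1. */
		atomic_store_explicit(&t->trc_ipi_to_cpu, -1, memory_order_release);
		return 0;
	}

	/* Models the holdout-list scan: trust ->trc_reader_checked only after the
	 * acquire load shows that no IPI is pending. */
	static bool may_remove_from_holdout(struct task_model *t)
	{
		return atomic_load_explicit(&t->trc_ipi_to_cpu,
					    memory_order_acquire) == -1 &&
		       atomic_load_explicit(&t->trc_reader_checked,
					    memory_order_relaxed);
	}

	int main(void)
	{
		struct task_model t;
		thrd_t thr;

		atomic_init(&t.trc_ipi_to_cpu, 0);	/* 0: IPI "in flight" */
		atomic_init(&t.trc_reader_checked, false);
		thrd_create(&thr, ipi_handler, &t);
		while (!may_remove_from_holdout(&t))
			;	/* spin until the modeled IPI completes */
		thrd_join(thr, NULL);
		printf("IPI done and task checked: safe to remove from holdout list\n");
		return 0;
	}

Without the acquire/-1 check, the scan could act on ->trc_reader_checked
while the handler still holds a reference to the task, which is exactly the
window this patch closes.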
Diffstat (limited to 'kernel/rcu/tasks.h')
-rw-r--r--	kernel/rcu/tasks.h	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 7da3c81c3f59..bd44cd4794d3 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -1121,7 +1121,8 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
 		trc_wait_for_one_reader(t, hop);
 		// If check succeeded, remove this task from the list.
-		if (READ_ONCE(t->trc_reader_checked))
+		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
+		    READ_ONCE(t->trc_reader_checked))
 			trc_del_holdout(t);
 		else if (needreport)
 			show_stalled_task_trace(t, firstreport);
@@ -1156,7 +1157,7 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 	// Yes, this assumes that CPUs process IPIs in order. If that ever
 	// changes, there will need to be a recheck and/or timed wait.
 	for_each_online_cpu(cpu)
-		if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
+		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
 			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
 	// Remove the safety count.
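The second hunk leans on the fact that the kernel's WARN_ON_ONCE(cond)
evaluates to cond, so it can sit directly in an if condition: the warning
fires (once) if the supposedly impossible case ever occurs, while the
original fallback IPI is still sent. A minimal userspace sketch of that
idiom, with a hypothetical warn_on_once() macro standing in for the kernel's
(GNU C statement expressions, as the kernel itself uses):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for WARN_ON_ONCE(): evaluates to the condition
	 * itself, printing a warning only the first time it is true at this
	 * call site. Only the evaluates-to-cond behavior matches the kernel's. */
	#define warn_on_once(cond)					\
		({							\
			static bool warned;				\
			bool c = (cond);				\
			if (c && !warned) {				\
				warned = true;				\
				fprintf(stderr, "warn: %s\n", #cond);	\
			}						\
			c;						\
		})

	static int ipi_pending(int cpu)
	{
		return cpu == 2;	/* pretend CPU 2 still has an IPI pending */
	}

	int main(void)
	{
		for (int cpu = 0; cpu < 4; cpu++)
			if (warn_on_once(ipi_pending(cpu)))
				printf("fallback path taken for cpu %d\n", cpu);
		return 0;
	}

This keeps the defensive recovery code in place while making any violation of
the patch's invariant loudly visible exactly once.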