author		Paul E. McKenney <paulmck@kernel.org>	2021-04-16 02:30:34 +0300
committer	Paul E. McKenney <paulmck@kernel.org>	2021-05-11 02:44:11 +0300
commit		5390473ec1697b71af0e9d63ef7aaa7ecd27e2c9 (patch)
tree		07bb07362e0342d2e61cb65375e63262ddedc810 /kernel/rcu
parent		7ab2bd31df871408792eac871c4187e29d039315 (diff)
download	linux-5390473ec1697b71af0e9d63ef7aaa7ecd27e2c9.tar.xz
rcu: Don't penalize priority boosting when there is nothing to boost
RCU priority boosting cannot do anything unless there is at least one task blocking the current RCU grace period that was preempted within the RCU read-side critical section that it still resides in. However, the current rcu_torture_boost_failed() code will count this as an RCU priority-boosting failure if there were no CPUs blocking the current grace period. This situation can happen (for example) if the last CPU blocking the current grace period was subjected to vCPU preemption, which is always a risk for rcutorture guest OSes.

This commit therefore causes rcu_torture_boost_failed() to refrain from reporting failure unless there is at least one task blocking the current RCU grace period that was preempted within the RCU read-side critical section that it still resides in.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
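To make the decision concrete, here is a minimal, self-contained sketch of the logic this patch introduces in rcu_check_boost_fail(). It is not kernel code: struct toy_rcu_node, toy_check_boost_fail(), and the main() driver are illustrative stand-ins for the real rcu_node tree and the function shown in the diff below; only the qsmask/gp_tasks decision is modeled.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for the per-leaf rcu_node state the patch consults. */
struct toy_rcu_node {
	unsigned long qsmask;	/* CPUs still owing a quiescent state */
	void *gp_tasks;		/* non-NULL: a preempted reader blocks the GP */
};

/*
 * Mirror of the patched decision: blame RCU priority boosting only if
 * no CPU is still holding up the grace period (qsmask == 0 everywhere)
 * AND at least one leaf has a preempted reader (gp_tasks) to boost.
 */
static bool toy_check_boost_fail(const struct toy_rcu_node *nodes, int n)
{
	bool any_task_to_boost = false;	/* the patch's "atb" flag */

	for (int i = 0; i < n; i++) {
		if (nodes[i].qsmask)
			return false;	/* a CPU, not boosting, is to blame */
		if (nodes[i].gp_tasks)
			any_task_to_boost = true;
	}
	/* Before the patch this was an unconditional "return true". */
	return any_task_to_boost;
}

int main(void)
{
	struct toy_rcu_node vcpu_preempted[] = { { 0, NULL } };
	struct toy_rcu_node boost_needed[]   = { { 0, (void *)1 } };

	printf("no blocked readers -> boost failure? %d\n",
	       toy_check_boost_fail(vcpu_preempted, 1));	/* 0: not a failure */
	printf("blocked reader     -> boost failure? %d\n",
	       toy_check_boost_fail(boost_needed, 1));		/* 1: boosting to blame */
	return 0;
}

The key change is the final return: the pre-patch code returned true unconditionally once no CPU could be blamed, whereas the patched code blames boosting only when a preempted reader actually exists.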
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree_stall.h	17
1 file changed, 14 insertions, 3 deletions
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 8bde1b53b0c9..65302518e006 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -723,6 +723,10 @@ static void check_cpu_stall(struct rcu_data *rdp)
* count this as an RCU priority boosting failure. A return of true says
* RCU priority boosting is to blame, and false says otherwise. If false
* is returned, the first of the CPUs to blame is stored through cpup.
+ * If there was no CPU blocking the current grace period, but also nothing
+ * in need of being boosted, *cpup is set to -1. This can happen in case
+ * of vCPU preemption while the last CPU is reporting its quiescent state,
+ * for example.
*
* If cpup is NULL, then a lockless quick check is carried out, suitable
* for high-rate usage. On the other hand, if cpup is non-NULL, each
@@ -730,18 +734,25 @@ static void check_cpu_stall(struct rcu_data *rdp)
*/
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
+ bool atb = false;
int cpu;
unsigned long flags;
struct rcu_node *rnp;
rcu_for_each_leaf_node(rnp) {
if (!cpup) {
- if (READ_ONCE(rnp->qsmask))
+ if (READ_ONCE(rnp->qsmask)) {
return false;
- else
+ } else {
+ if (READ_ONCE(rnp->gp_tasks))
+ atb = true;
continue;
+ }
}
+ *cpup = -1;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ if (rnp->gp_tasks)
+ atb = true;
if (!rnp->qsmask) {
// No CPUs without quiescent states for this rnp.
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -758,7 +769,7 @@ bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
// Can't blame CPUs, so must blame RCU priority boosting.
- return true;
+ return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
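The header comment above implies a three-way outcome for callers that pass a non-NULL cpup. The sketch below is not the actual rcutorture caller (rcu_torture_boost_failed() lives in kernel/rcu/rcutorture.c); it only illustrates how the documented contract might be consumed, assuming kernel context for pr_info() and the rcu_check_boost_fail() prototype, with the messages invented for the example.

/* Hedged caller sketch, not the real rcutorture code. */
static void example_boost_check(unsigned long gp_state)
{
	int cpu = -2;	/* any value the callee never stores */

	if (rcu_check_boost_fail(gp_state, &cpu)) {
		/* A preempted reader still blocks the grace period:
		 * boosting had work to do and did not get it done. */
		pr_info("RCU priority boosting failure\n");
	} else if (cpu >= 0) {
		/* Some CPU never reported a quiescent state: blame it. */
		pr_info("CPU %d is holding up the grace period\n", cpu);
	} else {
		/* cpu == -1: no blocked readers and no CPUs to blame,
		 * e.g. vCPU preemption while the last CPU was reporting. */
		pr_info("Nothing to boost; not counted as a failure\n");
	}
}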