From 2eed973adc6e749439730e53e6220b122398d319 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 16 Feb 2022 16:42:06 +0100
Subject: rcu: Assume rcu_init() is called before smp

The rcu_init() function is called way before SMP is initialized and
therefore only the boot CPU should be online at this stage.

Simplify the boot per-cpu initialization accordingly.

Signed-off-by: Frederic Weisbecker
Cc: Neeraj Upadhyay
Cc: Uladzislau Rezki
Cc: Joel Fernandes
Cc: Boqun Feng
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/tree.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'kernel/rcu/tree.c')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a4b8189455d5..e6a9e5744e45 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4782,7 +4782,7 @@ static void __init kfree_rcu_batch_init(void)
 
 void __init rcu_init(void)
 {
-	int cpu;
+	int cpu = smp_processor_id();
 
 	rcu_early_boot_tests();
 
@@ -4802,11 +4802,10 @@ void __init rcu_init(void)
 	 * or the scheduler are operational.
	 */
 	pm_notifier(rcu_pm_notify, 0);
-	for_each_online_cpu(cpu) {
-		rcutree_prepare_cpu(cpu);
-		rcu_cpu_starting(cpu);
-		rcutree_online_cpu(cpu);
-	}
+	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
+	rcutree_prepare_cpu(cpu);
+	rcu_cpu_starting(cpu);
+	rcutree_online_cpu(cpu);
 
 	/* Create workqueue for Tree SRCU and for expedited GPs. */
 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
--
cgit v1.2.3
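The same single-CPU assumption can be reused by other early boot-time setup
code. A minimal, hypothetical sketch of the pattern (struct foo_pcpu and
foo_early_init() are made-up names, not part of this patch):

	struct foo_pcpu {
		unsigned long seq;
	};
	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);

	static void __init foo_early_init(void)
	{
		int cpu = smp_processor_id();	/* The boot CPU. */

		/* Secondary CPUs are brought up later, by smp_init(). */
		WARN_ON_ONCE(num_online_cpus() > 1);

		/*
		 * Set up only the boot CPU here; the other CPUs get their
		 * state from a CPU-hotplug "prepare" callback when they
		 * come online.
		 */
		per_cpu_ptr(&foo_pcpu, cpu)->seq = 0;
	}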
From 3352911fa9b47a90165e5c6fed440048c55146d1 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 16 Feb 2022 16:42:07 +0100
Subject: rcu: Initialize boost kthread only for boot node prior SMP initialization

The rcu_spawn_gp_kthread() function is called as an early initcall, which
means that SMP initialization hasn't happened yet and only the boot CPU is
online. Therefore, create only the boost kthread for the leaf node of the
boot CPU.

Signed-off-by: Frederic Weisbecker
Cc: Neeraj Upadhyay
Cc: Uladzislau Rezki
Cc: Joel Fernandes
Cc: Boqun Feng
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/tree.c        |  5 ++++-
 kernel/rcu/tree.h        |  1 -
 kernel/rcu/tree_plugin.h | 16 ----------------
 3 files changed, 4 insertions(+), 18 deletions(-)

(limited to 'kernel/rcu/tree.c')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e6a9e5744e45..70b33c55d39a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4480,6 +4480,7 @@ static int __init rcu_spawn_gp_kthread(void)
 	struct rcu_node *rnp;
 	struct sched_param sp;
 	struct task_struct *t;
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
 	rcu_scheduler_fully_active = 1;
 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
@@ -4498,7 +4499,9 @@ static int __init rcu_spawn_gp_kthread(void)
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	wake_up_process(t);
 	rcu_spawn_nocb_kthreads();
-	rcu_spawn_boost_kthreads();
+	/* This is a pre-SMP initcall, we expect a single CPU */
+	WARN_ON(num_online_cpus() > 1);
+	rcu_spawn_one_boost_kthread(rdp->mynode);
 	rcu_spawn_core_kthreads();
 	return 0;
 }
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index f6a3d54585c9..b2a0f2613ab9 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -422,7 +422,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static bool rcu_is_callbacks_kthread(void);
 static void rcu_cpu_kthread_setup(unsigned int cpu);
 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
-static void __init rcu_spawn_boost_kthreads(void);
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
 static void rcu_preempt_deferred_qs(struct task_struct *t);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 8360d86db1c0..b139635f33bd 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1226,18 +1226,6 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	free_cpumask_var(cm);
 }
 
-/*
- * Spawn boost kthreads -- called as soon as the scheduler is running.
- */
-static void __init rcu_spawn_boost_kthreads(void)
-{
-	struct rcu_node *rnp;
-
-	rcu_for_each_leaf_node(rnp)
-		if (rcu_rnp_online_cpus(rnp))
-			rcu_spawn_one_boost_kthread(rnp);
-}
-
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
@@ -1263,10 +1251,6 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static void __init rcu_spawn_boost_kthreads(void)
-{
-}
-
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 /*
--
cgit v1.2.3
From 87c5adf06bfbf14c9d13e59d5d174ff5f2aafc0e Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 16 Feb 2022 16:42:08 +0100
Subject: rcu/nocb: Initialize nocb kthreads only for boot CPU prior SMP initialization

The rcu_spawn_gp_kthread() function is called as an early initcall, which
means that SMP initialization hasn't happened yet and only the boot CPU is
online. Therefore, create only the NOCB kthreads related to the boot CPU.

Signed-off-by: Frederic Weisbecker
Cc: Neeraj Upadhyay
Cc: Uladzislau Rezki
Cc: Joel Fernandes
Cc: Boqun Feng
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/tree.c      |  6 +++++-
 kernel/rcu/tree.h      |  1 -
 kernel/rcu/tree_nocb.h | 20 --------------------
 3 files changed, 5 insertions(+), 22 deletions(-)

(limited to 'kernel/rcu/tree.c')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 70b33c55d39a..9f7441a78f90 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4498,9 +4498,13 @@ static int __init rcu_spawn_gp_kthread(void)
 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	wake_up_process(t);
-	rcu_spawn_nocb_kthreads();
 	/* This is a pre-SMP initcall, we expect a single CPU */
 	WARN_ON(num_online_cpus() > 1);
+	/*
+	 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
+	 * due to rcu_scheduler_fully_active.
+	 */
+	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
 	rcu_spawn_one_boost_kthread(rdp->mynode);
 	rcu_spawn_core_kthreads();
 	return 0;
 }
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index b2a0f2613ab9..25dc4166f218 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -439,7 +439,6 @@ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_cpu_nocb_kthread(int cpu);
-static void __init rcu_spawn_nocb_kthreads(void);
 static void show_rcu_nocb_state(struct rcu_data *rdp);
 static void rcu_nocb_lock(struct rcu_data *rdp);
 static void rcu_nocb_unlock(struct rcu_data *rdp);
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 3c00240833d6..46694e13398a 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1266,22 +1266,6 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
 }
 
-/*
- * Once the scheduler is running, spawn rcuo kthreads for all online
- * no-CBs CPUs. This assumes that the early_initcall()s happen before
- * non-boot CPUs come online -- if this changes, we will need to add
- * some mutual exclusion.
- */
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-	int cpu;
-
-	if (rcu_state.nocb_is_setup) {
-		for_each_online_cpu(cpu)
-			rcu_spawn_cpu_nocb_kthread(cpu);
-	}
-}
-
 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
 static int rcu_nocb_gp_stride = -1;
 module_param(rcu_nocb_gp_stride, int, 0444);
@@ -1538,10 +1522,6 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 {
 }
 
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-}
-
 static void show_rcu_nocb_state(struct rcu_data *rdp)
 {
 }
--
cgit v1.2.3
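Both of the two patches above lean on the same ordering: an early_initcall()
runs after the scheduler can create kthreads but before secondary CPUs come
online, so per-CPU kthreads for those CPUs are left to the CPU-hotplug path.
A rough, hypothetical sketch of that pattern (all foo_* names are invented
for illustration and are not part of these patches):

	static struct task_struct *foo_boot_kthread;

	static int foo_kthread_fn(void *unused)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);	/* Placeholder work loop. */
		return 0;
	}

	static int __init foo_spawn_boot_kthread(void)
	{
		/* early_initcall(): the scheduler is running, SMP is not yet. */
		WARN_ON_ONCE(num_online_cpus() > 1);

		/*
		 * Spawn only the boot CPU's kthread here; the rest are
		 * created from a CPU-hotplug online callback as CPUs appear.
		 */
		foo_boot_kthread = kthread_run(foo_kthread_fn, NULL,
					       "foo/%d", smp_processor_id());
		return PTR_ERR_OR_ZERO(foo_boot_kthread);
	}
	early_initcall(foo_spawn_boot_kthread);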
From 5d90070816534882b9158f14154b7e2cdef1194a Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 4 Mar 2022 10:41:44 -0800
Subject: rcu-tasks: Make Tasks RCU account for userspace execution

The main Tasks RCU quiescent state is voluntary context switch. However,
userspace execution is also a valid quiescent state, and is a valuable one
for userspace applications that spin repeatedly executing light-weight
non-sleeping system calls. Currently, such an application can delay a Tasks
RCU grace period for many tens of seconds.

This commit therefore enlists the aid of the scheduler-clock interrupt to
provide a Tasks RCU quiescent state when it interrupted a task executing in
userspace.

[ paulmck: Apply feedback from kernel test robot. ]

Cc: Martin KaFai Lau
Cc: Neil Spring
Signed-off-by: Paul E. McKenney
---
 include/linux/rcupdate.h | 1 +
 kernel/rcu/tree.c        | 2 ++
 2 files changed, 3 insertions(+)

(limited to 'kernel/rcu/tree.c')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e7c39c200e2b..1a32036c918c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -196,6 +196,7 @@ void synchronize_rcu_tasks_rude(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
 #define rcu_tasks_qs(t, preempt) do { } while (0)
 #define rcu_note_voluntary_context_switch(t) do { } while (0)
 #define call_rcu_tasks call_rcu
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a4b8189455d5..8dbfb63f0391 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2624,6 +2624,8 @@ void rcu_sched_clock_irq(int user)
 	rcu_flavor_sched_clock_irq(user);
 	if (rcu_pending(user))
 		invoke_rcu_core();
+	if (user)
+		rcu_tasks_classic_qs(current, false);
 	lockdep_assert_irqs_disabled();
 
 	trace_rcu_utilization(TPS("End scheduler-tick"));
--
cgit v1.2.3
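The sort of workload the log message describes — a task that never blocks
but keeps making cheap system calls — might look like the following from
userspace (purely illustrative):

	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		for (;;)
			syscall(SYS_getpid);	/* Light-weight, non-sleeping system call. */
	}

Such a loop never performs a voluntary context switch, so before this change
Tasks RCU could wait on it for tens of seconds; afterwards, the first
scheduler-clock interrupt that lands while the task is executing in userspace
reports the quiescent state through rcu_tasks_classic_qs().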
From 99d6a2acb8955f12489bfba04f2db22bc0b57726 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 4 Feb 2022 12:45:18 -0800
Subject: rcutorture: Suppress debugging grace period delays during flooding

Tree RCU supports grace-period delays using the rcutree.gp_cleanup_delay,
rcutree.gp_init_delay, and rcutree.gp_preinit_delay kernel boot parameters.
These delays are strictly for debugging purposes, and have proven quite
effective at exposing bugs involving race with CPU-hotplug operations.
However, these delays can result in false positives when used in conjunction
with callback flooding, for example, those generated by the
rcutorture.fwd_progress kernel boot parameter. This commit therefore
suppresses grace-period delays while callback flooding is in progress.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcu.h        |  4 ++++
 kernel/rcu/rcutorture.c |  4 ++++
 kernel/rcu/tree.c       | 32 +++++++++++++++++++++++++++++---
 3 files changed, 37 insertions(+), 3 deletions(-)

(limited to 'kernel/rcu/tree.c')

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 24b5f2c2de87..7a221393fcdb 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -523,6 +523,8 @@ static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { ret
 static inline void show_rcu_gp_kthreads(void) { }
 static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
 static inline void rcu_fwd_progress_check(unsigned long j) { }
+static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
+static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
 #else /* #ifdef CONFIG_TINY_RCU */
 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
 unsigned long rcu_get_gp_seq(void);
@@ -535,6 +537,8 @@ void rcu_fwd_progress_check(unsigned long j);
 void rcu_force_quiescent_state(void);
 extern struct workqueue_struct *rcu_gp_wq;
 extern struct workqueue_struct *rcu_par_gp_wq;
+void rcu_gp_slow_register(atomic_t *rgssp);
+void rcu_gp_slow_unregister(atomic_t *rgssp);
 #endif /* #else #ifdef CONFIG_TINY_RCU */
 
 #ifdef CONFIG_RCU_NOCB_CPU
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 55d049c39608..f37b7a01dcd0 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2916,10 +2916,12 @@ rcu_torture_cleanup(void)
 			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
 			cur_ops->cb_barrier();
 		}
+		rcu_gp_slow_unregister(NULL);
 		return;
 	}
 	if (!cur_ops) {
 		torture_cleanup_end();
+		rcu_gp_slow_unregister(NULL);
 		return;
 	}
 
@@ -3016,6 +3018,7 @@ rcu_torture_cleanup(void)
 	else
 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
 	torture_cleanup_end();
+	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
 }
 
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
@@ -3320,6 +3323,7 @@ rcu_torture_init(void)
 	if (object_debug)
 		rcu_test_debug_objects();
 	torture_init_end();
+	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
 	return 0;
 
 unwind:
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a4b8189455d5..db67dae8ed88 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1705,11 +1705,37 @@ static void note_gp_changes(struct rcu_data *rdp)
 		rcu_gp_kthread_wake();
 }
 
+static atomic_t *rcu_gp_slow_suppress;
+
+/* Register a counter to suppress debugging grace-period delays. */
+void rcu_gp_slow_register(atomic_t *rgssp)
+{
+	WARN_ON_ONCE(rcu_gp_slow_suppress);
+
+	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
+}
+EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
+
+/* Unregister a counter, with NULL for not caring which. */
+void rcu_gp_slow_unregister(atomic_t *rgssp)
+{
+	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
+
+	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
+}
+EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
+
+static bool rcu_gp_slow_is_suppressed(void)
+{
+	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
+
+	return rgssp && atomic_read(rgssp);
+}
+
 static void rcu_gp_slow(int delay)
 {
-	if (delay > 0 &&
-	    !(rcu_seq_ctr(rcu_state.gp_seq) %
-	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
+	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
 		schedule_timeout_idle(delay);
 }
--
cgit v1.2.3
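The register/unregister pair added above is consumed by rcu_gp_slow(): any
non-zero value in the registered atomic_t suppresses the debugging delays.
A hypothetical in-tree user might wire it up roughly as follows (all my_*
names are invented; rcutorture itself registers its rcu_fwd_cb_nodelay
counter, as the diff shows):

	static atomic_t my_flood_active;

	static int __init my_debug_init(void)
	{
		/* Tell rcu_gp_slow() which counter to consult. */
		rcu_gp_slow_register(&my_flood_active);
		return 0;
	}

	static void my_flood_callbacks(void)
	{
		atomic_inc(&my_flood_active);	/* Non-zero: delays suppressed. */
		/* ... post a large burst of call_rcu() callbacks ... */
		atomic_dec(&my_flood_active);	/* Back to zero: delays allowed again. */
	}

	static void my_debug_cleanup(void)
	{
		rcu_gp_slow_unregister(&my_flood_active);	/* Or NULL to skip the check. */
	}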
McKenney" Date: Wed, 23 Feb 2022 17:29:37 -0800 Subject: rcu: Check for jiffies going backwards A report of a 12-jiffy normal RCU CPU stall warning raises interesting questions about the nature of time on the offending system. This commit instruments rcu_sched_clock_irq(), which is RCU's hook into the scheduling-clock interrupt, checking for the jiffies counter going backwards. Reported-by: Saravanan D Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 10 ++++++++++ kernel/rcu/tree.h | 1 + 2 files changed, 11 insertions(+) (limited to 'kernel/rcu/tree.c') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index a4b8189455d5..a5ea67454640 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1679,6 +1679,8 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); + if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap)) + WRITE_ONCE(rdp->last_sched_clock, jiffies); WRITE_ONCE(rdp->gpwrap, false); rcu_gpnum_ovf(rnp, rdp); return ret; @@ -2609,6 +2611,13 @@ static void rcu_do_batch(struct rcu_data *rdp) */ void rcu_sched_clock_irq(int user) { + unsigned long j; + + if (IS_ENABLED(CONFIG_PROVE_RCU)) { + j = jiffies; + WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock))); + __this_cpu_write(rcu_data.last_sched_clock, j); + } trace_rcu_utilization(TPS("Start scheduler-tick")); lockdep_assert_irqs_disabled(); raw_cpu_inc(rcu_data.ticks_this_gp); @@ -4179,6 +4188,7 @@ rcu_boot_init_percpu_data(int cpu) rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; rdp->rcu_onl_gp_seq = rcu_state.gp_seq; rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; + rdp->last_sched_clock = jiffies; rdp->cpu = cpu; rcu_boot_init_nocb_percpu_data(rdp); } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 926673ebe355..94b55f669915 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -254,6 +254,7 @@ struct rcu_data { unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */ short rcu_onl_gp_flags; /* ->gp_flags at last online. */ unsigned long last_fqs_resched; /* Time of last rcu_resched(). */ + unsigned long last_sched_clock; /* Jiffies of last rcu_sched_clock_irq(). */ int cpu; }; -- cgit v1.2.3 From 75182a4eaaf8b697f66d68ad039f021f461dd2a4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 2 Mar 2022 11:01:37 -0800 Subject: rcu: Add comments to final rcu_gp_cleanup() "if" statement The final "if" statement in rcu_gp_cleanup() has proven to be rather confusing, straightforward though it might have seemed when initially written. This commit therefore adds comments to its "then" and "else" clauses to at least provide a more elevated form of confusion. Reported-by: Boqun Feng Reported-by: Frederic Weisbecker Reported-by: Neeraj Upadhyay Reported-by: Uladzislau Rezki Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'kernel/rcu/tree.c') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index a5ea67454640..29669070348e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2098,14 +2098,29 @@ static noinline void rcu_gp_cleanup(void) /* Advance CBs to reduce false positives below. 
From 75182a4eaaf8b697f66d68ad039f021f461dd2a4 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 2 Mar 2022 11:01:37 -0800
Subject: rcu: Add comments to final rcu_gp_cleanup() "if" statement

The final "if" statement in rcu_gp_cleanup() has proven to be rather
confusing, straightforward though it might have seemed when initially
written. This commit therefore adds comments to its "then" and "else"
clauses to at least provide a more elevated form of confusion.

Reported-by: Boqun Feng
Reported-by: Frederic Weisbecker
Reported-by: Neeraj Upadhyay
Reported-by: Uladzislau Rezki
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/tree.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

(limited to 'kernel/rcu/tree.c')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a5ea67454640..29669070348e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2098,14 +2098,29 @@ static noinline void rcu_gp_cleanup(void)
 	/* Advance CBs to reduce false positives below. */
 	offloaded = rcu_rdp_is_offloaded(rdp);
 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
+
+		// We get here if a grace period was needed ("needgp")
+		// and the above call to rcu_accelerate_cbs() did not set
+		// the RCU_GP_FLAG_INIT bit in ->gp_state (which records
+		// the need for another grace period).  The purpose
+		// of the "offloaded" check is to avoid invoking
+		// rcu_accelerate_cbs() on an offloaded CPU because we do not
+		// hold the ->nocb_lock needed to safely access an offloaded
+		// ->cblist.  We do not want to acquire that lock because
+		// it can be heavily contended during callback floods.
+
 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
-		trace_rcu_grace_period(rcu_state.name,
-				       rcu_state.gp_seq,
-				       TPS("newreq"));
+		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
 	} else {
-		WRITE_ONCE(rcu_state.gp_flags,
-			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
+
+		// We get here either if there is no need for an
+		// additional grace period or if rcu_accelerate_cbs() has
+		// already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
+		// So all we need to do is to clear all of the other
+		// ->gp_flags bits.
+
+		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
 	}
 	raw_spin_unlock_irq_rcu_node(rnp);
--
cgit v1.2.3

From 70ae7b0ce03347fab35d6d8df81e1165d7ea8045 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Mon, 14 Mar 2022 14:37:38 +0100
Subject: rcu: Fix preemption mode check on synchronize_rcu[_expedited]()

An early check on synchronize_rcu[_expedited]() tries to determine if the
current CPU is in UP mode on an SMP no-preempt kernel, in which case there
is no need to start a grace period since the current assumed quiescent
state is all we need.

However the preemption mode doesn't take into account the boot selected
preemption mode under CONFIG_PREEMPT_DYNAMIC=y, missing a possible early
return if the running flavour is "none" or "voluntary".

Use the shiny new preempt mode accessors to fix this. However, avoid
invoking them during early boot because doing so triggers a
WARN_ON_ONCE().

[ paulmck: Update for mainlined API. ]

Reported-by: Paul E. McKenney
Signed-off-by: Frederic Weisbecker
Cc: Uladzislau Rezki
Cc: Joel Fernandes
Cc: Boqun Feng
Cc: Peter Zijlstra
Cc: Neeraj Upadhyay
Cc: Valentin Schneider
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/tree.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'kernel/rcu/tree.c')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 29669070348e..d3caa82b9954 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3741,7 +3741,9 @@ static int rcu_blocking_is_gp(void)
 {
 	int ret;
 
-	if (IS_ENABLED(CONFIG_PREEMPTION))
+	// Invoking preempt_model_*() too early gets a splat.
+	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE ||
+	    preempt_model_full() || preempt_model_rt())
 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
 	might_sleep();  /* Check for RCU read-side critical section. */
 	preempt_disable();
--
cgit v1.2.3
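For reference, the corrected test can be read as a two-step decision; a
condensed sketch of the same logic (my_up_fastpath_ok() is an invented
wrapper, not part of the patch):

	static bool my_up_fastpath_ok(void)
	{
		/*
		 * Too early in boot: the preempt_model_*() accessors would
		 * WARN, and everything is a quiescent state anyway.
		 */
		if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
			return true;

		/*
		 * With CONFIG_PREEMPT_DYNAMIC=y the model is picked at boot
		 * ("preempt=none|voluntary|full"), so check it at run time:
		 * only the non-preemptible flavours may treat the current
		 * CPU's state as an implicit quiescent state.
		 */
		return !preempt_model_full() && !preempt_model_rt();
	}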