| author | Paul E. McKenney <paulmck@linux.ibm.com> | 2019-03-25 18:36:03 +0300 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.ibm.com> | 2019-05-26 00:50:49 +0300 |
| commit | 43e903ad3e0843d03da15d8eaffb5ada22966c76 (patch) | |
| tree | b4145d17c55323defaab03f8f202394e34b045cc /kernel/rcu | |
| parent | 0864f057b050bc6dd68106b3185e02db5140012d (diff) | |
| download | linux-43e903ad3e0843d03da15d8eaffb5ada22966c76.tar.xz | |
rcu: Inline invoke_rcu_callbacks() into its sole remaining caller
This commit saves a few lines of code by inlining invoke_rcu_callbacks()
into its sole remaining caller.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r-- | kernel/rcu/tree.c | 20 |
1 file changed, 3 insertions(+), 17 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8e290163505a..7822a2e1370d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -147,7 +147,6 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
-static void invoke_rcu_callbacks(struct rcu_data *rdp);
 static void rcu_report_exp_rdp(struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
@@ -2296,8 +2295,9 @@ static __latent_entropy void rcu_core(void)
 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
 
 	/* If there are callbacks ready, invoke them. */
-	if (rcu_segcblist_ready_cbs(&rdp->cblist))
-		invoke_rcu_callbacks(rdp);
+	if (rcu_segcblist_ready_cbs(&rdp->cblist) &&
+	    likely(READ_ONCE(rcu_scheduler_fully_active)))
+		rcu_do_batch(rdp);
 
 	/* Do any needed deferred wakeups of rcuo kthreads. */
 	do_nocb_deferred_wakeup(rdp);
@@ -2333,20 +2333,6 @@ static void invoke_rcu_core_kthread(void)
 }
 
 /*
- * Do RCU callback invocation. Not that if we are running !use_softirq,
- * we are already in the rcuc kthread. If callbacks are offloaded, then
- * ->cblist is always empty, so we don't get here. Therefore, we only
- * ever need to check for the scheduler being operational (some callbacks
- * do wakeups, so we do need the scheduler).
- */
-static void invoke_rcu_callbacks(struct rcu_data *rdp)
-{
-	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
-		return;
-	rcu_do_batch(rdp);
-}
-
-/*
  * Wake up this CPU's rcuc kthread to do RCU core processing.
  */
 static void invoke_rcu_core(void)
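
For readers outside the RCU code, here is a minimal standalone sketch of the transformation the hunks above apply: a trivial guard wrapper is deleted and its check is folded into the condition at its sole call site. This is not kernel code; the names scheduler_ready, have_ready_cbs, do_batch(), invoke_callbacks(), core_before(), and core_after() are hypothetical stand-ins for rcu_scheduler_fully_active, rcu_segcblist_ready_cbs(), rcu_do_batch(), invoke_rcu_callbacks(), and rcu_core().

/*
 * Standalone sketch (not kernel code) of inlining a guard wrapper
 * into its only caller, mirroring the rcu_core() change above.
 */
#include <stdbool.h>
#include <stdio.h>

static bool scheduler_ready = true;	/* stand-in for rcu_scheduler_fully_active */
static bool have_ready_cbs = true;	/* stand-in for rcu_segcblist_ready_cbs() */

static void do_batch(void)		/* stand-in for rcu_do_batch() */
{
	printf("invoking ready callbacks\n");
}

/* Before: a wrapper whose only remaining job is the scheduler check. */
static void invoke_callbacks(void)
{
	if (!scheduler_ready)
		return;
	do_batch();
}

static void core_before(void)
{
	if (have_ready_cbs)
		invoke_callbacks();
}

/* After: the wrapper is gone and its guard is merged into the
 * caller's condition. */
static void core_after(void)
{
	if (have_ready_cbs && scheduler_ready)
		do_batch();
}

int main(void)
{
	core_before();	/* both variants invoke the batch exactly once */
	core_after();
	return 0;
}

The observable behavior is unchanged; in the actual patch the wrapper's unlikely(!READ_ONCE(rcu_scheduler_fully_active)) early return is simply re-expressed as likely(READ_ONCE(rcu_scheduler_fully_active)) in the caller's condition, preserving the same branch-prediction hint.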