author	Paul E. McKenney <paulmck@linux.ibm.com>	2018-11-29 22:50:04 +0300
committer	Paul E. McKenney <paulmck@linux.ibm.com>	2019-01-26 02:28:29 +0300
commit	892307266429f6439803f2735a59a4bc58a6ded4 (patch)
tree	edd1fd34ac78990fc2e5e2e7a1318ee636b22813 /kernel/rcu
parent	e5bc3af7734f90278a47906d917852a85544510b (diff)
download	linux-892307266429f6439803f2735a59a4bc58a6ded4.tar.xz
rcu: Inline _synchronize_rcu_expedited() into synchronize_rcu_expedited()
Now that _synchronize_rcu_expedited() has only one caller, and given that this is a tail call, this commit inlines _synchronize_rcu_expedited() into synchronize_rcu_expedited().

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
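The refactoring itself is the usual one for a static helper that has exactly one caller and is only ever reached as a tail call: move its body into the caller and delete the helper. A minimal user-space sketch of that shape, with hypothetical names (none of these appear in the kernel), is shown below; the real before/after is in the diff that follows.

#include <stdio.h>

/* Before: slow_path() is a static helper with exactly one caller,
 * and that caller invokes it as its final action (a tail call). */
static void slow_path(int arg)
{
	printf("slow-path work for %d\n", arg);
}

static void api_before(int arg)
{
	if (arg < 0)
		return;			/* fast-path early exit */
	slow_path(arg);			/* tail call: nothing runs afterwards */
}

/* After: the helper's body is folded into its (former) caller and the
 * helper is deleted, which is the shape of the change in this commit. */
static void api_after(int arg)
{
	if (arg < 0)
		return;			/* fast-path early exit */
	printf("slow-path work for %d\n", arg);
}

int main(void)
{
	api_before(1);
	api_after(2);
	return 0;
}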
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree_exp.h	81
1 file changed, 36 insertions(+), 45 deletions(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 353d113c0cd4..d882ca0cd01b 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -599,50 +599,6 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
rcu_exp_sel_wait_wake(rewp->rew_s);
}
-/*
- * Given a smp_call_function() handler, kick off the specified
- * implementation of expedited grace period.
- */
-static void _synchronize_rcu_expedited(void)
-{
- struct rcu_data *rdp;
- struct rcu_exp_work rew;
- struct rcu_node *rnp;
- unsigned long s;
-
- /* If expedited grace periods are prohibited, fall back to normal. */
- if (rcu_gp_is_normal()) {
- wait_rcu_gp(call_rcu);
- return;
- }
-
- /* Take a snapshot of the sequence number. */
- s = rcu_exp_gp_seq_snap();
- if (exp_funnel_lock(s))
- return; /* Someone else did our work for us. */
-
- /* Ensure that load happens before action based on it. */
- if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
- /* Direct call during scheduler init and early_initcalls(). */
- rcu_exp_sel_wait_wake(s);
- } else {
- /* Marshall arguments & schedule the expedited grace period. */
- rew.rew_s = s;
- INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
- queue_work(rcu_gp_wq, &rew.rew_work);
- }
-
- /* Wait for expedited grace period to complete. */
- rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
- rnp = rcu_get_root();
- wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
- sync_exp_work_done(s));
- smp_mb(); /* Workqueue actions happen before return. */
-
- /* Let the next expedited grace period start. */
- mutex_unlock(&rcu_state.exp_mutex);
-}
-
#ifdef CONFIG_PREEMPT_RCU
/*
@@ -792,6 +748,11 @@ static void sync_sched_exp_online_cleanup(int cpu)
*/
void synchronize_rcu_expedited(void)
{
+ struct rcu_data *rdp;
+ struct rcu_exp_work rew;
+ struct rcu_node *rnp;
+ unsigned long s;
+
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
@@ -801,6 +762,36 @@ void synchronize_rcu_expedited(void)
if (rcu_blocking_is_gp())
return;
- _synchronize_rcu_expedited();
+ /* If expedited grace periods are prohibited, fall back to normal. */
+ if (rcu_gp_is_normal()) {
+ wait_rcu_gp(call_rcu);
+ return;
+ }
+
+ /* Take a snapshot of the sequence number. */
+ s = rcu_exp_gp_seq_snap();
+ if (exp_funnel_lock(s))
+ return; /* Someone else did our work for us. */
+
+ /* Ensure that load happens before action based on it. */
+ if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
+ /* Direct call during scheduler init and early_initcalls(). */
+ rcu_exp_sel_wait_wake(s);
+ } else {
+ /* Marshall arguments & schedule the expedited grace period. */
+ rew.rew_s = s;
+ INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+ queue_work(rcu_gp_wq, &rew.rew_work);
+ }
+
+ /* Wait for expedited grace period to complete. */
+ rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
+ rnp = rcu_get_root();
+ wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
+ sync_exp_work_done(s));
+ smp_mb(); /* Workqueue actions happen before return. */
+
+ /* Let the next expedited grace period start. */
+ mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
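For context on how the exported function is used: callers treat synchronize_rcu_expedited() exactly like synchronize_rcu(), only with a faster (and more disruptive, IPI-driven) grace period. A hedged sketch of a typical writer-side caller follows; the struct, the gp pointer, and the lock are hypothetical and are not part of this commit.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical RCU-protected data, for illustration only. */
struct foo {
	int val;
};

static struct foo __rcu *gp;
static DEFINE_SPINLOCK(gp_lock);

/* Writer: unpublish the element, wait for an expedited grace period so
 * that all pre-existing readers are done, then free it. */
static void remove_and_free_foo(void)
{
	struct foo *old;

	spin_lock(&gp_lock);
	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, NULL);
	spin_unlock(&gp_lock);

	synchronize_rcu_expedited();	/* expedited grace-period wait */
	kfree(old);
}

The trade-off relative to plain synchronize_rcu() is latency versus disturbance: the expedited variant completes much sooner, but does so by interrupting non-idle CPUs.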