author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-06-26 01:52:50 +0300
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-07-18 00:58:52 +0300
commit	543c6158f6dff20a741dfa492771f18ceaa1a109 (patch)
tree	8897854acacf5f31bbd4f861a8b2f9e43dd0190d /kernel/rcu
parent	28f00767e3db933cacc3030f4d9736acd037be2c (diff)
download	linux-543c6158f6dff20a741dfa492771f18ceaa1a109.tar.xz
rcu: Make synchronize_rcu_expedited() use sequence-counter scheme
Although synchronize_rcu_expedited() uses a sequence-counter scheme, it is based on a single increment per grace period, which means that tasks piggybacking off of concurrent grace periods may be forced to wait longer than necessary. This commit therefore applies the new sequence-count functions developed for synchronize_sched_expedited() to speed things up a bit and to consolidate the sequence-counter implementation.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
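For context, the scheme this patch adopts keeps one free-running counter whose low bit indicates an expedited grace period in flight: start and end each increment it once, a snapshot rounds up to the counter value at which a full grace period beginning after the snapshot must have completed, and the completion test is a wraparound-safe comparison. The following is a minimal user-space sketch of that idiom, not the kernel code: the helper names merely echo the kernel's rcu_exp_gp_seq_*()/rcu_seq_*() family, and the smp_mb() ordering the real functions require is elided.

#include <stdio.h>
#include <stdbool.h>

static unsigned long exp_seq;	/* Low bit set => expedited GP in flight. */

/* Counter value at which a full GP starting no earlier than now has
 * completed: "+ 3 & ~0x1" rounds up past any GP already in progress. */
static unsigned long seq_snap(void)
{
	return (exp_seq + 3) & ~0x1UL;
}

/* Wraparound-safe "has the counter reached the snapshot?" test,
 * equivalent in spirit to the kernel's ULONG_CMP_GE(). */
static bool seq_done(unsigned long s)
{
	return exp_seq - s <= (~0UL >> 1);
}

static void seq_start(void) { exp_seq++; }	/* Counter goes odd.  */
static void seq_end(void)   { exp_seq++; }	/* Counter goes even. */

int main(void)
{
	unsigned long s = seq_snap();			/* Task wants a GP.  */

	printf("snap=%lu done=%d\n", s, seq_done(s));	/* done=0, not yet   */
	seq_start();					/* Some task runs a  */
	seq_end();					/* GP to completion. */
	printf("seq=%lu done=%d\n", exp_seq, seq_done(s));	/* done=1    */
	return 0;
}

Because the done test is a two's-complement distance rather than an equality, the counter may wrap without confusing waiters, and because the snapshot rounds past any grace period already in flight, a task never credits itself with a grace period that began before its own updates were visible.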
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree_plugin.h	16
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index ef41c1b04ba6..759883f51de7 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -536,7 +536,6 @@ void synchronize_rcu(void)
EXPORT_SYMBOL_GPL(synchronize_rcu);
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
/*
@@ -704,12 +703,10 @@ void synchronize_rcu_expedited(void)
{
struct rcu_node *rnp;
struct rcu_state *rsp = rcu_state_p;
- unsigned long snap;
+ unsigned long s;
int trycount = 0;
- smp_mb(); /* Caller's modifications seen first by other CPUs. */
- snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
- smp_mb(); /* Above access cannot bleed into critical section. */
+ s = rcu_exp_gp_seq_snap(rsp);
/*
* Acquire lock, falling back to synchronize_rcu() if too many
@@ -717,8 +714,7 @@ void synchronize_rcu_expedited(void)
* expedited grace period for us, just leave.
*/
while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
- if (ULONG_CMP_LT(snap,
- READ_ONCE(sync_rcu_preempt_exp_count)))
+ if (rcu_exp_gp_seq_done(rsp, s))
goto mb_ret; /* Others did our work for us. */
if (trycount++ < 10) {
udelay(trycount * num_online_cpus());
@@ -727,8 +723,9 @@ void synchronize_rcu_expedited(void)
return;
}
}
- if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count)))
+ if (rcu_exp_gp_seq_done(rsp, s))
goto unlock_mb_ret; /* Others did our work for us. */
+ rcu_exp_gp_seq_start(rsp);
/* force all RCU readers onto ->blkd_tasks lists. */
synchronize_sched_expedited();
@@ -750,8 +747,7 @@ void synchronize_rcu_expedited(void)
sync_rcu_preempt_exp_done(rnp));
/* Clean up and exit. */
- smp_mb(); /* ensure expedited GP seen before counter increment. */
- WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
+ rcu_exp_gp_seq_end(rsp);
unlock_mb_ret:
mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
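Putting the hunks together, the post-patch synchronize_rcu_expedited() reads: take a snapshot, poll the done test while spinning on mutex_trylock() so late arrivals can piggyback on a concurrent grace period, recheck under the lock, and only then run the real machinery bracketed by the start/end increments. Below is a hedged pthread model of that piggyback pattern, building on the sketch above; synchronize_expedited(), do_expedited_gp(), and exp_seq are hypothetical stand-ins, and the kernel's trycount/synchronize_rcu() fallback path is omitted.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long exp_seq;	/* even/odd sequence counter, as above */

static unsigned long seq_snap(void)
{
	return (__atomic_load_n(&exp_seq, __ATOMIC_SEQ_CST) + 3) & ~0x1UL;
}

static bool seq_done(unsigned long s)
{
	return __atomic_load_n(&exp_seq, __ATOMIC_SEQ_CST) - s <= (~0UL >> 1);
}

static void do_expedited_gp(void)	/* stand-in for the real GP work */
{
	usleep(1000);
}

static void *synchronize_expedited(void *unused)
{
	unsigned long s = seq_snap();

	(void)unused;
	while (pthread_mutex_trylock(&exp_mutex) != 0) {
		if (seq_done(s))
			return NULL;	/* Others did our work for us. */
		sched_yield();
	}
	if (seq_done(s)) {		/* Recheck now that we hold the lock. */
		pthread_mutex_unlock(&exp_mutex);
		return NULL;
	}
	__atomic_fetch_add(&exp_seq, 1, __ATOMIC_SEQ_CST);	/* start: odd  */
	do_expedited_gp();
	__atomic_fetch_add(&exp_seq, 1, __ATOMIC_SEQ_CST);	/* end:   even */
	pthread_mutex_unlock(&exp_mutex);
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, synchronize_expedited, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	/* Usually below 8: each piggybacking task is one GP never run. */
	printf("exp_seq=%lu\n", exp_seq);
	return 0;
}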