author    | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-02-01 06:23:24 +0300
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-02-21 03:12:27 +0300
commit    | 9a414201ae7ea089699a0cbd36533345ca17233b (patch)
tree      | 90c0c64e89136a0cc09df8cdf06eb8cffa9a6a58
parent    | 274afd6bfa64984724963204b832cc71565a7740 (diff)
download  | linux-9a414201ae7ea089699a0cbd36533345ca17233b.tar.xz
rcu: Add more tracing of expedited grace periods
This commit adds more tracing of expedited grace periods to enable
improved debugging of slowdowns.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r-- | include/trace/events/rcu.h |  3
-rw-r--r-- | kernel/rcu/rcu.h           |  8
-rw-r--r-- | kernel/rcu/tree_exp.h      | 12

3 files changed, 22 insertions, 1 deletion
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 0b50fda80db0..e56a618f2a59 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -179,6 +179,9 @@ TRACE_EVENT(rcu_grace_period_init,
  *
  * "snap": Captured snapshot of expedited grace period sequence number.
  * "start": Started a real expedited grace period.
+ * "reset": Started resetting the tree
+ * "select": Started selecting the CPUs to wait on.
+ * "startwait": Started waiting on selected CPUs.
  * "end": Ended a real expedited grace period.
  * "endwake": Woke piggybackers up.
  * "done": Someone else did the expedited grace period for us.
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 5d13f651cf08..507a0802c717 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -77,12 +77,18 @@ static inline void rcu_seq_start(unsigned long *sp)
 	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
 }
 
+/* Compute the end-of-grace-period value for the specified sequence number. */
+static inline unsigned long rcu_seq_endval(unsigned long *sp)
+{
+	return (*sp | RCU_SEQ_STATE_MASK) + 1;
+}
+
 /* Adjust sequence number for end of update-side operation. */
 static inline void rcu_seq_end(unsigned long *sp)
 {
 	smp_mb(); /* Ensure update-side operation before counter increment. */
 	WARN_ON_ONCE(!rcu_seq_state(*sp));
-	WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
+	WRITE_ONCE(*sp, rcu_seq_endval(sp));
 }
 
 /* Take a snapshot of the update side's sequence number. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 46d61b597731..70ad12abde36 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -29,6 +29,15 @@ static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
 }
 
 /*
+ * Return then value that expedited-grace-period counter will have
+ * at the end of the current grace period.
+ */
+static unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
+{
+	return rcu_seq_endval(&rsp->expedited_sequence);
+}
+
+/*
  * Record the end of an expedited grace period.
  */
 static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
@@ -366,7 +375,9 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 	int ret;
 	struct rcu_node *rnp;
 
+	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
 	sync_exp_reset_tree(rsp);
+	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 
@@ -443,6 +454,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	int ret;
 
+	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
 	jiffies_stall = rcu_jiffies_till_stall_check();
 	jiffies_start = jiffies;
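The three new trace points are tagged with rcu_exp_gp_seq_endval(), the value the expedited sequence counter will hold once the current grace period ends, so the "reset", "select", and "startwait" events of one expedited grace period all carry the same sequence number. Below is a minimal stand-alone sketch of that counter arithmetic; it is not part of the patch, the RCU_SEQ_* constants mirror their definitions in kernel/rcu/rcu.h at the time of this commit, and main() merely steps one imaginary grace period.

```c
#include <stdio.h>

/* Mirrors kernel/rcu/rcu.h: the low-order bits of the sequence hold GP state. */
#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Value the counter will have at the end of the current grace period. */
static unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

int main(void)
{
	unsigned long seq = 8;		/* Idle: counter 2, state 0. */

	seq++;				/* rcu_seq_start(): state becomes 1. */
	printf("during GP: seq=%lu endval=%lu\n", seq, rcu_seq_endval(&seq));

	seq = rcu_seq_endval(&seq);	/* rcu_seq_end(): round up, state cleared. */
	printf("after GP:  seq=%lu\n", seq);
	return 0;
}
```

In this sketch a grace period started at seq == 9 reports endval == 12 at every traced phase, which is exactly the value the counter takes once rcu_seq_end() runs, so the per-phase trace events can be matched to the grace period that eventually completes with that number.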