author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-04-25 02:02:09 +0300
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-04-27 02:32:16 +0300
commit		1e9a038b7fe9a8c10ef1238f4e695d5fbe0dd594 (patch)
tree		d26c6a12f4a9d34fbb55a1169dabeff447810cc5 /include/linux/srcutree.h
parent		7f6733c3c648ddd6cf459c1b80ad388a95452955 (diff)
srcu: Expedited grace periods with reduced memory contention
Commit f60d231a87c5 ("srcu: Crude control of expedited grace periods")
introduced a per-srcu_struct atomic counter to track outstanding
requests for grace periods. This works, but represents a memory-contention
bottleneck. This commit therefore uses the srcu_node combining tree
to remove this bottleneck.
This commit adds new ->srcu_gp_seq_needed_exp fields to the
srcu_data, srcu_node, and srcu_struct structures, which track the
farthest-in-the-future grace period that must be expedited, which in
turn requires that all nearer-term grace periods also be expedited.
Requests for expediting start with the srcu_data structure, run up
through the srcu_node tree, and end at the srcu_struct structure.
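For illustration, "farthest in the future" here means wrap-safe sequence comparison rather than plain numeric comparison. A minimal sketch follows, assuming the usual ULONG_CMP_*() macros from include/linux/rcupdate.h; record_exp_request() is a hypothetical helper used only for this illustration, not part of the patch:

	/* Wrap-safe sequence comparisons, as defined in include/linux/rcupdate.h. */
	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

	/*
	 * Hypothetical helper: record a request to expedite grace period "s"
	 * in *field, but only if no request at least that far in the future
	 * has already been recorded there.
	 */
	static inline void record_exp_request(unsigned long *field, unsigned long s)
	{
		if (ULONG_CMP_LT(*field, s))
			*field = s;
	}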
Note that it may be necessary to expedite a grace period that has just
started.  This is handled by a new srcu_funnel_exp_start() function,
which is invoked when the grace period itself is already under way but
was not marked as expedited.
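A simplified sketch of such a funnel walk, based on the description above; locking and memory ordering (per-node spinlocks, READ_ONCE()/WRITE_ONCE()) are omitted, and the actual srcu_funnel_exp_start() added outside this header in kernel/rcu/srcutree.c may differ in detail:

	/*
	 * Sketch: propagate a request to expedite grace period "s" from a leaf
	 * srcu_node up to the srcu_struct, stopping early if the request is
	 * already covered or the grace period has already completed.
	 */
	static void srcu_funnel_exp_start_sketch(struct srcu_struct *sp,
						 struct srcu_node *snp,
						 unsigned long s)
	{
		for (; snp != NULL; snp = snp->srcu_parent) {
			if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
			    ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s))
				return;	/* Done, or already expedited far enough. */
			snp->srcu_gp_seq_needed_exp = s;
		}
		if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
			sp->srcu_gp_seq_needed_exp = s;	/* Record request at the root. */
	}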
A new srcu_get_delay() function returns zero if there is at least one
expedited SRCU grace period in flight, or SRCU_INTERVAL otherwise.
This function is used to calculate delays: Normal grace periods
are allowed to extend in order to cover more requests with a given
grace-period computation, which decreases per-request overhead.
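A minimal sketch of srcu_get_delay() consistent with that description; the actual function lives in kernel/rcu/srcutree.c and is not part of the header diff below:

	/*
	 * Sketch: return no delay if an expedited grace period has been
	 * requested beyond the current grace-period sequence, otherwise
	 * return the normal SRCU_INTERVAL.
	 */
	static unsigned long srcu_get_delay(struct srcu_struct *sp)
	{
		if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
				 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
			return 0;	/* Expedited GP outstanding: no delay. */
		return SRCU_INTERVAL;	/* No expedited work: allow GPs to stretch. */
	}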
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Mike Galbraith <efault@gmx.de>
Diffstat (limited to 'include/linux/srcutree.h')
-rw-r--r--	include/linux/srcutree.h | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 3865717df124..86df48d3e97b 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -43,6 +43,7 @@ struct srcu_data {
 	spinlock_t lock ____cacheline_internodealigned_in_smp;
 	struct rcu_segcblist srcu_cblist;	/* List of callbacks.*/
 	unsigned long srcu_gp_seq_needed;	/* Furthest future GP needed. */
+	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
 	bool srcu_cblist_invoking;		/* Invoking these CBs? */
 	struct delayed_work work;		/* Context for CB invoking. */
 	struct rcu_head srcu_barrier_head;	/* For srcu_barrier() use. */
@@ -63,6 +64,7 @@ struct srcu_node {
 						/*  is > ->srcu_gq_seq. */
 	unsigned long srcu_data_have_cbs[4];	/* Which srcu_data structs */
 						/*  have CBs for given GP? */
+	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
 	struct srcu_node *srcu_parent;		/* Next up in tree. */
 	int grplo;				/* Least CPU for node. */
 	int grphi;				/* Biggest CPU for node. */
@@ -81,7 +83,7 @@ struct srcu_struct {
 	unsigned int srcu_idx;			/* Current rdr array element. */
 	unsigned long srcu_gp_seq;		/* Grace-period seq #. */
 	unsigned long srcu_gp_seq_needed;	/* Latest gp_seq needed. */
-	atomic_t srcu_exp_cnt;			/* # ongoing expedited GPs. */
+	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
 	struct srcu_data __percpu *sda;		/* Per-CPU srcu_data array. */
 	unsigned long srcu_barrier_seq;		/* srcu_barrier seq #. */
 	struct mutex srcu_barrier_mutex;	/* Serialize barrier ops. */