author:    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-05-01 20:26:57 +0300
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-07-13 01:37:48 +0300
commit:    7a1d0f23ad70cd4813bf4b72735ea2c26a4f53fe (patch)
tree:      83c5e674a73d05ebc38687b9754d1509366e29b5 /kernel/rcu/tree.h
parent:    aebc82644b2c8eafa15e8c481fbafc1b41f4fbf9 (diff)
rcu: Move from ->need_future_gp[] to ->gp_seq_needed
One problem with the ->need_future_gp[] array is that the grace-period
assignment of each element changes as the grace periods complete.
This means that it is necessary to hold a lock when checking this
array to learn if a given grace period has already been requested.
This increases lock contention, which is the opposite of helpful.
This commit therefore replaces the ->need_future_gp[] with a single
->gp_seq_needed value and keeps it updated in the rcu_data structure.
This will enable reliable lockless checking of whether or not a given
grace period has already been requested.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
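
To make the lockless check concrete, below is a standalone userspace sketch (not the kernel's actual code) of the idea described above: once the furthest-future requested grace period is tracked as a single monotonically advancing sequence value, "has grace period c already been requested?" becomes one wraparound-safe comparison on a plain load, with no lock held. The structure name, helper names, and the simplified READ_ONCE() here are illustrative assumptions; the comparison macro mirrors the kernel's ULONG_CMP_GE().

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a is at or after b" for sequence counters,
 * in the style of the kernel's ULONG_CMP_GE(). */
#define SEQ_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

/* Simplified stand-in for the kernel's READ_ONCE(). */
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))

/* Hypothetical stand-in for the rcu_node/rcu_data fields. */
struct gp_tracker {
	unsigned long gp_seq_needed;	/* Furthest-future GP requested. */
};

/*
 * Lockless check: because gp_seq_needed only advances and its meaning
 * never changes, a single racy read either proves the request was
 * already made or sends the caller to the locked slow path.  The old
 * ->need_future_gp[] array could not be checked this way because the
 * grace-period assignment of each element changed as grace periods
 * completed.
 */
static bool gp_already_requested(struct gp_tracker *t, unsigned long c)
{
	return SEQ_CMP_GE(READ_ONCE(t->gp_seq_needed), c);
}

int main(void)
{
	struct gp_tracker t = { .gp_seq_needed = 8 };

	printf("GP 4 requested?  %d\n", gp_already_requested(&t, 4));  /* 1 */
	printf("GP 12 requested? %d\n", gp_already_requested(&t, 12)); /* 0 */
	return 0;
}
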
Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r--  kernel/rcu/tree.h | 19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index a21d403a6010..9329c1ff695f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -88,6 +88,7 @@ struct rcu_node {
 				/*  This will either be equal to or one */
 				/*  behind the root rcu_node's gpnum. */
 	unsigned long gp_seq;	/* Track rsp->rcu_gp_seq. */
+	unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
 	unsigned long completedqs; /* All QSes done for this node. */
 	unsigned long qsmask;	/* CPUs or groups that need to switch in */
 				/*  order for current grace period to proceed.*/
@@ -160,7 +161,6 @@ struct rcu_node {
 	struct swait_queue_head nocb_gp_wq[2];
 				/* Place for rcu_nocb_kthread() to wait GP. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-	u8 need_future_gp[4];	/* Counts of upcoming GP requests. */
 	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 
 	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
@@ -170,22 +170,6 @@ struct rcu_node {
 	bool exp_need_flush;	/* Need to flush workitem? */
 } ____cacheline_internodealigned_in_smp;
 
-/* Accessors for ->need_future_gp[] array. */
-#define need_future_gp_mask() \
-	(ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
-#define need_future_gp_element(rnp, c) \
-	((rnp)->need_future_gp[(c >> RCU_SEQ_CTR_SHIFT) & need_future_gp_mask()])
-#define need_any_future_gp(rnp) \
-({ \
-	int __i; \
-	bool __nonzero = false; \
-	\
-	for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++) \
-		__nonzero = __nonzero || \
-			    READ_ONCE((rnp)->need_future_gp[__i]); \
-	__nonzero; \
-})
-
 /*
  * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
  * are indexed relative to this interval rather than the global CPU ID space.
@@ -213,6 +197,7 @@ struct rcu_data {
 	unsigned long gpnum;	/* Highest gp number that this CPU */
 				/*  is aware of having started. */
 	unsigned long gp_seq;	/* Track rsp->rcu_gp_seq counter. */
+	unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */
 	unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
 				/*  for rcu_all_qs() invocations. */
 	union rcu_noqs cpu_no_qs;	/* No QSes yet for this CPU. */