author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-05-21 16:57:18 +0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-07-20 08:38:53 +0400
commit     be0e1e21ef707be4d16ea6a96ac9997463e4b8d2 (patch)
tree       fe53c0731f5c3a6e56e2358eee9b8f17c3af83f4 /kernel
parent     7765be2fec0f476fcd61812d5f9406b04c765020 (diff)
download   linux-be0e1e21ef707be4d16ea6a96ac9997463e4b8d2.tar.xz
rcu: Streamline code produced by __rcu_read_unlock()
Given some common flag combinations, particularly -Os, gcc will inline
rcu_read_unlock_special() despite its being in an unlikely() clause.
Use noinline to prohibit this misoptimization.

In addition, move the second barrier() in __rcu_read_unlock() so that
it is not on the common-case code path.  This will allow the compiler
to generate better code for the common-case path through
__rcu_read_unlock().

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
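For illustration only, the following userspace sketch (not part of the patch) mimics the pattern the commit applies: the rarely-taken slow path lives in a separate noinline function so gcc cannot fold it back into the caller, and the compiler barrier sits inside the nesting-reaches-zero branch, off the common-case path. The names nesting, special, slow_path() and fast_path_unlock() are hypothetical stand-ins for ->rcu_read_lock_nesting, ->rcu_read_unlock_special and the real kernel functions; noinline, unlikely() and barrier() are defined locally with the GCC builtins the kernel macros are assumed to expand to.

#include <stdio.h>

#define noinline    __attribute__((noinline))
#define unlikely(x) __builtin_expect(!!(x), 0)
#define barrier()   __asm__ __volatile__("" : : : "memory")

static int nesting;          /* stand-in for t->rcu_read_lock_nesting */
static volatile int special; /* stand-in for t->rcu_read_unlock_special */

/* Rare slow path; noinline keeps its body out of the caller's fast path. */
static noinline void slow_path(void)
{
	printf("slow path taken\n");
	special = 0;
}

/* Fast path: in the common case only the decrement and the test run. */
static void fast_path_unlock(void)
{
	if (--nesting == 0) {
		barrier();      /* order the decrement before the load of special */
		if (unlikely(special))
			slow_path();
	}
}

int main(void)
{
	nesting = 1;
	special = 1;
	fast_path_unlock();     /* forces the slow path once, for demonstration */
	return 0;
}

Building the sketch with gcc -Os -S and inspecting the assembly for fast_path_unlock() should show the slow-path body kept out of line; dropping the noinline attribute lets -Os pull it back in, which is the misoptimization the patch guards against.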
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree_plugin.h | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3a0ae0355222..4d2c068ba13e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -284,7 +284,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -391,11 +391,11 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-	--t->rcu_read_lock_nesting;
-	barrier();  /* decrement before load of ->rcu_read_unlock_special */
-	if (t->rcu_read_lock_nesting == 0 &&
-	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-		rcu_read_unlock_special(t);
+	if (--t->rcu_read_lock_nesting == 0) {
+		barrier();  /* decr before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+	}
 #ifdef CONFIG_PROVE_LOCKING
 	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
 #endif /* #ifdef CONFIG_PROVE_LOCKING */