From cc44ca848f5e517aeca9f5eabbe13609a3f71450 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Fri, 21 Aug 2015 19:42:44 +0200
Subject: rcu: Create rcu_sync infrastructure

The rcu_sync infrastructure can be thought of as infrastructure to be
used to implement reader-writer primitives having extremely lightweight
readers during times when there are no writers. The first use is in the
percpu_rwsem used by the VFS subsystem.

This infrastructure is functionally equivalent to

    struct rcu_sync_struct {
        atomic_t counter;
    };

    /* Check possibility of fast-path read-side operations. */
    static inline bool rcu_sync_is_idle(struct rcu_sync_struct *rss)
    {
        return atomic_read(&rss->counter) == 0;
    }

    /* Tell readers to use slowpaths. */
    static inline void rcu_sync_enter(struct rcu_sync_struct *rss)
    {
        atomic_inc(&rss->counter);
        synchronize_sched();
    }

    /* Allow readers to once again use fastpaths. */
    static inline void rcu_sync_exit(struct rcu_sync_struct *rss)
    {
        synchronize_sched();
        atomic_dec(&rss->counter);
    }

The main difference is that it records the state and only calls
synchronize_sched() if required. At least some of the calls to
synchronize_sched() will be optimized away when rcu_sync_enter() and
rcu_sync_exit() are invoked repeatedly in quick succession.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 include/linux/rcu_sync.h | 94 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 94 insertions(+)
 create mode 100644 include/linux/rcu_sync.h

(limited to 'include')

diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
new file mode 100644
index 000000000000..cb044df2e21c
--- /dev/null
+++ b/include/linux/rcu_sync.h
@@ -0,0 +1,94 @@
+/*
+ * RCU-based infrastructure for lightweight reader-writer locking
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (c) 2015, Red Hat, Inc.
+ *
+ * Author: Oleg Nesterov
+ */
+
+#ifndef _LINUX_RCU_SYNC_H_
+#define _LINUX_RCU_SYNC_H_
+
+#include <linux/wait.h>
+#include <linux/rcupdate.h>
+
+/* Structure to mediate between updaters and fastpath-using readers. */
+struct rcu_sync {
+	int			gp_state;
+	int			gp_count;
+	wait_queue_head_t	gp_wait;
+
+	int			cb_state;
+	struct rcu_head		cb_head;
+
+	void (*sync)(void);
+	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+};
+
+#define ___RCU_SYNC_INIT(name)						\
+	.gp_state = 0,							\
+	.gp_count = 0,							\
+	.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait),		\
+	.cb_state = 0
+
+#define __RCU_SCHED_SYNC_INIT(name) {					\
+	___RCU_SYNC_INIT(name),						\
+	.sync = synchronize_sched,					\
+	.call = call_rcu_sched,						\
+}
+
+#define __RCU_BH_SYNC_INIT(name) {					\
+	___RCU_SYNC_INIT(name),						\
+	.sync = synchronize_rcu_bh,					\
+	.call = call_rcu_bh,						\
+}
+
+#define __RCU_SYNC_INIT(name) {						\
+	___RCU_SYNC_INIT(name),						\
+	.sync = synchronize_rcu,					\
+	.call = call_rcu,						\
+}
+
+#define DEFINE_RCU_SCHED_SYNC(name)					\
+	struct rcu_sync name = __RCU_SCHED_SYNC_INIT(name)
+
+#define DEFINE_RCU_BH_SYNC(name)					\
+	struct rcu_sync name = __RCU_BH_SYNC_INIT(name)
+
+#define DEFINE_RCU_SYNC(name)						\
+	struct rcu_sync name = __RCU_SYNC_INIT(name)
+
+/**
+ * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
+ * Returns true if readers are permitted to use their fastpaths.
+ * Must be invoked within an RCU read-side critical section whose
+ * flavor matches that of the rcu_sync structure.
+ */
+static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
+{
+	return !rsp->gp_state; /* GP_IDLE */
+}
+
+enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
+
+extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter(struct rcu_sync *);
+extern void rcu_sync_exit(struct rcu_sync *);
+
+#endif /* _LINUX_RCU_SYNC_H_ */
--
cgit v1.2.3
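To make the intended usage concrete before the API evolves in the next
patches, here is a sketch of a reader/writer pair built on this interface,
using the sched flavor. This is illustrative only and not part of the patch;
my_rss, my_lock, do_fast_read(), do_slow_read() and do_write() are invented
names:

    static DEFINE_RCU_SCHED_SYNC(my_rss);
    static DEFINE_MUTEX(my_lock);

    static void my_read_op(void)
    {
        preempt_disable();              /* sched-flavor read-side critical section */
        if (rcu_sync_is_idle(&my_rss)) {
            do_fast_read();             /* no writer can begin until preempt_enable() */
            preempt_enable();
            return;
        }
        preempt_enable();

        mutex_lock(&my_lock);           /* slow path: serialize with the writer */
        do_slow_read();
        mutex_unlock(&my_lock);
    }

    static void my_write_op(void)
    {
        rcu_sync_enter(&my_rss);        /* may block for a grace period; afterward
                                         * every new reader takes the slow path and
                                         * all old fast-path readers have finished */
        mutex_lock(&my_lock);           /* exclude the remaining slow-path readers */
        do_write();
        mutex_unlock(&my_lock);
        rcu_sync_exit(&my_rss);         /* fastpaths resume after a later GP */
    }

Note the shape of the win: when my_write_op() is invoked repeatedly in quick
succession, only the first rcu_sync_enter() of the burst pays for a grace
period.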
From 82e8c565be8a72957570d7da8dd9b441db7bb648 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Fri, 21 Aug 2015 19:42:47 +0200
Subject: rcu_sync: Simplify rcu_sync using new rcu_sync_ops structure

This commit adds the new struct rcu_sync_ops which holds sync/call
methods, and turns the function pointers in rcu_sync_struct into an
array of struct rcu_sync_ops. This simplifies the "init" helpers by
collapsing a switch statement and explicit multiple definitions into a
simple assignment and a helper macro, respectively.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 include/linux/rcu_sync.h | 60 +++++++++++++++++++-----------------------
 kernel/rcu/sync.c        | 42 +++++++++++++++++----------------
 2 files changed, 45 insertions(+), 57 deletions(-)

(limited to 'include')

diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index cb044df2e21c..c6d2272c4459 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -26,6 +26,8 @@
 #include <linux/wait.h>
 #include <linux/rcupdate.h>
 
+enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
+
 /* Structure to mediate between updaters and fastpath-using readers. */
 struct rcu_sync {
 	int			gp_state;
@@ -35,43 +37,9 @@ struct rcu_sync {
 	int			cb_state;
 	struct rcu_head		cb_head;
 
-	void (*sync)(void);
-	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+	enum rcu_sync_type	gp_type;
 };
 
-#define ___RCU_SYNC_INIT(name)						\
-	.gp_state = 0,							\
-	.gp_count = 0,							\
-	.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait),		\
-	.cb_state = 0
-
-#define __RCU_SCHED_SYNC_INIT(name) {					\
-	___RCU_SYNC_INIT(name),						\
-	.sync = synchronize_sched,					\
-	.call = call_rcu_sched,						\
-}
-
-#define __RCU_BH_SYNC_INIT(name) {					\
-	___RCU_SYNC_INIT(name),						\
-	.sync = synchronize_rcu_bh,					\
-	.call = call_rcu_bh,						\
-}
-
-#define __RCU_SYNC_INIT(name) {						\
-	___RCU_SYNC_INIT(name),						\
-	.sync = synchronize_rcu,					\
-	.call = call_rcu,						\
-}
-
-#define DEFINE_RCU_SCHED_SYNC(name)					\
-	struct rcu_sync name = __RCU_SCHED_SYNC_INIT(name)
-
-#define DEFINE_RCU_BH_SYNC(name)					\
-	struct rcu_sync name = __RCU_BH_SYNC_INIT(name)
-
-#define DEFINE_RCU_SYNC(name)						\
-	struct rcu_sync name = __RCU_SYNC_INIT(name)
-
 /**
  * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
  * @rsp: Pointer to rcu_sync structure to use for synchronization
@@ -85,10 +53,28 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 	return !rsp->gp_state; /* GP_IDLE */
 }
 
-enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
-
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
 
+#define __RCU_SYNC_INITIALIZER(name, type) {				\
+		.gp_state = 0,						\
+		.gp_count = 0,						\
+		.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait),	\
+		.cb_state = 0,						\
+		.gp_type = type,					\
+	}
+
+#define __DEFINE_RCU_SYNC(name, type)					\
+	struct rcu_sync name = __RCU_SYNC_INITIALIZER(name, type)
+
+#define DEFINE_RCU_SYNC(name)						\
+	__DEFINE_RCU_SYNC(name, RCU_SYNC)
+
+#define DEFINE_RCU_SCHED_SYNC(name)					\
+	__DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC)
+
+#define DEFINE_RCU_BH_SYNC(name)					\
+	__DEFINE_RCU_SYNC(name, RCU_BH_SYNC)
+
 #endif /* _LINUX_RCU_SYNC_H_ */
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 0a11df43be23..5a9aa4c394f1 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -23,6 +23,24 @@
 #include <linux/rcu_sync.h>
 #include <linux/sched.h>
 
+static const struct {
+	void (*sync)(void);
+	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+} gp_ops[] = {
+	[RCU_SYNC] = {
+		.sync = synchronize_rcu,
+		.call = call_rcu,
+	},
+	[RCU_SCHED_SYNC] = {
+		.sync = synchronize_sched,
+		.call = call_rcu_sched,
+	},
+	[RCU_BH_SYNC] = {
+		.sync = synchronize_rcu_bh,
+		.call = call_rcu_bh,
+	},
+};
+
 enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
 enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
 
@@ -37,23 +55,7 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
 {
 	memset(rsp, 0, sizeof(*rsp));
 	init_waitqueue_head(&rsp->gp_wait);
-
-	switch (type) {
-	case RCU_SYNC:
-		rsp->sync = synchronize_rcu;
-		rsp->call = call_rcu;
-		break;
-
-	case RCU_SCHED_SYNC:
-		rsp->sync = synchronize_sched;
-		rsp->call = call_rcu_sched;
-		break;
-
-	case RCU_BH_SYNC:
-		rsp->sync = synchronize_rcu_bh;
-		rsp->call = call_rcu_bh;
-		break;
-	}
+	rsp->gp_type = type;
 }
 
 /**
@@ -85,7 +87,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
 	BUG_ON(need_wait && need_sync);
 
 	if (need_sync) {
-		rsp->sync();
+		gp_ops[rsp->gp_type].sync();
 		rsp->gp_state = GP_PASSED;
 		wake_up_all(&rsp->gp_wait);
 	} else if (need_wait) {
@@ -138,7 +140,7 @@ static void rcu_sync_func(struct rcu_head *rcu)
 		 * to catch a later GP.
 		 */
 		rsp->cb_state = CB_PENDING;
-		rsp->call(&rsp->cb_head, rcu_sync_func);
+		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
 	} else {
 		/*
 		 * We're at least a GP after rcu_sync_exit(); everybody will now
@@ -166,7 +168,7 @@ void rcu_sync_exit(struct rcu_sync *rsp)
 	if (!--rsp->gp_count) {
 		if (rsp->cb_state == CB_IDLE) {
 			rsp->cb_state = CB_PENDING;
-			rsp->call(&rsp->cb_head, rcu_sync_func);
+			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
 		} else if (rsp->cb_state == CB_PENDING) {
 			rsp->cb_state = CB_REPLAY;
 		}
--
cgit v1.2.3
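The refactoring pattern above -- replacing per-instance function pointers
with a small type enum indexing one shared, const ops table -- is worth
noting on its own: it shrinks every instance and moves the pointers into
read-only memory. A standalone, compilable sketch of the same pattern
(plain C, illustrative only, not kernel code):

    #include <stdio.h>

    enum flavor { FLAVOR_A, FLAVOR_B };

    static void sync_a(void) { puts("sync A"); }
    static void sync_b(void) { puts("sync B"); }

    /* One shared, read-only table instead of pointers in every object. */
    static const struct {
        void (*sync)(void);
    } ops[] = {
        [FLAVOR_A] = { .sync = sync_a },
        [FLAVOR_B] = { .sync = sync_b },
    };

    struct obj {
        enum flavor type;   /* one small enum instead of function pointers */
    };

    int main(void)
    {
        struct obj o = { .type = FLAVOR_B };

        ops[o.type].sync(); /* dispatches to sync_b(): prints "sync B" */
        return 0;
    }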
From 3a518b76af7bb411efe6dd090fbf098e29accb2e Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Fri, 21 Aug 2015 19:42:50 +0200
Subject: rcu_sync: Add CONFIG_PROVE_RCU checks

This commit validates that the caller of rcu_sync_is_idle() holds the
corresponding type of RCU read-side lock, but only in kernels built
with CONFIG_PROVE_RCU=y. This validation is carried out via a new
rcu_sync_ops->held() method that is checked within rcu_sync_is_idle().

Note that although this does add code to the fast path, it only does so
in kernels built with CONFIG_PROVE_RCU=y.

Suggested-by: "Paul E. McKenney"
Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 include/linux/rcu_sync.h |  6 ++++++
 kernel/rcu/sync.c        | 20 ++++++++++++++++++++
 2 files changed, 26 insertions(+)

(limited to 'include')

diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index c6d2272c4459..1f2d4fc30b04 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -40,6 +40,8 @@ struct rcu_sync {
 	enum rcu_sync_type	gp_type;
 };
 
+extern bool __rcu_sync_is_idle(struct rcu_sync *);
+
 /**
  * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
  * @rsp: Pointer to rcu_sync structure to use for synchronization
@@ -50,7 +52,11 @@ struct rcu_sync {
  */
 static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 {
+#ifdef CONFIG_PROVE_RCU
+	return __rcu_sync_is_idle(rsp);
+#else
 	return !rsp->gp_state; /* GP_IDLE */
+#endif
 }
 
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 5a9aa4c394f1..01c9807a7f73 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -23,21 +23,33 @@
 #include <linux/rcu_sync.h>
 #include <linux/sched.h>
 
+#ifdef CONFIG_PROVE_RCU
+#define __INIT_HELD(func)	.held = func,
+#else
+#define __INIT_HELD(func)
+#endif
+
 static const struct {
 	void (*sync)(void);
 	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+#ifdef CONFIG_PROVE_RCU
+	int  (*held)(void);
+#endif
 } gp_ops[] = {
 	[RCU_SYNC] = {
 		.sync = synchronize_rcu,
 		.call = call_rcu,
+		__INIT_HELD(rcu_read_lock_held)
 	},
 	[RCU_SCHED_SYNC] = {
 		.sync = synchronize_sched,
 		.call = call_rcu_sched,
+		__INIT_HELD(rcu_read_lock_sched_held)
 	},
 	[RCU_BH_SYNC] = {
 		.sync = synchronize_rcu_bh,
 		.call = call_rcu_bh,
+		__INIT_HELD(rcu_read_lock_bh_held)
 	},
 };
 
@@ -46,6 +58,14 @@ enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
 
 #define	rss_lock	gp_wait.lock
 
+#ifdef CONFIG_PROVE_RCU
+bool __rcu_sync_is_idle(struct rcu_sync *rsp)
+{
+	WARN_ON(!gp_ops[rsp->gp_type].held());
+	return rsp->gp_state == GP_IDLE;
+}
+#endif
+
 /**
  * rcu_sync_init() - Initialize an rcu_sync structure
  * @rsp: Pointer to rcu_sync structure to be initialized
--
cgit v1.2.3
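For illustration, the class of bug the new ->held() check catches. With
CONFIG_PROVE_RCU=y on a preemptible kernel, the first (hypothetical) caller
below trips the WARN_ON() in __rcu_sync_is_idle() when used with an
RCU_SCHED_SYNC-typed rcu_sync, because no sched-flavor read-side critical
section is held across the check; my_fast_read() and my_slow_read() are
invented names:

    /* Buggy: the fastpath decision is made outside any read-side
     * critical section, so it can also race with a writer. */
    static void my_buggy_reader(struct rcu_sync *rss)
    {
        if (rcu_sync_is_idle(rss))  /* WARN_ON(!rcu_read_lock_sched_held()) */
            my_fast_read();
        else
            my_slow_read();
    }

    /* Fixed: hold the matching critical section across the check and
     * the fast-path work that it guards. */
    static void my_fixed_reader(struct rcu_sync *rss)
    {
        rcu_read_lock_sched();
        if (rcu_sync_is_idle(rss))
            my_fast_read();
        else
            my_slow_read();
        rcu_read_unlock_sched();
    }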
From 07899a6e5f56136028c44a57ad0451e797365ac3 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Fri, 21 Aug 2015 19:42:52 +0200
Subject: rcu_sync: Introduce rcu_sync_dtor()

This commit allows rcu_sync structures to be safely deallocated. The
trick is to add a new ->wait field to the gp_ops array. This field is
a pointer to the rcu_barrier() function corresponding to the flavor of
RCU in question. This allows a new rcu_sync_dtor() to wait for any
outstanding callbacks before freeing the rcu_sync structure.

Signed-off-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 include/linux/rcu_sync.h |  1 +
 kernel/rcu/sync.c        | 26 ++++++++++++++++++++++++++
 2 files changed, 27 insertions(+)

(limited to 'include')

diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index 1f2d4fc30b04..8069d6468bc4 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -62,6 +62,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
+extern void rcu_sync_dtor(struct rcu_sync *);
 
 #define __RCU_SYNC_INITIALIZER(name, type) {				\
 		.gp_state = 0,						\
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 01c9807a7f73..1e353f0a2b66 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -32,6 +32,7 @@
 static const struct {
 	void (*sync)(void);
 	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+	void (*wait)(void);
 #ifdef CONFIG_PROVE_RCU
 	int  (*held)(void);
 #endif
@@ -39,16 +40,19 @@ static const struct {
 	[RCU_SYNC] = {
 		.sync = synchronize_rcu,
 		.call = call_rcu,
+		.wait = rcu_barrier,
 		__INIT_HELD(rcu_read_lock_held)
 	},
 	[RCU_SCHED_SYNC] = {
 		.sync = synchronize_sched,
 		.call = call_rcu_sched,
+		.wait = rcu_barrier_sched,
 		__INIT_HELD(rcu_read_lock_sched_held)
 	},
 	[RCU_BH_SYNC] = {
 		.sync = synchronize_rcu_bh,
 		.call = call_rcu_bh,
+		.wait = rcu_barrier_bh,
 		__INIT_HELD(rcu_read_lock_bh_held)
 	},
 };
@@ -195,3 +199,25 @@ void rcu_sync_exit(struct rcu_sync *rsp)
 	}
 	spin_unlock_irq(&rsp->rss_lock);
 }
+
+/**
+ * rcu_sync_dtor() - Clean up an rcu_sync structure
+ * @rsp: Pointer to rcu_sync structure to be cleaned up
+ */
+void rcu_sync_dtor(struct rcu_sync *rsp)
+{
+	int cb_state;
+
+	BUG_ON(rsp->gp_count);
+
+	spin_lock_irq(&rsp->rss_lock);
+	if (rsp->cb_state == CB_REPLAY)
+		rsp->cb_state = CB_PENDING;
+	cb_state = rsp->cb_state;
+	spin_unlock_irq(&rsp->rss_lock);
+
+	if (cb_state != CB_IDLE) {
+		gp_ops[rsp->gp_type].wait();
+		BUG_ON(rsp->cb_state != CB_IDLE);
+	}
+}
--
cgit v1.2.3
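A hypothetical teardown sequence showing where rcu_sync_dtor() fits;
struct my_obj and my_obj_destroy() are invented for illustration:

    struct my_obj {
        struct rcu_sync rss;
        /* other state */
    };

    static void my_obj_destroy(struct my_obj *obj)
    {
        /*
         * The caller must already have balanced every rcu_sync_enter()
         * with rcu_sync_exit(): rcu_sync_dtor() does BUG_ON(gp_count).
         * The dtor then downgrades a CB_REPLAY callback to CB_PENDING
         * and invokes the flavor's rcu_barrier() via ->wait(), so no
         * rcu_sync_func() callback can still touch obj->rss.cb_head
         * once it returns.
         */
        rcu_sync_dtor(&obj->rss);
        kfree(obj);
    }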
From 001dac627ff37433d5528ffb0d897cd19c2b1e43 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Fri, 21 Aug 2015 19:42:57 +0200
Subject: locking/percpu-rwsem: Make use of the rcu_sync infrastructure

Currently, down_write()/up_write() call synchronize_sched_expedited()
twice, which is evil. Change this code to rely on the rcu_sync
primitives. This avoids the _expedited "big hammer", and it can be
faster in the contended case or even in the case when a single thread
does down_write/up_write in a loop.

Of course, a single down_write() will take more time, but on the other
hand it will be much more friendly to the whole system.

To simplify the review, this patch doesn't update the comments; that
is fixed by the next change.

Signed-off-by: Oleg Nesterov
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 include/linux/percpu-rwsem.h  |  3 ++-
 kernel/locking/percpu-rwsem.c | 18 +++++++-----------
 2 files changed, 9 insertions(+), 12 deletions(-)

(limited to 'include')

diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 834c4e52cb2d..c2fa3ecb0dce 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -5,11 +5,12 @@
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
 #include <linux/wait.h>
+#include <linux/rcu_sync.h>
 #include <linux/lockdep.h>
 
 struct percpu_rw_semaphore {
+	struct rcu_sync		rss;
 	unsigned int __percpu	*fast_read_ctr;
-	atomic_t		write_ctr;
 	struct rw_semaphore	rw_sem;
 	atomic_t		slow_read_ctr;
 	wait_queue_head_t	write_waitq;
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 9529a30ec57b..183a71151ac0 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -17,7 +17,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
 
 	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
 	__init_rwsem(&brw->rw_sem, name, rwsem_key);
-	atomic_set(&brw->write_ctr, 0);
+	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
 	atomic_set(&brw->slow_read_ctr, 0);
 	init_waitqueue_head(&brw->write_waitq);
 	return 0;
@@ -33,6 +33,7 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
 	if (!brw->fast_read_ctr)
 		return;
 
+	rcu_sync_dtor(&brw->rss);
 	free_percpu(brw->fast_read_ctr);
 	brw->fast_read_ctr = NULL; /* catch use after free bugs */
 }
@@ -62,13 +63,12 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
  */
 static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
 {
-	bool success = false;
+	bool success;
 
 	preempt_disable();
-	if (likely(!atomic_read(&brw->write_ctr))) {
+	success = rcu_sync_is_idle(&brw->rss);
+	if (likely(success))
 		__this_cpu_add(*brw->fast_read_ctr, val);
-		success = true;
-	}
 	preempt_enable();
 
 	return success;
@@ -149,8 +149,6 @@ static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
  */
 void percpu_down_write(struct percpu_rw_semaphore *brw)
 {
-	/* tell update_fast_ctr() there is a pending writer */
-	atomic_inc(&brw->write_ctr);
 	/*
	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
	 *    so that update_fast_ctr() can't succeed.
@@ -162,7 +160,7 @@ void percpu_down_write(struct percpu_rw_semaphore *brw)
	 *    fast-path, it executes a full memory barrier before we return.
	 *    See R_W case in the comment above update_fast_ctr().
	 */
-	synchronize_sched_expedited();
+	rcu_sync_enter(&brw->rss);
 
 	/* exclude other writers, and block the new readers completely */
 	down_write(&brw->rw_sem);
@@ -183,8 +181,6 @@ void percpu_up_write(struct percpu_rw_semaphore *brw)
	 * Insert the barrier before the next fast-path in down_read,
	 * see W_R case in the comment above update_fast_ctr().
	 */
-	synchronize_sched_expedited();
-
-	/* the last writer unblocks update_fast_ctr() */
-	atomic_dec(&brw->write_ctr);
+	rcu_sync_exit(&brw->rss);
 }
 EXPORT_SYMBOL_GPL(percpu_up_write);
--
cgit v1.2.3
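In use, the converted semaphore keeps its old API; what changes is the
cost model. A sketch (my_sem and the work functions are invented; my_sem
is assumed to have been set up with percpu_init_rwsem()):

    static struct percpu_rw_semaphore my_sem;

    static void my_reader(void)
    {
        percpu_down_read(&my_sem);  /* while rcu_sync_is_idle(&my_sem.rss),
                                     * just a per-CPU counter increment */
        do_read_work();
        percpu_up_read(&my_sem);
    }

    static void my_writer(void)
    {
        percpu_down_write(&my_sem); /* rcu_sync_enter(): at most one normal
                                     * (non-expedited) grace period, amortized
                                     * across back-to-back writers */
        do_write_work();
        percpu_up_write(&my_sem);   /* rcu_sync_exit(): the grace period is
                                     * deferred to a callback, no blocking here */
    }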
From 4bace7344d6dbd7a1b0b801abf24ea9878064317 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Fri, 11 Sep 2015 17:59:18 +0200
Subject: rcu_sync: Cleanup the CONFIG_PROVE_RCU checks

1. Rename __rcu_sync_is_idle() to rcu_sync_lockdep_assert() and
   change it to use rcu_lockdep_assert().

2. Change rcu_sync_is_idle() to return rsp->gp_state == GP_IDLE
   unconditionally; this way we can remove the same check from
   rcu_sync_lockdep_assert() and clearly isolate the debugging code.

Note: rcu_sync_enter()->wait_event(gp_state == GP_PASSED) needs
another CONFIG_PROVE_RCU check, the same as is done in ->sync(); but
this needs some simple preparations in the core RCU code to avoid the
code duplication.

Signed-off-by: Oleg Nesterov
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 include/linux/rcu_sync.h | 7 +++----
 kernel/rcu/sync.c        | 6 +++---
 2 files changed, 6 insertions(+), 7 deletions(-)

(limited to 'include')
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index 8069d6468bc4..a63a33e6196e 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -40,7 +40,7 @@ struct rcu_sync {
 	enum rcu_sync_type	gp_type;
 };
 
-extern bool __rcu_sync_is_idle(struct rcu_sync *);
+extern void rcu_sync_lockdep_assert(struct rcu_sync *);
 
 /**
  * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
@@ -53,10 +53,9 @@ extern bool __rcu_sync_is_idle(struct rcu_sync *);
 static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 {
 #ifdef CONFIG_PROVE_RCU
-	return __rcu_sync_is_idle(rsp);
-#else
-	return !rsp->gp_state; /* GP_IDLE */
+	rcu_sync_lockdep_assert(rsp);
 #endif
+	return !rsp->gp_state; /* GP_IDLE */
 }
 
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 1e353f0a2b66..be922c9f3d37 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -63,10 +63,10 @@ enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
 
 #define	rss_lock	gp_wait.lock
 
 #ifdef CONFIG_PROVE_RCU
-bool __rcu_sync_is_idle(struct rcu_sync *rsp)
+void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
 {
-	WARN_ON(!gp_ops[rsp->gp_type].held());
-	return rsp->gp_state == GP_IDLE;
+	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
+			 "suspicious rcu_sync_is_idle() usage");
 }
 #endif
--
cgit v1.2.3
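Taken together, the series leaves rcu_sync with the lifecycle below. A
closing hypothetical sketch for a dynamically initialized instance (all
names invented):

    static struct rcu_sync my_rss;

    static int __init my_subsys_init(void)
    {
        rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
        return 0;
    }

    static void my_update(void)
    {
        rcu_sync_enter(&my_rss);    /* readers now see !rcu_sync_is_idle() */
        /* exclusive update versus sched-flavor fast-path readers */
        rcu_sync_exit(&my_rss);     /* fastpath re-enabled by rcu_sync_func() */
    }

    static void my_subsys_exit(void)
    {
        rcu_sync_dtor(&my_rss);     /* flush any pending callback before exit */
    }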