author	Oleg Nesterov <oleg@redhat.com>	2015-08-21 20:42:52 +0300
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-10-06 21:25:21 +0300
commit	07899a6e5f56136028c44a57ad0451e797365ac3
tree	48fc600387bae30a64284ae0d7acfb767277cfde
parent	3a518b76af7bb411efe6dd090fbf098e29accb2e
download	linux-07899a6e5f56136028c44a57ad0451e797365ac3.tar.xz
rcu_sync: Introduce rcu_sync_dtor()
This commit allows rcu_sync structures to be safely deallocated. The trick is to add a new ->wait field to the gp_ops array. This field is a pointer to the rcu_barrier() function corresponding to the flavor of RCU in question. This allows a new rcu_sync_dtor() to wait for any outstanding callbacks before freeing the rcu_sync structure.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
-rw-r--r--	include/linux/rcu_sync.h	1
-rw-r--r--	kernel/rcu/sync.c	26
2 files changed, 27 insertions(+), 0 deletions(-)
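The use-after-free window this closes: rcu_sync_exit() queues a callback through gp_ops->call(), and until that callback has run, the rcu_sync structure must stay allocated. Below is a minimal sketch of the teardown pattern the new destructor enables; my_obj and my_obj_free() are hypothetical and not part of this patch.

#include <linux/rcu_sync.h>
#include <linux/slab.h>

/* Hypothetical container embedding an rcu_sync; for illustration only. */
struct my_obj {
	struct rcu_sync rss;
	/* ... */
};

static void my_obj_free(struct my_obj *obj)
{
	/*
	 * All rcu_sync_enter()/rcu_sync_exit() pairs have completed, but
	 * the callback queued by the last rcu_sync_exit() may still be
	 * pending.  rcu_sync_dtor() waits for it, via the flavor's
	 * rcu_barrier(), so the memory below cannot be touched after free.
	 */
	rcu_sync_dtor(&obj->rss);
	kfree(obj);
}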
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index 1f2d4fc30b04..8069d6468bc4 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -62,6 +62,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
+extern void rcu_sync_dtor(struct rcu_sync *);

#define __RCU_SYNC_INITIALIZER(name, type) { \
.gp_state = 0, \
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 01c9807a7f73..1e353f0a2b66 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -32,6 +32,7 @@
static const struct {
void (*sync)(void);
void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+ void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
int (*held)(void);
#endif
@@ -39,16 +40,19 @@ static const struct {
[RCU_SYNC] = {
.sync = synchronize_rcu,
.call = call_rcu,
+ .wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_held)
},
[RCU_SCHED_SYNC] = {
.sync = synchronize_sched,
.call = call_rcu_sched,
+ .wait = rcu_barrier_sched,
__INIT_HELD(rcu_read_lock_sched_held)
},
[RCU_BH_SYNC] = {
.sync = synchronize_rcu_bh,
.call = call_rcu_bh,
+ .wait = rcu_barrier_bh,
__INIT_HELD(rcu_read_lock_bh_held)
},
};
@@ -195,3 +199,25 @@ void rcu_sync_exit(struct rcu_sync *rsp)
}
spin_unlock_irq(&rsp->rss_lock);
}
+
+/**
+ * rcu_sync_dtor() - Clean up an rcu_sync structure
+ * @rsp: Pointer to rcu_sync structure to be cleaned up
+ */
+void rcu_sync_dtor(struct rcu_sync *rsp)
+{
+ int cb_state;
+
+ BUG_ON(rsp->gp_count);
+
+ spin_lock_irq(&rsp->rss_lock);
+ if (rsp->cb_state == CB_REPLAY)
+ rsp->cb_state = CB_PENDING;
+ cb_state = rsp->cb_state;
+ spin_unlock_irq(&rsp->rss_lock);
+
+ if (cb_state != CB_IDLE) {
+ gp_ops[rsp->gp_type].wait();
+ BUG_ON(rsp->cb_state != CB_IDLE);
+ }
+}
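Note why the CB_REPLAY -> CB_PENDING downgrade under ->rss_lock matters: rcu_sync_func(), the callback defined earlier in this file, requeues itself when it observes CB_REPLAY, so downgrading first guarantees that a single flavor-specific barrier via gp_ops[rsp->gp_type].wait() drains the last callback instead of racing with a rearm. The BUG_ON(rsp->gp_count) documents the contract that every rcu_sync_enter() must already be matched by rcu_sync_exit(). A hedged sketch of the full lifecycle, with RCU_SCHED_SYNC as an arbitrary flavor choice:

static struct rcu_sync rss;

static void example(void)
{
	rcu_sync_init(&rss, RCU_SCHED_SYNC);

	rcu_sync_enter(&rss);	/* readers forced onto the slow path */
	/* ... writer-side critical work ... */
	rcu_sync_exit(&rss);	/* queues rcu_sync_func() via call_rcu_sched() */

	rcu_sync_dtor(&rss);	/* waits via rcu_barrier_sched() before reuse */
}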