author     Kent Overstreet <kent.overstreet@linux.dev>  2024-06-23 04:38:58 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>  2024-06-23 07:57:21 +0300
commit     06efa5f30c28eaf237247ca8c4cb46eb62cb6bd9 (patch)
tree       5441281fa1f247b064c9bf33b3dffd56ab59d262 /lib
parent     18e92841e87bc548fcb91530115a66e72eecb10c (diff)
download   linux-06efa5f30c28eaf237247ca8c4cb46eb62cb6bd9.tar.xz
closures: closure_get_not_zero(), closure_return_sync()
Provide new primitives for solving a lifetime issue with bcachefs btree_trans objects.

closure_return_sync(): like closure_sync(), wait synchronously for any outstanding gets. Like closure_return(), the closure is considered "finished" and the ref is left at 0.

closure_get_not_zero(): get a ref on a closure only if it's alive, i.e. the ref is not zero.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
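For context, a minimal usage sketch of how the two primitives are intended to pair up (not part of this commit; struct my_obj and the my_obj_* helpers are hypothetical illustrations, only the closure_* calls are the real API):

#include <linux/closure.h>

/* Hypothetical object embedding a closure: readers may briefly pin it,
 * and teardown must wait for them before the object can go away. */
struct my_obj {
	struct closure	cl;
	/* ... object state ... */
};

static void my_obj_init(struct my_obj *obj)
{
	/* Ref starts at 1, owned by whoever will call my_obj_exit(). */
	closure_init(&obj->cl, NULL);
}

/* Reader side: take a ref only if the object hasn't already finished. */
static bool my_obj_tryget(struct my_obj *obj)
{
	return closure_get_not_zero(&obj->cl);
}

static void my_obj_put(struct my_obj *obj)
{
	closure_put(&obj->cl);
}

/* Teardown: wait for any outstanding gets, then leave the ref at 0 so
 * further closure_get_not_zero() calls fail. */
static void my_obj_exit(struct my_obj *obj)
{
	closure_return_sync(&obj->cl);
}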
Diffstat (limited to 'lib')
-rw-r--r--  lib/closure.c  52
1 file changed, 46 insertions(+), 6 deletions(-)
diff --git a/lib/closure.c b/lib/closure.c
index 2e1ee9fdec08..c971216d9d77 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -13,7 +13,7 @@
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;
@@ -22,12 +22,17 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
		  flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
		r &= ~CLOSURE_GUARD_MASK;
-	if (!r) {
-		smp_acquire__after_ctrl_dep();
+	WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+	     "closure ref hit 0 with incorrect flags set: %x (%u)",
+	     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+	closure_put_after_sub_checks(flags);
-		WARN(flags & ~CLOSURE_DESTRUCTOR,
-		     "closure ref hit 0 with incorrect flags set: %x (%u)",
-		     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+	if (!(flags & CLOSURE_REMAINING_MASK)) {
+		smp_acquire__after_ctrl_dep();
		cl->closure_get_happened = false;
@@ -145,6 +150,41 @@ void __sched __closure_sync(struct closure *cl)
}
EXPORT_SYMBOL(__closure_sync);
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+	struct closure_syncer s = { .task = current };
+
+	cl->s = &s;
+	set_closure_fn(cl, closure_sync_fn, NULL);
+
+	unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+						   &cl->remaining);
+
+	closure_put_after_sub_checks(flags);
+
+	if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+		while (1) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (s.done)
+				break;
+			schedule();
+		}
+
+		__set_current_state(TASK_RUNNING);
+	}
+
+	if (cl->parent)
+		closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
	struct closure_syncer s = { .task = current };
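The diffstat above is limited to 'lib', so the header side of the change isn't shown here. As a rough sketch (an assumption about the interface, not the commit's actual include/linux/closure.h hunk), closure_get_not_zero() can be implemented as a cmpxchg loop on cl->remaining that only takes a ref while the count in CLOSURE_REMAINING_MASK is non-zero:

/* Sketch only: the real definition belongs in include/linux/closure.h and is
 * not part of this diff.  Succeeds, taking a ref, iff the refcount bits are
 * currently non-zero. */
static inline bool closure_get_not_zero(struct closure *cl)
{
	int old = atomic_read(&cl->remaining);

	do {
		if (!(old & CLOSURE_REMAINING_MASK))
			return false;
	} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));

	return true;
}

This is also why closure_return_sync() leaves the ref at 0 rather than reinitializing it to 1 the way closure_sync() does: once the count has dropped to 0, a loop like the one above can never succeed again, so no new references can appear after teardown.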