-rw-r--r--	io_uring/io_uring.c	37
-rw-r--r--	io_uring/refs.h	7
2 files changed, 9 insertions, 35 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cc5fa4e1b344..8bd5db2056ee 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -928,7 +928,6 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_rsrc_node *rsrc_node = NULL;
 
 	/*
 	 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
@@ -945,42 +944,10 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 		if (!io_fill_cqe_req(ctx, req))
 			io_req_cqe_overflow(req);
 	}
-
-	/*
-	 * If we're the last reference to this request, add to our locked
-	 * free_list cache.
-	 */
-	if (req_ref_put_and_test(req)) {
-		if (req->flags & IO_REQ_LINK_FLAGS) {
-			if (req->flags & IO_DISARM_MASK)
-				io_disarm_next(req);
-			if (req->link) {
-				io_req_task_queue(req->link);
-				req->link = NULL;
-			}
-		}
-		io_put_kbuf_comp(req);
-		if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
-			io_clean_op(req);
-		io_put_file(req);
-
-		rsrc_node = req->rsrc_node;
-		/*
-		 * Selected buffer deallocation in io_clean_op() assumes that
-		 * we don't hold ->completion_lock. Clean them here to avoid
-		 * deadlocks.
-		 */
-		io_put_task_remote(req->task);
-		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
-		ctx->locked_free_nr++;
-	}
 	io_cq_unlock_post(ctx);
 
-	if (rsrc_node) {
-		io_ring_submit_lock(ctx, issue_flags);
-		io_put_rsrc_node(ctx, rsrc_node);
-		io_ring_submit_unlock(ctx, issue_flags);
-	}
+	/* called from io-wq submit work only, the ref won't drop to zero */
+	req_ref_put(req);
 }
 
 void io_req_defer_failed(struct io_kiocb *req, s32 res)
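The two added lines replace the whole inline-completion block: the new comment records the convention that makes the plain put safe, namely that the io-wq worker holds its own reference across issue, so io_req_complete_post() can never drop the last one. A minimal userspace sketch of that two-sided convention, using illustrative names (worker_issue(), complete_post()) rather than the kernel's io-wq code:

#include <stdatomic.h>
#include <stdlib.h>

struct request {
	atomic_int refs;
};

/* Completion side: drops a reference it knows is not the last one. */
static void complete_post(struct request *req)
{
	atomic_fetch_sub(&req->refs, 1);		/* cf. req_ref_put() */
}

/*
 * Worker side: pins the request with its own reference before issuing,
 * which is what makes the non-testing put in complete_post() safe.
 */
static void worker_issue(struct request *req)
{
	atomic_fetch_add(&req->refs, 1);		/* worker's pin */
	complete_post(req);				/* stand-in for the actual issue path */
	if (atomic_fetch_sub(&req->refs, 1) == 1)	/* cf. req_ref_put_and_test() */
		free(req);				/* worker held the last reference */
}

The payoff in the patch is not the cheaper decrement itself but that the completion path sheds the link-disarming, cleanup, and free-list caching it used to carry for the last-reference case.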
diff --git a/io_uring/refs.h b/io_uring/refs.h
index 1336de3f2a30..63982ead9f7d 100644
--- a/io_uring/refs.h
+++ b/io_uring/refs.h
@@ -33,6 +33,13 @@ static inline void req_ref_get(struct io_kiocb *req)
 	atomic_inc(&req->refs);
 }
 
+static inline void req_ref_put(struct io_kiocb *req)
+{
+	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+	atomic_dec(&req->refs);
+}
+
 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
 {
 	if (!(req->flags & REQ_F_REFCOUNT)) {
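The two WARN_ON_ONCE() checks spell out req_ref_put()'s contract: the request must actually be refcounted, and the counter must be neither zero nor close to overflow before the decrement. A standalone C11 analogue of the put pair, assuming plain assert() in place of WARN_ON_ONCE() and an illustrative struct request:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct request {
	atomic_int refs;
};

/* Drop a reference the caller guarantees is not the last one. */
static void req_put(struct request *req)
{
	int old = atomic_fetch_sub_explicit(&req->refs, 1, memory_order_acq_rel);
	assert(old > 1);	/* hitting zero here means the contract was broken */
}

/* Drop a reference that may be the last one; true means "now free it". */
static bool req_put_and_test(struct request *req)
{
	return atomic_fetch_sub_explicit(&req->refs, 1, memory_order_acq_rel) == 1;
}

Keeping a separate non-testing variant lets call sites such as the new io_req_complete_post() document, and cheaply sanity-check, that they are not responsible for freeing the request.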