author    Pavel Begunkov <asml.silence@gmail.com>    2024-03-19 01:00:29 +0300
committer Jens Axboe <axboe@kernel.dk>               2024-04-15 17:10:24 +0300
commit    92219afb980e01d88a1ac6d8fe01dcae255c4279 (patch)
tree      95d35e41e2a0d2aaaa4f1611a65c9b9667617320
parent    6e6b8c62120a22acd8cb759304e4cd2e3215d488 (diff)
download  linux-92219afb980e01d88a1ac6d8fe01dcae255c4279.tar.xz
io_uring: force tw ctx locking
We can run normal task_work without locking the ctx, however we try to lock anyway and most handlers prefer or require it locked. It might have been interesting for a multi-submitter ring with high contention completing async read/write requests via task_work, however that will still need to go through io_req_complete_post() and potentially take the lock for rsrc node putting or some other case. In other words, it's hard to care about it, so always force the locking. The case described would also need the lock because of various io_uring caches.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/6ae858f2ef562e6ed9f13c60978c0d48926954ba.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
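As a rough illustration of the change, the following is a minimal user-space sketch of the difference between the old opportunistic trylock batching and the unconditional locking this patch switches to. It uses a pthread mutex as a stand-in for ctx->uring_lock, and the names (struct ctx, handle_one_work(), run_work_trylock(), run_work_locked()) are made up for the example; it is not the io_uring code itself.

/*
 * Simplified sketch of the locking change: the old path only took the
 * lock opportunistically (trylock) to improve batching, while the new
 * path always takes it, since most handlers want it held anyway.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;		/* stand-in for ctx->uring_lock */
	int batched_completions;	/* work flushed while the lock is held */
};

static void handle_one_work(struct ctx *ctx, bool locked)
{
	if (locked)
		ctx->batched_completions++;	/* can batch under the lock */
	else
		printf("completed without the lock, no batching\n");
}

/* Old behaviour: lock only if uncontended ("grab and improve batching"). */
static void run_work_trylock(struct ctx *ctx)
{
	bool locked = pthread_mutex_trylock(&ctx->lock) == 0;

	handle_one_work(ctx, locked);
	if (locked)
		pthread_mutex_unlock(&ctx->lock);
}

/* New behaviour: always take the lock, so handlers can rely on it. */
static void run_work_locked(struct ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	handle_one_work(ctx, true);
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

	run_work_trylock(&ctx);		/* may or may not batch */
	run_work_locked(&ctx);		/* always batches */
	printf("batched completions: %d\n", ctx.batched_completions);
	return 0;
}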
-rw-r--r--  io_uring/io_uring.c  21
1 file changed, 9 insertions, 12 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index e9da2ad67c2e..13fb5958454f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1185,8 +1185,9 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
if (req->ctx != ctx) {
ctx_flush_and_put(ctx, &ts);
ctx = req->ctx;
- /* if not contended, grab and improve batching */
- ts.locked = mutex_trylock(&ctx->uring_lock);
+
+ ts.locked = true;
+ mutex_lock(&ctx->uring_lock);
percpu_ref_get(&ctx->refs);
}
INDIRECT_CALL_2(req->io_task_work.func,
@@ -1447,11 +1448,9 @@ again:
if (io_run_local_work_continue(ctx, ret, min_events))
goto again;
- if (ts->locked) {
- io_submit_flush_completions(ctx);
- if (io_run_local_work_continue(ctx, ret, min_events))
- goto again;
- }
+ io_submit_flush_completions(ctx);
+ if (io_run_local_work_continue(ctx, ret, min_events))
+ goto again;
trace_io_uring_local_work_run(ctx, ret, loops);
return ret;
@@ -1475,14 +1474,12 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
{
- struct io_tw_state ts = {};
+ struct io_tw_state ts = { .locked = true };
int ret;
- ts.locked = mutex_trylock(&ctx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
ret = __io_run_local_work(ctx, &ts, min_events);
- if (ts.locked)
- mutex_unlock(&ctx->uring_lock);
-
+ mutex_unlock(&ctx->uring_lock);
return ret;
}