author		Hao Xu <haoxu@linux.alibaba.com>	2021-12-07 12:39:49 +0300
committer	Jens Axboe <axboe@kernel.dk>		2021-12-08 01:01:57 +0300
commit		9f8d032a364b2b579c6ce5a62b967056f8711e69 (patch)
tree		87ebc071c99a1bd737df3463414d65225cb70e43 /fs/io_uring.c
parent		4813c3779261fab4067edea28155a98c65a41b5f (diff)
download	linux-9f8d032a364b2b579c6ce5a62b967056f8711e69.tar.xz
io_uring: add helper for task work execution code
Add a helper for task work execution code. We will use it later.
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Hao Xu <haoxu@linux.alibaba.com>
Link: https://lore.kernel.org/r/20211207093951.247840-4-haoxu@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	36
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ad389466a912..85f9459e9072 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2217,6 +2217,25 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
 	percpu_ref_put(&ctx->refs);
 }
 
+static void handle_tw_list(struct io_wq_work_node *node, struct io_ring_ctx **ctx, bool *locked)
+{
+	do {
+		struct io_wq_work_node *next = node->next;
+		struct io_kiocb *req = container_of(node, struct io_kiocb,
+						    io_task_work.node);
+
+		if (req->ctx != *ctx) {
+			ctx_flush_and_put(*ctx, locked);
+			*ctx = req->ctx;
+			/* if not contended, grab and improve batching */
+			*locked = mutex_trylock(&(*ctx)->uring_lock);
+			percpu_ref_get(&(*ctx)->refs);
+		}
+		req->io_task_work.func(req, locked);
+		node = next;
+	} while (node);
+}
+
 static void tctx_task_work(struct callback_head *cb)
 {
 	bool locked = false;
@@ -2239,22 +2258,7 @@ static void tctx_task_work(struct callback_head *cb)
 		if (!node)
 			break;
 
-		do {
-			struct io_wq_work_node *next = node->next;
-			struct io_kiocb *req = container_of(node, struct io_kiocb,
-							    io_task_work.node);
-
-			if (req->ctx != ctx) {
-				ctx_flush_and_put(ctx, &locked);
-				ctx = req->ctx;
-				/* if not contended, grab and improve batching */
-				locked = mutex_trylock(&ctx->uring_lock);
-				percpu_ref_get(&ctx->refs);
-			}
-			req->io_task_work.func(req, &locked);
-			node = next;
-		} while (node);
-
+		handle_tw_list(node, &ctx, &locked);
 		cond_resched();
 	}
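For readers less familiar with the kernel idioms in this diff, the sketch below reproduces the shape of handle_tw_list() as a self-contained userspace C program. All names in it (work_node, request, handle_list) are hypothetical stand-ins, pthread_mutex_trylock replaces the kernel's mutex_trylock (note their return conventions are inverted), and the percpu_ref get/put pairs are omitted; it is a sketch of the pattern, not kernel code.

/*
 * Minimal userspace sketch of the handle_tw_list() pattern: walk an
 * intrusive singly-linked list with container_of() and batch work by
 * holding a context's lock across consecutive nodes that share that
 * context. All names here are hypothetical; refcounting is omitted.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_node {
	struct work_node *next;
};

struct ctx {
	pthread_mutex_t lock;
	const char *name;
};

struct request {
	struct ctx *ctx;
	struct work_node node;		/* intrusive list hook */
};

/* drop the lock if we hold it, mirroring ctx_flush_and_put() */
static void ctx_flush_and_put(struct ctx *ctx, int *locked)
{
	if (ctx && *locked) {
		pthread_mutex_unlock(&ctx->lock);
		*locked = 0;
	}
}

static void handle_list(struct work_node *node, struct ctx **ctx, int *locked)
{
	do {
		/* read ->next first: handling may free/reuse the node */
		struct work_node *next = node->next;
		struct request *req = container_of(node, struct request, node);

		if (req->ctx != *ctx) {
			ctx_flush_and_put(*ctx, locked);
			*ctx = req->ctx;
			/*
			 * if not contended, grab and improve batching
			 * (pthread trylock returns 0 on success, hence the !)
			 */
			*locked = !pthread_mutex_trylock(&(*ctx)->lock);
		}
		printf("handled request on %s (locked=%d)\n",
		       (*ctx)->name, *locked);
		node = next;
	} while (node);
}

int main(void)
{
	struct ctx a = { PTHREAD_MUTEX_INITIALIZER, "ctx-a" };
	struct ctx b = { PTHREAD_MUTEX_INITIALIZER, "ctx-b" };
	struct request reqs[3] = { { &a }, { &a }, { &b } };
	struct ctx *ctx = NULL;
	int locked = 0;

	/* chain the intrusive nodes: reqs[0] -> reqs[1] -> reqs[2] */
	reqs[0].node.next = &reqs[1].node;
	reqs[1].node.next = &reqs[2].node;
	reqs[2].node.next = NULL;

	handle_list(&reqs[0].node, &ctx, &locked);
	ctx_flush_and_put(ctx, &locked);	/* release the last lock */
	return 0;
}

Two details of the kernel helper survive the translation: ->next is sampled before the per-request callback runs, since the callback is free to recycle the request carrying the node, and the per-context lock is taken with a trylock so that an uncontended run of requests on the same ring completes under a single lock acquisition, which is the batching the in-line comment refers to.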