author | Jens Axboe <axboe@kernel.dk> | 2021-07-27 19:25:55 +0300
committer | Jens Axboe <axboe@kernel.dk> | 2021-07-27 19:49:48 +0300
commit | 773af69121ecc6c53d192661af8d53bb3db028ae (patch)
tree | 65fb03a8f411cab376e5d0cc47145afd1f3143f1 /fs
parent | 110aa25c3ce417a44e35990cf8ed22383277933a (diff)
download | linux-773af69121ecc6c53d192661af8d53bb3db028ae.tar.xz
io_uring: always reissue from task_work context
As a safeguard, if we're going to queue async work, do it from task_work
from the original task. This ensures that we can always sanely create
threads, regardless of what the reissue context may be.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
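For readers outside the io_uring code base, the pattern the commit relies on can be sketched in plain user-space C: instead of calling the reissue routine directly from whatever context noticed the failure, the request records a callback and is handed back to the submitting ("original") task, which runs it later from its own context. This is only an illustrative analogy of the deferral idea; the names below (resubmit_req, task_work_add, drain_task_work) are made up for the sketch and are not kernel APIs.

/*
 * Minimal user-space sketch of "defer the reissue to the original task".
 * Assumes one submitting thread and one completion thread; names are
 * illustrative, not part of the kernel or io_uring API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	void (*task_work_func)(struct request *req);	/* deferred work */
	struct request *next;
	int id;
};

static pthread_mutex_t tw_lock = PTHREAD_MUTEX_INITIALIZER;
static struct request *tw_list;	/* pending work for the original task */

/* Runs in the original task's context, where spawning helpers is safe. */
static void resubmit_req(struct request *req)
{
	printf("request %d reissued from the original task\n", req->id);
	free(req);
}

/* Callable from any context: record the callback and queue the request. */
static void task_work_add(struct request *req, void (*func)(struct request *))
{
	req->task_work_func = func;
	pthread_mutex_lock(&tw_lock);
	req->next = tw_list;
	tw_list = req;
	pthread_mutex_unlock(&tw_lock);
}

/* The original task drains its queue and runs the callbacks itself. */
static void drain_task_work(void)
{
	pthread_mutex_lock(&tw_lock);
	struct request *req = tw_list;
	tw_list = NULL;
	pthread_mutex_unlock(&tw_lock);

	while (req) {
		struct request *next = req->next;
		req->task_work_func(req);
		req = next;
	}
}

/* Completion context: it does not reissue directly, it only defers. */
static void *completion_thread(void *arg)
{
	(void)arg;
	struct request *req = malloc(sizeof(*req));
	if (!req)
		return NULL;
	req->id = 1;
	task_work_add(req, resubmit_req);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, completion_thread, NULL);
	pthread_join(&t, NULL);
	drain_task_work();	/* original task picks the work back up */
	return 0;
}

The design point mirrors the commit: the context that detects the need to reissue may not be one from which worker threads can sanely be created, so the request carries a callback back to the original task and the actual queueing happens there.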
Diffstat (limited to 'fs')
-rw-r--r-- | fs/io_uring.c | 10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a4331deb0427..6ba101cd4661 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2060,6 +2060,12 @@ static void io_req_task_queue(struct io_kiocb *req)
 	io_req_task_work_add(req);
 }
 
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+	req->io_task_work.func = io_queue_async_work;
+	io_req_task_work_add(req);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt = io_req_find_next(req);
@@ -2248,7 +2254,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		    !(req->flags & REQ_F_DONT_REISSUE)) {
 			req->iopoll_completed = 0;
 			req_ref_get(req);
-			io_queue_async_work(req);
+			io_req_task_queue_reissue(req);
 			continue;
 		}
 
@@ -2771,7 +2777,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 		req->flags &= ~REQ_F_REISSUE;
 		if (io_resubmit_prep(req)) {
 			req_ref_get(req);
-			io_queue_async_work(req);
+			io_req_task_queue_reissue(req);
 		} else {
 			int cflags = 0;