author	Pavel Begunkov <asml.silence@gmail.com>	2020-06-15 10:24:05 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-06-15 17:51:34 +0300
commit	67c4d9e693e3bb7fb968af24e3584f821a78ba56
tree	42e04a457b996b4609606b411f5aa16481d54f7d /fs
parent	44e728b8aae0bb6d4229129083974f9dea43f50b
io_uring: batch cancel in io_uring_cancel_files()
Instead of waiting for each request one by one, first try to cancel all of
them in a single batched pass, then go over inflight_list etc. to reap the
leftovers.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	13	+++++++++++++
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ad5128a40c14..b6bcd5a7f4bc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7366,9 +7366,22 @@ static int io_uring_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static bool io_wq_files_match(struct io_wq_work *work, void *data)
+{
+	struct files_struct *files = data;
+
+	return work->files == files;
+}
+
 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 				  struct files_struct *files)
 {
+	if (list_empty_careful(&ctx->inflight_list))
+		return;
+
+	/* cancel all at once, should be faster than doing it one by one */
+	io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
+
 	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_kiocb *cancel_req = NULL, *req;
		DEFINE_WAIT(wait);
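For readers outside the io-wq code, here is a minimal user-space sketch of the
pattern the hunk above applies: hand a predicate callback plus opaque data to a
queue walker that cancels every match in one pass, leaving only stragglers for
the slow per-request loop. All names below (struct work, work_match_fn,
cancel_all_matching, files_match) are hypothetical stand-ins; only the callback
shape mirrors io_wq_cancel_cb() and io_wq_files_match() from the patch.

	/*
	 * Hypothetical user-space sketch of batch cancellation via a
	 * predicate callback; not kernel code.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct work {
		struct work *next;
		void *files;		/* stand-in for work->files */
		int id;
	};

	/* Mirrors the shape of io-wq's matcher callback: true => cancel. */
	typedef bool (*work_match_fn)(struct work *work, void *data);

	static bool files_match(struct work *work, void *data)
	{
		return work->files == data;
	}

	/* Walk the queue once and drop everything the predicate matches. */
	static int cancel_all_matching(struct work **head, work_match_fn match,
				       void *data)
	{
		int cancelled = 0;

		while (*head) {
			struct work *w = *head;

			if (match(w, data)) {
				*head = w->next;	/* unlink and cancel */
				printf("cancelled work %d\n", w->id);
				free(w);
				cancelled++;
			} else {
				head = &w->next;
			}
		}
		return cancelled;
	}

	int main(void)
	{
		int file_a, file_b;	/* fake files_struct identities */
		struct work *queue = NULL;

		for (int i = 0; i < 4; i++) {
			struct work *w = malloc(sizeof(*w));

			w->id = i;
			w->files = (i % 2) ? &file_a : &file_b;
			w->next = queue;
			queue = w;
		}

		/* Batch-cancel everything owned by file_a in one pass. */
		cancel_all_matching(&queue, files_match, &file_a);

		/* Leftovers would go to the slow per-request reap loop. */
		while (queue) {
			struct work *w = queue;

			queue = w->next;
			printf("still inflight: work %d\n", w->id);
			free(w);
		}
		return 0;
	}

A single O(n) pass driven by the predicate replaces n rounds of
find-one-request-then-wait, which is the speedup the commit message refers to.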