author		Jens Axboe <axboe@kernel.dk>	2021-09-24 16:12:27 +0300
committer	Jens Axboe <axboe@kernel.dk>	2021-09-24 19:24:34 +0300
commit		8bab4c09f24ec8d4a7a78ab343620f89d3a24804 (patch)
tree		ab8e7448a252e0f935e3c52f8985ea69478b2b7a
parent		5b7aa38d86f348847a48f71e9ac7715406de900e (diff)
download	linux-8bab4c09f24ec8d4a7a78ab343620f89d3a24804.tar.xz
io_uring: allow conditional reschedule for intensive iterators
If we have a lot of threads and rings, the tctx list can get quite big.
This is especially true if we keep creating new threads and rings.
Likewise for the provided buffers list. Be nice and insert a conditional
reschedule point while iterating the nodes for deletion.

Link: https://lore.kernel.org/io-uring/00000000000064b6b405ccb41113@google.com/
Reported-by: syzbot+111d2a03f51f5ae73775@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/io_uring.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
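The patch applies a common kernel pattern: a long-running loop that never sleeps should give the scheduler a chance to preempt it by calling cond_resched() on each pass. As a rough userspace sketch of the same idea (assumptions: sched_yield() stands in for cond_resched(), though it yields unconditionally and is far coarser, and every identifier below is made up for illustration and is not part of this patch):

#include <sched.h>
#include <stdio.h>

/*
 * Userspace sketch of the conditional-reschedule pattern used in the
 * diff below: a long CPU-bound iteration periodically yields the CPU
 * so other runnable tasks are not starved. Illustrative only; in the
 * kernel, cond_resched() reschedules only when the scheduler has
 * flagged the task, whereas sched_yield() here yields every time it
 * is reached.
 */
int main(void)
{
	unsigned long sum = 0;

	for (unsigned long i = 0; i < (1UL << 24); i++) {
		sum += i * i;		/* stand-in for per-node teardown work */
		if ((i & 0xffff) == 0)	/* yield periodically, not every pass */
			sched_yield();
	}
	printf("sum=%lu\n", sum);
	return 0;
}

In the kernel itself, cond_resched() is cheap when no reschedule is pending, which is why it can safely sit inside per-node teardown loops like the two patched below.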
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7707cdb7b372..ef3c94a55fbd 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -9173,8 +9173,10 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct io_buffer *buf;
 	unsigned long index;
 
-	xa_for_each(&ctx->io_buffers, index, buf)
+	xa_for_each(&ctx->io_buffers, index, buf) {
 		__io_remove_buffers(ctx, buf, index, -1U);
+		cond_resched();
+	}
 }
 
 static void io_req_cache_free(struct list_head *list)
@@ -9672,8 +9674,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
 	struct io_tctx_node *node;
 	unsigned long index;
 
-	xa_for_each(&tctx->xa, index, node)
+	xa_for_each(&tctx->xa, index, node) {
 		io_uring_del_tctx_node(index);
+		cond_resched();
+	}
 	if (wq) {
 		/*
 		 * Must be after io_uring_del_task_file() (removes nodes under