author     Pavel Begunkov <asml.silence@gmail.com>    2025-05-09 14:12:54 +0300
committer  Jens Axboe <axboe@kernel.dk>               2025-05-12 16:52:52 +0300
commit     8fb7aee05591fd4d3dca1460448a59e95fa821c3 (patch)
tree       e8bc51a80fee546a04799a688b94495a9dae079d /include/linux
parent     63de899cb6220357dea9d0f4e5aa459ff5193bb0 (diff)
download   linux-8fb7aee05591fd4d3dca1460448a59e95fa821c3.tar.xz
io_uring: drain based on allocated reqs
Don't rely on CQ sequence numbers for draining, as it has become messy and needs cq_extra adjustments. Instead, base it on the number of allocated requests and only allow flushing when all requests are in the drain list.

As a result, cq_extra is gone, there is no overhead for its accounting in aux cqe posting, less bloating as it was inlined before, and it's in general simpler than trying to track where we should bump it and where it should be put back, like in cases of overflow. Also, it'll likely help with cleaning and unifying some of the CQ posting helpers.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/46ece1e34320b046c06fee2498d6b4cd12a700f2.1746788718.git.asml.silence@gmail.com
Link: https://lore.kernel.org/r/24497b04b004bceada496033d3c9d09ff8e81ae9.1746944903.git.asml.silence@gmail.com
[axboe: fold in fix from link2]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
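To illustrate the gating idea described above, here is a minimal userspace C sketch of draining based on allocated-request counts rather than CQ sequence numbers. Only the nr_drained name comes from the diff below; ring_state, nr_req_allocated, and can_flush_drain_list are hypothetical stand-ins for the actual io_uring bookkeeping, not the kernel implementation.

/* Conceptual sketch, not kernel code: a drained request may only be
 * flushed once every allocated request has reached the drain list,
 * i.e. nothing older is still in flight that could complete later. */
#include <stdbool.h>
#include <stdio.h>

struct ring_state {
        unsigned nr_req_allocated;  /* hypothetical: requests currently allocated */
        unsigned nr_drained;        /* mirrors the field added in the diff below */
};

static bool can_flush_drain_list(const struct ring_state *s)
{
        return s->nr_drained != 0 && s->nr_drained == s->nr_req_allocated;
}

int main(void)
{
        struct ring_state s = { .nr_req_allocated = 3, .nr_drained = 1 };

        printf("flush? %d\n", can_flush_drain_list(&s)); /* 0: reqs still in flight */
        s.nr_drained = 3;
        printf("flush? %d\n", can_flush_drain_list(&s)); /* 1: all allocated reqs drained */
        return 0;
}

The point of the counter-based check is that it needs no per-CQE sequence adjustments (the old cq_extra), only a comparison at flush time.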
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/io_uring_types.h  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 73b289b48280..00dbd7cd0e7d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -341,7 +341,6 @@ struct io_ring_ctx {
         unsigned cached_cq_tail;
         unsigned cq_entries;
         struct io_ev_fd __rcu *io_ev_fd;
-        unsigned cq_extra;
 
         void *cq_wait_arg;
         size_t cq_wait_size;
@@ -417,6 +416,7 @@ struct io_ring_ctx {
         struct callback_head poll_wq_task_work;
         struct list_head defer_list;
+        unsigned nr_drained;
 
         struct io_alloc_cache msg_cache;
         spinlock_t msg_lock;