author      Pavel Begunkov <asml.silence@gmail.com>    2023-01-05 14:22:24 +0300
committer   Jens Axboe <axboe@kernel.dk>               2023-01-30 01:17:39 +0300
commit      3fcf19d592d5cb63eb209400b22055651e3c27d0 (patch)
tree        7709c204357d7a58896f73272698835ef7a1c170 /io_uring/io_uring.c
parent      140102ae9a9f2f83f0592b98b3c5c6119d9a9b32 (diff)
download    linux-3fcf19d592d5cb63eb209400b22055651e3c27d0.tar.xz
io_uring: parse check_cq out of wq waiting
We already avoid flushing overflows in io_cqring_wait_schedule() but
only return an error for the outer loop to handle it. Minimise it even
further by moving all ->check_cq parsing there.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9dfcec3121013f98208dbf79368d636d74e1231a.1672916894.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
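
To make the new split easier to follow than the interleaved hunks below: io_cqring_wait_schedule() keeps only a cheap fast-path test of ->check_cq and returns 1, while the outer loop in io_cqring_wait() now does all of the parsing (flushing overflows and failing the wait with -EBADR when CQEs were dropped). The snippet below is a minimal, self-contained userspace model of that control flow, not kernel code: struct model_ctx, wait_schedule() and cqring_wait() are stand-ins, and only the flag names and the overall shape mirror the patch.

    /* userspace model, not kernel code: all helpers below are stand-ins */
    #include <errno.h>
    #include <stdio.h>

    #define BIT(nr)                         (1UL << (nr))
    #define IO_CHECK_CQ_OVERFLOW_BIT        0   /* flag names mirror the kernel */
    #define IO_CHECK_CQ_DROPPED_BIT         1

    struct model_ctx {
            unsigned long check_cq;         /* stands in for ctx->check_cq */
            int cq_events;                  /* stands in for ready CQEs */
    };

    /* stands in for io_cqring_wait_schedule(): now only a fast-path flag test */
    static int wait_schedule(struct model_ctx *ctx)
    {
            if (ctx->check_cq)
                    return 1;               /* caller parses ->check_cq and retries */
            /* the real helper runs task_work and schedules with a timeout here */
            return 1;
    }

    /* stands in for the retry loop in io_cqring_wait() */
    static int cqring_wait(struct model_ctx *ctx, int min_events)
    {
            int ret;

            do {
                    unsigned long check_cq;

                    ret = wait_schedule(ctx);

                    check_cq = ctx->check_cq;
                    if (check_cq) {
                            /* overflow: "flush" it so a CQE becomes visible, then retry */
                            if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
                                    ctx->check_cq &= ~BIT(IO_CHECK_CQ_OVERFLOW_BIT);
                                    ctx->cq_events++;
                            }
                            /* dropped CQEs terminate the wait with -EBADR */
                            if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
                                    ret = -EBADR;
                                    break;
                            }
                    }
                    if (ctx->cq_events >= min_events)
                            break;
            } while (ret > 0);

            return ret < 0 ? ret : 0;
    }

    int main(void)
    {
            struct model_ctx ctx = { .check_cq = BIT(IO_CHECK_CQ_OVERFLOW_BIT) };

            /* one modelled overflow is flushed on the first pass, then the wait completes */
            printf("cqring_wait() -> %d\n", cqring_wait(&ctx, 1));
            return 0;
    }

Built with a plain C compiler on Linux (EBADR comes from <errno.h>), it prints "cqring_wait() -> 0": the modelled overflow is flushed on the first pass and the wait then completes.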
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--   io_uring/io_uring.c   32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 97b749203ba8..524ef5a2bb9c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2471,21 +2471,13 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                                           ktime_t *timeout)
 {
         int ret;
-        unsigned long check_cq;
 
+        if (unlikely(READ_ONCE(ctx->check_cq)))
+                return 1;
         /* make sure we run task_work before checking for signals */
         ret = io_run_task_work_sig(ctx);
         if (ret || io_should_wake(iowq))
                 return ret;
-
-        check_cq = READ_ONCE(ctx->check_cq);
-        if (unlikely(check_cq)) {
-                /* let the caller flush overflows, retry */
-                if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-                        return 1;
-                if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
-                        return -EBADR;
-        }
         if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
                 return -ETIME;
         return 1;
@@ -2551,13 +2543,25 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
         trace_io_uring_cqring_wait(ctx, min_events);
         do {
-                if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
-                        finish_wait(&ctx->cq_wait, &iowq.wq);
-                        io_cqring_do_overflow_flush(ctx);
-                }
+                unsigned long check_cq;
+
                 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                           TASK_INTERRUPTIBLE);
                 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+
+                check_cq = READ_ONCE(ctx->check_cq);
+                if (unlikely(check_cq)) {
+                        /* let the caller flush overflows, retry */
+                        if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
+                                finish_wait(&ctx->cq_wait, &iowq.wq);
+                                io_cqring_do_overflow_flush(ctx);
+                        }
+                        if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
+                                ret = -EBADR;
+                                break;
+                        }
+                }
+
                 if (__io_cqring_events_user(ctx) >= min_events)
                         break;
                 cond_resched();
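
For reference, the wait loop in io_cqring_wait() with the hunk above applied looks as follows; this is reconstructed from the diff, with the surrounding function body elided and the loop condition (which the patch does not touch) shown only for context.

    trace_io_uring_cqring_wait(ctx, min_events);
    do {
            unsigned long check_cq;

            prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                      TASK_INTERRUPTIBLE);
            ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);

            check_cq = READ_ONCE(ctx->check_cq);
            if (unlikely(check_cq)) {
                    /* let the caller flush overflows, retry */
                    if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
                            finish_wait(&ctx->cq_wait, &iowq.wq);
                            io_cqring_do_overflow_flush(ctx);
                    }
                    if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
                            ret = -EBADR;
                            break;
                    }
            }

            if (__io_cqring_events_user(ctx) >= min_events)
                    break;
            cond_resched();
    } while (ret > 0);      /* loop condition unchanged by this patch */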