author    Jens Axboe <axboe@kernel.dk>  2024-01-04 18:46:23 +0300
committer Jens Axboe <axboe@kernel.dk>  2024-08-25 17:27:01 +0300
commit    cebf123c634ab78d39af94caf0fc9cd2c60d82c3 (patch)
tree      e0a23fc104066b7b9dd1254c3d48a9a7b4b2dfcd /io_uring
parent    45a41e74b8f472254c64b42713bad0686350b0c6 (diff)
io_uring: implement our own schedule timeout handling
In preparation for having two distinct timeouts, and to avoid waking the task if we don't need to.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
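For context, here is a minimal userspace sketch (my own illustration, not part of the patch) of the timed CQE wait that exercises this kernel path. It assumes liburing is installed; on kernels with IORING_FEAT_EXT_ARG, the timeout handed to io_uring_wait_cqe_timeout() is forwarded to io_uring_enter() and ends up as iowq->timeout in the wait code touched below, which with this patch is driven by an on-stack hrtimer rather than schedule_hrtimeout_range_clock().

/* Hypothetical illustration, not part of the patch: wait for a CQE with a
 * 1-second timeout. No request is submitted, so the wait is expected to
 * return -ETIME, i.e. take the timeout path shown in the diff below. */
#include <errno.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	ret = io_uring_wait_cqe_timeout(&ring, &cqe, &ts);
	if (ret == -ETIME)
		printf("wait timed out as expected\n");
	else
		printf("wait returned %d\n", ret);

	io_uring_queue_exit(&ring);
	return 0;
}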
Diffstat (limited to 'io_uring')
-rw-r--r--   io_uring/io_uring.c   35
-rw-r--r--   io_uring/io_uring.h    2
2 files changed, 33 insertions(+), 4 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9e2b8d4c05db..c443bac8bad8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2350,6 +2350,34 @@ static bool current_pending_io(void)
 	return percpu_counter_read_positive(&tctx->inflight);
 }
 
+static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer)
+{
+	struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
+
+	WRITE_ONCE(iowq->hit_timeout, 1);
+	wake_up_process(iowq->wq.private);
+	return HRTIMER_NORESTART;
+}
+
+static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
+				      clockid_t clock_id)
+{
+	iowq->hit_timeout = 0;
+	hrtimer_init_on_stack(&iowq->t, clock_id, HRTIMER_MODE_ABS);
+	iowq->t.function = io_cqring_timer_wakeup;
+	hrtimer_set_expires_range_ns(&iowq->t, iowq->timeout, 0);
+	hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS);
+
+	if (!READ_ONCE(iowq->hit_timeout))
+		schedule();
+
+	hrtimer_cancel(&iowq->t);
+	destroy_hrtimer_on_stack(&iowq->t);
+	__set_current_state(TASK_RUNNING);
+
+	return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
+}
+
 static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 				     struct io_wait_queue *iowq)
 {
@@ -2362,11 +2390,10 @@ static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	 */
 	if (current_pending_io())
 		current->in_iowait = 1;
-	if (iowq->timeout == KTIME_MAX)
+	if (iowq->timeout != KTIME_MAX)
+		ret = io_cqring_schedule_timeout(iowq, ctx->clockid);
+	else
 		schedule();
-	else if (!schedule_hrtimeout_range_clock(&iowq->timeout, 0,
-						 HRTIMER_MODE_ABS, ctx->clockid))
-		ret = -ETIME;
 	current->in_iowait = 0;
 	return ret;
 }
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 9935819f12b7..f95c1b080f4b 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -40,7 +40,9 @@ struct io_wait_queue {
 	struct io_ring_ctx *ctx;
 	unsigned cq_tail;
 	unsigned nr_timeouts;
+	int hit_timeout;
 	ktime_t timeout;
+	struct hrtimer t;
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	ktime_t napi_busy_poll_dt;