author     Pavel Begunkov <asml.silence@gmail.com>   2021-03-25 21:32:42 +0300
committer  Jens Axboe <axboe@kernel.dk>              2021-03-27 23:09:11 +0300
commit     1ee4160c73b2102a52bc97a4128a89c34821414f (patch)
tree       6972412a96f6cb6c26a1b6d60200b758eca3d33d /fs/io_uring.c
parent     1e4cf0d3d072173ee70757ee4aec11b2839705f9 (diff)
download   linux-1ee4160c73b2102a52bc97a4128a89c34821414f.tar.xz
io_uring: fix timeout cancel return code
When we cancel a timeout we should emit a sensible return code, like
-ECANCELED but not 0, otherwise it may trick users.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7b0ad1065e3bd1994722702bd0ba9e7bc9b0683b.1616696997.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
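The "return code" here is what userspace reads in cqe->res of the timeout's
completion entry: conventionally -ETIME when the timeout expires, 0 when it
completes because its completion-count target was reached, and -ECANCELED
when it is cancelled. The hunks below pass -ECANCELED for timeouts killed
via io_kill_timeouts() (ring/task teardown) and keep 0 in io_flush_timeouts(),
where the timeout completes normally. A minimal liburing sketch, illustrative
only and not part of this patch (error handling trimmed), showing how an
application observes these result codes:

/*
 * Arm a timeout, then cancel it with IORING_OP_TIMEOUT_REMOVE; the
 * timeout's CQE is expected to carry -ECANCELED in cqe->res, not 0.
 */
#include <errno.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
        int i;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* Arm a 5s timeout, tagged with user_data 1. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_timeout(sqe, &ts, 0, 0);
        sqe->user_data = 1;

        /* Ask for that timeout to be removed, tagged with user_data 2. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_timeout_remove(sqe, 1, 0);
        sqe->user_data = 2;

        io_uring_submit(&ring);

        /* Two CQEs come back: the removed timeout should report -ECANCELED,
         * the remove request itself reports 0 on success. */
        for (i = 0; i < 2; i++) {
                if (io_uring_wait_cqe(&ring, &cqe) < 0)
                        break;
                printf("user_data=%llu res=%d (%s)\n",
                       (unsigned long long)cqe->user_data, cqe->res,
                       cqe->res == -ECANCELED ? "ECANCELED" :
                       cqe->res == -ETIME ? "ETIME" : "other");
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
}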
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2d43f7b87083..229ab9bfb45b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1247,7 +1247,7 @@ static void io_queue_async_work(struct io_kiocb *req)
 		io_queue_linked_timeout(link);
 }
 
-static void io_kill_timeout(struct io_kiocb *req)
+static void io_kill_timeout(struct io_kiocb *req, int status)
 {
 	struct io_timeout_data *io = req->async_data;
 	int ret;
@@ -1257,7 +1257,7 @@ static void io_kill_timeout(struct io_kiocb *req)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_cqring_fill_event(req, 0);
+		io_cqring_fill_event(req, status);
 		io_put_req_deferred(req, 1);
 	}
 }
@@ -1274,7 +1274,7 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	spin_lock_irq(&ctx->completion_lock);
 	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
 		if (io_match_task(req, tsk, files)) {
-			io_kill_timeout(req);
+			io_kill_timeout(req, -ECANCELED);
 			canceled++;
 		}
 	}
@@ -1326,7 +1326,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 			break;
 
 		list_del_init(&req->timeout.list);
-		io_kill_timeout(req);
+		io_kill_timeout(req, 0);
 	} while (!list_empty(&ctx->timeout_list));
 
 	ctx->cq_last_tm_flush = seq;
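Taken together, the call sites give io_kill_timeouts(), used on ring and task
teardown, the -ECANCELED result, while io_flush_timeouts() keeps 0 for
timeouts that complete because their completion-count target has been reached.
For reference, a sketch of io_kill_timeout() as it reads with the patch
applied, reconstructed from the first two hunks; the hrtimer_try_to_cancel()
guard is assumed from the surrounding function body and is not part of the
hunks shown:

static void io_kill_timeout(struct io_kiocb *req, int status)
{
        struct io_timeout_data *io = req->async_data;
        int ret;

        /* Assumed context: only complete the request if the timer could
         * still be cancelled (i.e. it was not already running). */
        ret = hrtimer_try_to_cancel(&io->timer);
        if (ret != -1) {
                atomic_set(&req->ctx->cq_timeouts,
                        atomic_read(&req->ctx->cq_timeouts) + 1);
                list_del_init(&req->timeout.list);
                /* Post the caller-chosen result: -ECANCELED from
                 * io_kill_timeouts(), 0 from io_flush_timeouts(). */
                io_cqring_fill_event(req, status);
                io_put_req_deferred(req, 1);
        }
}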