author     Dylan Yudaken <dylany@fb.com>    2022-06-22 16:40:21 +0300
committer  Jens Axboe <axboe@kernel.dk>     2022-07-25 03:39:15 +0300
commit     ed5ccb3beeba0cadb0fcf353ae192021dfecf252 (patch)
tree       b4586cdd66bb805d9806febb837a107773351a20 /io_uring/io_uring.c
parent     024f15e033a52660a045947ee56c7e842180fa81 (diff)
download   linux-ed5ccb3beeba0cadb0fcf353ae192021dfecf252.tar.xz
io_uring: remove priority tw list optimisation
This optimisation has some built in assumptions that make it easy to
introduce bugs. It also does not have clear wins that make it worth
keeping.

Signed-off-by: Dylan Yudaken <dylany@fb.com>
Link: https://lore.kernel.org/r/20220622134028.2013417-2-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  77
1 file changed, 11 insertions(+), 66 deletions(-)
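
The patch below collapses the two task-work lists (prio_task_list and task_list) into a single list, so every request goes through the same add/run path. As a rough orientation before reading the diff, here is a minimal user-space sketch of that single-list pattern: producers append under a lock, the worker detaches the whole list and runs it unlocked. All names here (tw_node, tw_ctx, tw_add, tw_run) are invented for the sketch and a pthread mutex stands in for the task_lock spinlock; this is not the kernel's io_wq_work_node/io_uring_task API.

/*
 * Minimal user-space sketch of the single task-work list left after this
 * patch: producers append under a lock, the worker snapshots the list and
 * runs it with the lock dropped.  Names are invented for illustration.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct tw_node {
	struct tw_node *next;
	void (*func)(struct tw_node *);
};

struct tw_ctx {
	pthread_mutex_t lock;		/* stands in for tctx->task_lock */
	struct tw_node *first, **last;	/* singly linked list with tail pointer */
	bool running;			/* stands in for tctx->task_running */
};

static void tw_ctx_init(struct tw_ctx *t)
{
	pthread_mutex_init(&t->lock, NULL);
	t->first = NULL;
	t->last = &t->first;
	t->running = false;
}

/* Append one node; returns true if the caller should kick off a worker run. */
static bool tw_add(struct tw_ctx *t, struct tw_node *n)
{
	bool need_run;

	n->next = NULL;
	pthread_mutex_lock(&t->lock);
	*t->last = n;
	t->last = &n->next;
	need_run = !t->running;
	t->running = true;
	pthread_mutex_unlock(&t->lock);
	return need_run;
}

/* Detach the whole list under the lock, then run every node unlocked. */
static void tw_run(struct tw_ctx *t)
{
	for (;;) {
		struct tw_node *node;

		pthread_mutex_lock(&t->lock);
		node = t->first;
		t->first = NULL;
		t->last = &t->first;
		if (!node)
			t->running = false;	/* queue empty: stop running */
		pthread_mutex_unlock(&t->lock);

		if (!node)
			break;
		while (node) {
			struct tw_node *next = node->next;

			node->func(node);
			node = next;
		}
	}
}

static void say_done(struct tw_node *n)
{
	(void)n;
	puts("task work ran");
}

int main(void)
{
	struct tw_ctx t;
	struct tw_node a = { .func = say_done };
	struct tw_node b = { .func = say_done };

	tw_ctx_init(&t);
	tw_add(&t, &a);		/* first add reports that a run is needed */
	tw_add(&t, &b);
	tw_run(&t);		/* prints twice, then clears 'running' */
	return 0;
}

Compare this with the removed handle_prev_tw_list() in the diff below: once a second list exists, the worker has to juggle two orderings and two locking modes, which is the fragility the commit message refers to.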
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8bc63413fc54..d21d0fc3645b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -986,44 +986,6 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
percpu_ref_put(&ctx->refs);
}
-static void handle_prev_tw_list(struct io_wq_work_node *node,
- struct io_ring_ctx **ctx, bool *uring_locked)
-{
- if (*ctx && !*uring_locked)
- spin_lock(&(*ctx)->completion_lock);
-
- do {
- struct io_wq_work_node *next = node->next;
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- io_task_work.node);
-
- prefetch(container_of(next, struct io_kiocb, io_task_work.node));
-
- if (req->ctx != *ctx) {
- if (unlikely(!*uring_locked && *ctx))
- io_cq_unlock_post(*ctx);
-
- ctx_flush_and_put(*ctx, uring_locked);
- *ctx = req->ctx;
- /* if not contended, grab and improve batching */
- *uring_locked = mutex_trylock(&(*ctx)->uring_lock);
- percpu_ref_get(&(*ctx)->refs);
- if (unlikely(!*uring_locked))
- io_cq_lock(*ctx);
- }
- if (likely(*uring_locked)) {
- req->io_task_work.func(req, uring_locked);
- } else {
- req->cqe.flags = io_put_kbuf_comp(req);
- __io_req_complete_post(req);
- }
- node = next;
- } while (node);
-
- if (unlikely(!*uring_locked))
- io_cq_unlock_post(*ctx);
-}
-
static void handle_tw_list(struct io_wq_work_node *node,
struct io_ring_ctx **ctx, bool *locked)
{
@@ -1054,27 +1016,20 @@ void tctx_task_work(struct callback_head *cb)
task_work);
while (1) {
- struct io_wq_work_node *node1, *node2;
+ struct io_wq_work_node *node;
spin_lock_irq(&tctx->task_lock);
- node1 = tctx->prio_task_list.first;
- node2 = tctx->task_list.first;
+ node = tctx->task_list.first;
INIT_WQ_LIST(&tctx->task_list);
- INIT_WQ_LIST(&tctx->prio_task_list);
- if (!node2 && !node1)
+ if (!node)
tctx->task_running = false;
spin_unlock_irq(&tctx->task_lock);
- if (!node2 && !node1)
+ if (!node)
break;
-
- if (node1)
- handle_prev_tw_list(node1, &ctx, &uring_locked);
- if (node2)
- handle_tw_list(node2, &ctx, &uring_locked);
+ handle_tw_list(node, &ctx, &uring_locked);
cond_resched();
- if (data_race(!tctx->task_list.first) &&
- data_race(!tctx->prio_task_list.first) && uring_locked)
+ if (data_race(!tctx->task_list.first) && uring_locked)
io_submit_flush_completions(ctx);
}
@@ -1086,8 +1041,7 @@ void tctx_task_work(struct callback_head *cb)
}
static void __io_req_task_work_add(struct io_kiocb *req,
- struct io_uring_task *tctx,
- struct io_wq_work_list *list)
+ struct io_uring_task *tctx)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_wq_work_node *node;
@@ -1095,7 +1049,7 @@ static void __io_req_task_work_add(struct io_kiocb *req,
bool running;
spin_lock_irqsave(&tctx->task_lock, flags);
- wq_list_add_tail(&req->io_task_work.node, list);
+ wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
running = tctx->task_running;
if (!running)
tctx->task_running = true;
@@ -1113,7 +1067,8 @@ static void __io_req_task_work_add(struct io_kiocb *req,
spin_lock_irqsave(&tctx->task_lock, flags);
tctx->task_running = false;
- node = wq_list_merge(&tctx->prio_task_list, &tctx->task_list);
+ node = tctx->task_list.first;
+ INIT_WQ_LIST(&tctx->task_list);
spin_unlock_irqrestore(&tctx->task_lock, flags);
while (node) {
@@ -1129,17 +1084,7 @@ void io_req_task_work_add(struct io_kiocb *req)
{
struct io_uring_task *tctx = req->task->io_uring;
- __io_req_task_work_add(req, tctx, &tctx->task_list);
-}
-
-void io_req_task_prio_work_add(struct io_kiocb *req)
-{
- struct io_uring_task *tctx = req->task->io_uring;
-
- if (req->ctx->flags & IORING_SETUP_SQPOLL)
- __io_req_task_work_add(req, tctx, &tctx->prio_task_list);
- else
- __io_req_task_work_add(req, tctx, &tctx->task_list);
+ __io_req_task_work_add(req, tctx);
}
static void io_req_tw_post(struct io_kiocb *req, bool *locked)
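
For the hunk that replaces wq_list_merge() with a plain snapshot of task_list, a rough sketch of the difference between splicing two lists and detaching one. This is plain C with invented names (struct list, merge_and_clear, detach), not the kernel's list helpers, and is only meant to illustrate the shape of the change.

/*
 * Rough sketch (not kernel code) of what the removed wq_list_merge() call
 * conceptually did versus the plain "detach the single list" that replaces it.
 */
#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;
};

struct list {
	struct node *first;
	struct node **last;
};

static void list_init(struct list *l)
{
	l->first = NULL;
	l->last = &l->first;
}

static void list_add_tail(struct list *l, struct node *n)
{
	n->next = NULL;
	*l->last = n;
	l->last = &n->next;
}

/* Old scheme: splice the priority list in front of the normal one. */
static struct node *merge_and_clear(struct list *prio, struct list *normal)
{
	struct node *head = prio->first ? prio->first : normal->first;

	if (prio->first)
		*prio->last = normal->first;
	list_init(prio);
	list_init(normal);
	return head;
}

/* New scheme: one list, so flushing is just detaching its head. */
static struct node *detach(struct list *l)
{
	struct node *head = l->first;

	list_init(l);
	return head;
}

int main(void)
{
	struct list prio, normal, single;
	struct node a, b, c;

	list_init(&prio);
	list_init(&normal);
	list_init(&single);

	list_add_tail(&prio, &a);
	list_add_tail(&normal, &b);
	list_add_tail(&normal, &c);

	/* Old: priority entries are spliced ahead of normal ones. */
	assert(merge_and_clear(&prio, &normal) == &a);

	/* New: everything sits on one list in submission order. */
	list_add_tail(&single, &a);
	list_add_tail(&single, &b);
	assert(detach(&single) == &a);
	return 0;
}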