| author | Jens Axboe <axboe@kernel.dk> | 2023-06-02 17:41:46 +0300 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2023-06-02 17:55:37 +0300 |
| commit | c92fcfc2bab54451c4f1481755ea244f413455cb (patch) | |
| tree | c4a1474020ef5cf5138b5b34c5f35476ce8a1d49 | |
| parent | f026be0e1e881e3395c3d5418ffc8c2a2203c3f3 (diff) | |
| download | linux-c92fcfc2bab54451c4f1481755ea244f413455cb.tar.xz | |
io_uring: avoid indirect function calls for the hottest task_work
We use task_work for a variety of reasons, but doing completions or
triggering retry after poll are by far the hottest two. Use the indirect
function call wrappers to avoid the indirect function call if
CONFIG_RETPOLINE is set.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
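The change relies on the kernel's indirect call wrapper macros (INDIRECT_CALL_2() from linux/indirect_call_wrapper.h): compare the function pointer against the known hot targets and branch to a direct call on a match, leaving the indirect call (and its retpoline cost) only for the rare cases. Below is a minimal user-space sketch of that compare-and-call pattern; every name in it (fake_req, run_task_work, and so on) is an invented stand-in for illustration, not a real io_uring type, and as the commit message notes the real macro only performs the comparison when CONFIG_RETPOLINE is set.

```c
/*
 * Illustrative sketch only: a user-space model of the compare-and-call
 * pattern behind INDIRECT_CALL_2(). All types and names are simplified
 * stand-ins, not the real io_uring structures.
 */
#include <stdio.h>

struct fake_req;                                  /* stand-in for struct io_kiocb */
typedef void (*tw_func_t)(struct fake_req *req);

struct fake_req {
	tw_func_t io_task_work_func;              /* stand-in for io_task_work.func */
	const char *name;
};

static void fake_poll_task_func(struct fake_req *req)
{
	printf("%s: poll completion path\n", req->name);
}

static void fake_rw_complete(struct fake_req *req)
{
	printf("%s: read/write completion path\n", req->name);
}

static void fake_other_func(struct fake_req *req)
{
	printf("%s: rare path, handled via the indirect call\n", req->name);
}

/*
 * Compare the pointer against the two hottest known targets and call them
 * directly on a match; only unknown targets pay for the indirect call.
 */
static void run_task_work(struct fake_req *req)
{
	if (req->io_task_work_func == fake_poll_task_func)
		fake_poll_task_func(req);          /* direct call */
	else if (req->io_task_work_func == fake_rw_complete)
		fake_rw_complete(req);             /* direct call */
	else
		req->io_task_work_func(req);       /* indirect call */
}

int main(void)
{
	struct fake_req reqs[] = {
		{ fake_poll_task_func, "req0" },
		{ fake_rw_complete,    "req1" },
		{ fake_other_func,     "req2" },
	};

	for (unsigned int i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
		run_task_work(&reqs[i]);
	return 0;
}
```

This is also why the patch drops `static` from io_poll_task_func() and io_req_rw_complete() and adds their declarations to poll.h and rw.h: io_uring.c needs both symbols in scope so it can compare req->io_task_work.func against them.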
-rw-r--r-- | io_uring/io_uring.c | 9
-rw-r--r-- | io_uring/poll.c | 2
-rw-r--r-- | io_uring/poll.h | 2
-rw-r--r-- | io_uring/rw.c | 2
-rw-r--r-- | io_uring/rw.h | 1
5 files changed, 12 insertions, 4 deletions
```diff
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c99a7a0c3f21..fc511cb6761d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -95,6 +95,7 @@
 
 #include "timeout.h"
 #include "poll.h"
+#include "rw.h"
 #include "alloc_cache.h"
 
 #define IORING_MAX_ENTRIES	32768
@@ -1205,7 +1206,9 @@ static unsigned int handle_tw_list(struct llist_node *node,
 			ts->locked = mutex_trylock(&(*ctx)->uring_lock);
 			percpu_ref_get(&(*ctx)->refs);
 		}
-		req->io_task_work.func(req, ts);
+		INDIRECT_CALL_2(req->io_task_work.func,
+				io_poll_task_func, io_req_rw_complete,
+				req, ts);
 		node = next;
 		count++;
 		if (unlikely(need_resched())) {
@@ -1415,7 +1418,9 @@ again:
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
 		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
-		req->io_task_work.func(req, ts);
+		INDIRECT_CALL_2(req->io_task_work.func,
+				io_poll_task_func, io_req_rw_complete,
+				req, ts);
 		ret++;
 		node = next;
 	}
diff --git a/io_uring/poll.c b/io_uring/poll.c
index c90e47dc1e29..9689806d3c16 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -326,7 +326,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 	return IOU_POLL_NO_ACTION;
 }
 
-static void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
+void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	int ret;
 
diff --git a/io_uring/poll.h b/io_uring/poll.h
index b2393b403a2c..ff4d5d753387 100644
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -38,3 +38,5 @@ bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			bool cancel_all);
 
 void io_apoll_cache_free(struct io_cache_entry *entry);
+
+void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 3f118ed46e4f..c23d8baf0287 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -283,7 +283,7 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 	return res;
 }
 
-static void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
+void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	io_req_io_end(req);
 
diff --git a/io_uring/rw.h b/io_uring/rw.h
index 3b733f4b610a..4b89f9659366 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -22,3 +22,4 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags);
 int io_writev_prep_async(struct io_kiocb *req);
 void io_readv_writev_cleanup(struct io_kiocb *req);
 void io_rw_fail(struct io_kiocb *req);
+void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts);
```