author		Pavel Begunkov <asml.silence@gmail.com>	2021-06-15 18:47:58 +0300
committer	Jens Axboe <axboe@kernel.dk>	2021-06-16 00:44:34 +0300
commit		2335f6f5ddf2f4621395fac5fa4b53d075828cc1 (patch)
tree		62899c7826fe8befb552357858db6a311cd8467f /fs
parent		3c19966d3710dbe5a44658c532052f11d797aecb (diff)
download	linux-2335f6f5ddf2f4621395fac5fa4b53d075828cc1.tar.xz
io_uring: optimise io_commit_cqring()
In most cases io_commit_cqring() is just an smp_store_release(), and
it's hot enough, especially for IRQ rw, to want it to save on a function
call. Mark it inline and extract a non-inlined slow path doing drain
and timeout flushing. The inlined part is pretty slim to not cause
binary bloating.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7350f8b6b92caa50a48a80be39909f0d83eddd93.1623772051.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f63fc79df4eb..16156a655d8b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1344,14 +1344,18 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 	ctx->cq_last_tm_flush = seq;
 }
 
-static void io_commit_cqring(struct io_ring_ctx *ctx)
+static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 {
-	if (unlikely(ctx->off_timeout_used || ctx->drain_active)) {
-		if (ctx->off_timeout_used)
-			io_flush_timeouts(ctx);
-		if (ctx->drain_active)
-			io_queue_deferred(ctx);
-	}
+	if (ctx->off_timeout_used)
+		io_flush_timeouts(ctx);
+	if (ctx->drain_active)
+		io_queue_deferred(ctx);
+}
+
+static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+{
+	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
+		__io_commit_cqring_flush(ctx);
 	/* order cqe stores with ring update */
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
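
For context, the smp_store_release() on cq.tail is exactly the fast path the patch keeps inline: it publishes the new tail only after all preceding CQE stores are visible. On the consumer side this pairs with an acquire load of the tail. Below is a minimal userspace-style sketch of that pairing, assuming a mapped CQ ring with head/tail indices, a ring mask and a cqes[] array as in the io_uring ABI; the struct layout and the names cq_view, reap_cqes and handle are illustrative, not the kernel's or liburing's actual code.

#include <stdatomic.h>
#include <linux/io_uring.h>	/* struct io_uring_cqe (uapi) */

/* Simplified, illustrative view of the mapped CQ ring (not the real layout). */
struct cq_view {
	_Atomic unsigned int *khead;	/* consumer-written head index */
	_Atomic unsigned int *ktail;	/* kernel-written tail index */
	unsigned int ring_mask;
	struct io_uring_cqe *cqes;
};

/*
 * The acquire load of the tail pairs with the kernel's smp_store_release()
 * in io_commit_cqring(): once the new tail value is observed, every CQE
 * store made before that release is guaranteed to be visible as well.
 */
static int reap_cqes(struct cq_view *cq, void (*handle)(const struct io_uring_cqe *))
{
	unsigned int head = atomic_load_explicit(cq->khead, memory_order_relaxed);
	unsigned int tail = atomic_load_explicit(cq->ktail, memory_order_acquire);
	int nr = 0;

	while (head != tail) {
		handle(&cq->cqes[head & cq->ring_mask]);
		head++;
		nr++;
	}
	/* Publish the new head so the kernel may reuse the consumed slots. */
	atomic_store_explicit(cq->khead, head, memory_order_release);
	return nr;
}

The relaxed load of the head is sufficient because only the consumer writes it; the release store on the head in turn lets the kernel safely reuse the consumed slots.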