author     Sasha Levin <sashal@kernel.org>                  2020-02-09 21:30:59 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-02-11 15:37:28 +0300
commit     e70e204634812661679360f95c0f910717610d3b (patch)
tree       c93272031df6c4c713bf02dd787d6cd1f8120e60
parent     c0e6f4d4f90bde85cc83afd0f4bace13b71308fd (diff)
io_uring: enable option to only trigger eventfd for async completions
[ Upstream commit f2842ab5b72d7ee5f7f8385c2d4f32c133f5837b ]
If an application is using eventfd notifications with poll to know when
new SQEs can be issued, it expects the subsequent reads/writes to
complete inline. In that case it already knows that events are
available, and it doesn't want spurious wakeups on the eventfd for
those requests.

This adds IORING_REGISTER_EVENTFD_ASYNC, which works just like
IORING_REGISTER_EVENTFD, except it only triggers notifications for
completions that happen from async context (IRQ, or io-wq worker
completions). Completions that happen inline from the submission
itself do not trigger notifications.
Suggested-by: Mark Papadakis <markuspapadakis@icloud.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
 fs/io_uring.c                 | 17 ++++++++++++++++-
 include/uapi/linux/io_uring.h |  1 +
 2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 95fc5c5a8596..131087782bec 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -188,6 +188,7 @@ struct io_ring_ctx {
 		bool			account_mem;
 		bool			cq_overflow_flushed;
 		bool			drain_next;
+		bool			eventfd_async;
 
 		/*
 		 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -735,13 +736,20 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
 	return &rings->cqes[tail & ctx->cq_mask];
 }
 
+static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
+{
+	if (!ctx->eventfd_async)
+		return true;
+	return io_wq_current_is_worker() || in_interrupt();
+}
+
 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
 	if (waitqueue_active(&ctx->sqo_wait))
 		wake_up(&ctx->sqo_wait);
-	if (ctx->cq_ev_fd)
+	if (ctx->cq_ev_fd && io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
 }
 
@@ -5486,10 +5494,17 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		ret = io_sqe_files_update(ctx, arg, nr_args);
 		break;
 	case IORING_REGISTER_EVENTFD:
+	case IORING_REGISTER_EVENTFD_ASYNC:
 		ret = -EINVAL;
 		if (nr_args != 1)
 			break;
 		ret = io_eventfd_register(ctx, arg);
+		if (ret)
+			break;
+		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
+			ctx->eventfd_async = 1;
+		else
+			ctx->eventfd_async = 0;
 		break;
 	case IORING_UNREGISTER_EVENTFD:
 		ret = -EINVAL;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 55cfcb71606d..88693fed2c4b 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -175,6 +175,7 @@ struct io_uring_params {
 #define IORING_REGISTER_EVENTFD		4
 #define IORING_UNREGISTER_EVENTFD	5
 #define IORING_REGISTER_FILES_UPDATE	6
+#define IORING_REGISTER_EVENTFD_ASYNC	7
 
 struct io_uring_files_update {
 	__u32 offset;
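
A minimal userspace sketch of the new opcode (not part of the patch): it sets
up a small ring, creates an eventfd, and registers it with the raw
io_uring_register() syscall so that only async completions signal it. This
assumes a kernel carrying this change and a <linux/io_uring.h> that exports
IORING_REGISTER_EVENTFD_ASYNC; liburing helpers are deliberately avoided and
error handling is minimal.

/*
 * Usage sketch: eventfd notifications for async completions only.
 * Assumes IORING_REGISTER_EVENTFD_ASYNC is present in the uapi header.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

int main(void)
{
	struct io_uring_params p;
	int ring_fd, efd;

	memset(&p, 0, sizeof(p));
	ring_fd = syscall(__NR_io_uring_setup, 8, &p);
	if (ring_fd < 0) {
		perror("io_uring_setup");
		return 1;
	}

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	/* arg points at the eventfd descriptor, nr_args must be 1 */
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD_ASYNC, &efd, 1) < 0) {
		perror("IORING_REGISTER_EVENTFD_ASYNC");
		return 1;
	}

	/*
	 * efd can now sit in a poll/epoll set: it is signalled only for
	 * completions posted from IRQ or io-wq worker context, never for
	 * requests that complete inline during submission.
	 */
	return 0;
}

With plain IORING_REGISTER_EVENTFD the same eventfd would also fire for
inline completions, which is exactly the spurious wakeup the commit message
describes.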