author	Dylan Yudaken <dylany@fb.com>	2022-08-30 15:50:08 +0300
committer	Jens Axboe <axboe@kernel.dk>	2022-09-21 19:30:42 +0300
commit	b4c98d59a787eff4c8ee983bcf68266ce2199df6 (patch)
tree	c2e48e078d4d9e066f26f5733e503b4c4641dd2d /io_uring
parent	32d91f0590080597d5fc46c0c36d8885c241622e (diff)
download	linux-b4c98d59a787eff4c8ee983bcf68266ce2199df6.tar.xz
io_uring: introduce io_has_work
This will be used later to know if the ring has outstanding work. Right now
that is just whether there are overflow CQEs to copy to the main CQE ring,
but later it will also include deferred task work.

Signed-off-by: Dylan Yudaken <dylany@fb.com>
Link: https://lore.kernel.org/r/20220830125013.570060-3-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
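As a rough illustration of where the helper is headed (an assumption drawn
from the commit message above, not code in this patch), a later extension
for deferred task work might grow a second clause alongside the overflow
bit. The work_llist field below is hypothetical here:

/*
 * Hypothetical later shape of the helper (illustration only, not part
 * of this commit): once deferred task work is queued on the ctx, the
 * "has work" check would also report that queue being non-empty.
 */
static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       !llist_empty(&ctx->work_llist);	/* assumed deferred-work list */
}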
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b328805d103a..471472fe9a56 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2146,6 +2146,11 @@ struct io_wait_queue {
 	unsigned nr_timeouts;
 };
 
+static inline bool io_has_work(struct io_ring_ctx *ctx)
+{
+	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
+}
+
 static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
 	struct io_ring_ctx *ctx = iowq->ctx;
@@ -2164,13 +2169,13 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 {
 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
 							wq);
+	struct io_ring_ctx *ctx = iowq->ctx;
 
 	/*
 	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
 	 * the task, and the next invocation will do it.
 	 */
-	if (io_should_wake(iowq) ||
-	    test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq))
+	if (io_should_wake(iowq) || io_has_work(ctx))
 		return autoremove_wake_function(curr, mode, wake_flags, key);
 	return -1;
 }
@@ -2506,8 +2511,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
 	 * pushs them to do the flush.
 	 */
-	if (io_cqring_events(ctx) ||
-	    test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+
+	if (io_cqring_events(ctx) || io_has_work(ctx))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	return mask;
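
For context beyond the patch itself, here is a userspace sketch (using
liburing, not code from this commit) of why the io_uring_poll() condition
matters: a reactor that multiplexes the ring fd with poll(2) relies on
POLLIN meaning "there is something to reap or flush". The helper name
wait_and_reap_one is made up for illustration.

/*
 * Sketch: integrating the ring fd into a poll loop.  The fd reports
 * POLLIN when CQEs are available or overflow is pending, which is the
 * condition io_uring_poll() computes above.  Error handling trimmed.
 */
#include <poll.h>
#include <liburing.h>

static int wait_and_reap_one(struct io_uring *ring, struct io_uring_cqe **cqe)
{
	struct pollfd pfd = { .fd = ring->ring_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	/*
	 * io_uring_wait_cqe() enters the kernel if the CQ looks empty,
	 * which also lets any overflowed CQEs be flushed back into the ring.
	 */
	return io_uring_wait_cqe(ring, cqe);
}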