author		Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 21:33:08 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 21:33:08 +0300
commit		54e60e505d6144a22c787b5be1fdce996a27be1b (patch)
tree		a0b582fa8d9de216fef4fb6320199bb055125c65
parent		d523ec4c6af4314575d6ab8b52629ae3e2039a50 (diff)
parent		5d772916855f593672de55c437925daccc8ecd73 (diff)
download	linux-54e60e505d6144a22c787b5be1fdce996a27be1b.tar.xz
Merge tag 'for-6.2/io_uring-2022-12-08' of git://git.kernel.dk/linux
Pull io_uring updates from Jens Axboe:
- Always ensure proper ordering in case of CQ ring overflow, which then
means we can remove some work-arounds for that (Dylan)
- Support completion batching for multishot, greatly increasing the
  efficiency for those (Dylan); the first sketch after this list shows
  the userspace side of a multishot request
- Flag epoll/eventfd wakeups done from io_uring, so that we can easily
tell if we're recursing into io_uring again.
Previously, this would have resulted in repeated multishot
notifications if we had a dependency there. That could happen if an
eventfd was registered as the ring eventfd, and we multishot polled
for events on it. Or if an io_uring fd was added to epoll, and
io_uring had a multishot request for the epoll fd.
Test cases here:
https://git.kernel.dk/cgit/liburing/commit/?id=919755a7d0096fda08fb6d65ac54ad8d0fe027cd
Previously these got terminated when the CQ ring eventually
overflowed; now it's handled gracefully (me). The second sketch after
this list shows the eventfd variant of this setup.
- Tightening of the IOPOLL-based completions (Pavel)
- Optimizations of the networking zero-copy paths (Pavel)
- Various tweaks and fixes (Dylan, Pavel)
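
To ground the multishot items above, here is a minimal, hedged sketch of
the userspace side of a multishot request, written against liburing
(2.2+ assumed; listen_fd is a listening socket, error handling trimmed).
The completion batching added in this series is purely a kernel-side
optimization, so existing consumers like this benefit without changes:

#include <liburing.h>
#include <stdio.h>

static int accept_loop(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	/* armed once; the kernel keeps posting one CQE per connection */
	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	for (;;) {
		if (io_uring_wait_cqe(ring, &cqe))
			return -1;
		if (cqe->res >= 0)
			printf("accepted fd %d\n", cqe->res);
		/* IORING_CQE_F_MORE cleared: multishot ended, re-arm if wanted */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			int res = cqe->res;

			io_uring_cqe_seen(ring, cqe);
			return res < 0 ? res : 0;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}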
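A second hedged sketch, of the recursion scenario described above: the
ring's own registered eventfd is multishot-polled on the same ring, so
every posted CQE signals the eventfd the poll is watching. liburing's
io_uring_register_eventfd() and io_uring_prep_poll_multishot() are the
real APIs; the rest is illustrative. With the EPOLL_URING_WAKE flag in
this series, the kernel downgrades such a poll to oneshot rather than
letting it loop until the CQ ring overflows:

#include <liburing.h>
#include <sys/eventfd.h>
#include <poll.h>

static void poll_own_ring_eventfd(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	int efd = eventfd(0, EFD_NONBLOCK);

	io_uring_queue_init(8, &ring, 0);
	/* every CQE posted to this ring now signals efd ... */
	io_uring_register_eventfd(&ring, efd);

	sqe = io_uring_get_sqe(&ring);
	/* ... and here we multishot-poll that very eventfd */
	io_uring_prep_poll_multishot(sqe, efd, POLLIN);
	io_uring_submit(&ring);

	/* reaping CQEs here used to re-trigger the poll indefinitely;
	 * the kernel now tags its own wakeups with EPOLL_URING_WAKE and
	 * terminates the multishot with a final CQE that lacks
	 * IORING_CQE_F_MORE */
	io_uring_queue_exit(&ring);
}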
* tag 'for-6.2/io_uring-2022-12-08' of git://git.kernel.dk/linux: (41 commits)
io_uring: keep unlock_post inlined in hot path
io_uring: don't use complete_post in kbuf
io_uring: spelling fix
io_uring: remove io_req_complete_post_tw
io_uring: allow multishot polled reqs to defer completion
io_uring: remove overflow param from io_post_aux_cqe
io_uring: add lockdep assertion in io_fill_cqe_aux
io_uring: make io_fill_cqe_aux static
io_uring: add io_aux_cqe which allows deferred completion
io_uring: allow defer completion for aux posted cqes
io_uring: defer all io_req_complete_failed
io_uring: always lock in io_apoll_task_func
io_uring: remove iopoll spinlock
io_uring: iopoll protect complete_post
io_uring: inline __io_req_complete_put()
io_uring: remove io_req_tw_post_queue
io_uring: use io_req_task_complete() in timeout
io_uring: hold locks for io_req_complete_failed
io_uring: add completion locking for iopoll
io_uring: kill io_cqring_ev_posted() and __io_cq_unlock_post()
...
 fs/eventfd.c                   |  37
 fs/eventpoll.c                 |  18
 include/linux/eventfd.h        |   7
 include/linux/io_uring_types.h |   2
 include/uapi/linux/eventpoll.h |   6
 include/uapi/linux/io_uring.h  |  18
 io_uring/io_uring.c            | 223
 io_uring/io_uring.h            |  43
 io_uring/kbuf.c                |  14
 io_uring/msg_ring.c            |   4
 io_uring/net.c                 |  56
 io_uring/notif.c               |  57
 io_uring/notif.h               |  15
 io_uring/poll.c                |  33
 io_uring/rsrc.c                |  11
 io_uring/rw.c                  |   6
 io_uring/timeout.c             |  10
 io_uring/uring_cmd.c           |   2
 18 files changed, 355 insertions(+), 207 deletions(-)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index c0ffee99ad23..249ca6c0b784 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -43,21 +43,7 @@ struct eventfd_ctx {
 	int id;
 };
 
-/**
- * eventfd_signal - Adds @n to the eventfd counter.
- * @ctx: [in] Pointer to the eventfd context.
- * @n: [in] Value of the counter to be added to the eventfd internal counter.
- * The value cannot be negative.
- *
- * This function is supposed to be called by the kernel in paths that do not
- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a EPOLLERR
- * to poll(2).
- *
- * Returns the amount by which the counter was incremented. This will be less
- * than @n if the counter has overflowed.
- */
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
 {
 	unsigned long flags;
 
@@ -78,12 +64,31 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
-		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
 	current->in_eventfd = 0;
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
 }
+
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * @ctx: [in] Pointer to the eventfd context.
+ * @n: [in] Value of the counter to be added to the eventfd internal counter.
+ * The value cannot be negative.
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as overflow condition by returning a EPOLLERR
+ * to poll(2).
+ *
+ * Returns the amount by which the counter was incremented. This will be less
+ * than @n if the counter has overflowed.
+ */
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+{
+	return eventfd_signal_mask(ctx, n, 0);
+}
 EXPORT_SYMBOL_GPL(eventfd_signal);
 
 static void eventfd_free_ctx(struct eventfd_ctx *ctx)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 52954d4637b5..64659b110973 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -491,7 +491,8 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
+			     unsigned pollflags)
 {
 	struct eventpoll *ep_src;
 	unsigned long flags;
@@ -522,16 +523,17 @@ static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
 	}
 	spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
 	ep->nests = nests + 1;
-	wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
+	wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
 	ep->nests = 0;
 	spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
 }
 
 #else
 
-static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
+			     unsigned pollflags)
 {
-	wake_up_poll(&ep->poll_wait, EPOLLIN);
+	wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
 }
 
 #endif
 
@@ -742,7 +744,7 @@ static void ep_free(struct eventpoll *ep)
 
 	/* We need to release all tasks waiting for these file */
 	if (waitqueue_active(&ep->poll_wait))
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);
 
 	/*
 	 * We need to lock this because we could be hit by
@@ -1208,7 +1210,7 @@ out_unlock:
 
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, epi);
+		ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
 
 	if (!(epi->event.events & EPOLLEXCLUSIVE))
 		ewake = 1;
@@ -1553,7 +1555,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);
 
 	return 0;
 }
@@ -1629,7 +1631,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
 
 	/* We have to call this outside the lock */
 	if (pwake)
-		ep_poll_safewake(ep, NULL);
+		ep_poll_safewake(ep, NULL, 0);
 
 	return 0;
 }
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 3cd202d3eefb..36a486505b08 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -40,6 +40,7 @@ struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
@@ -66,6 +67,12 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 	return -ENOSYS;
 }
 
+static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
+				      unsigned mask)
+{
+	return -ENOSYS;
+}
+
 static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index f5b687a787a3..accdfecee953 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -174,7 +174,9 @@ struct io_submit_state {
 	bool			plug_started;
 	bool			need_plug;
 	unsigned short		submit_nr;
+	unsigned int		cqes_count;
 	struct blk_plug		plug;
+	struct io_uring_cqe	cqes[16];
 };
 
 struct io_ev_fd {
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 8a3432d0f0dc..e687658843b1 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -41,6 +41,12 @@
 #define EPOLLMSG	(__force __poll_t)0x00000400
 #define EPOLLRDHUP	(__force __poll_t)0x00002000
 
+/*
+ * Internal flag - wakeup generated by io_uring, used to detect recursion back
+ * into the io_uring poll handler.
+ */
+#define EPOLL_URING_WAKE	((__force __poll_t)(1U << 27))
+
 /* Set exclusive wakeup mode for the target file descriptor */
 #define EPOLLEXCLUSIVE	((__force __poll_t)(1U << 28))
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 2df3225b562f..9d4c4078e8d0 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -296,10 +296,28 @@ enum io_uring_op {
  *
  * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
  *				the buf_index field.
+ *
+ * IORING_SEND_ZC_REPORT_USAGE
+ *				If set, SEND[MSG]_ZC should report
+ *				the zerocopy usage in cqe.res
+ *				for the IORING_CQE_F_NOTIF cqe.
+ *				0 is reported if zerocopy was actually possible.
+ *				IORING_NOTIF_USAGE_ZC_COPIED if data was copied
+ *				(at least partially).
  */
 #define IORING_RECVSEND_POLL_FIRST	(1U << 0)
 #define IORING_RECV_MULTISHOT		(1U << 1)
 #define IORING_RECVSEND_FIXED_BUF	(1U << 2)
+#define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)
+
+/*
+ * cqe.res for IORING_CQE_F_NOTIF if
+ * IORING_SEND_ZC_REPORT_USAGE was requested
+ *
+ * It should be treated as a flag, all other
+ * bits of cqe.res should be treated as reserved!
+ */
+#define IORING_NOTIF_USAGE_ZC_COPIED	(1U << 31)
 
 /*
  * accept flags stored in sqe->ioprio
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 61cd7ffd0f6a..3436e0b83534 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -167,7 +167,8 @@ EXPORT_SYMBOL(io_uring_get_socket);
 
 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
 {
-	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
+	    ctx->submit_state.cqes_count)
 		__io_submit_flush_completions(ctx);
 }
 
@@ -495,7 +496,7 @@ static void io_eventfd_ops(struct rcu_head *rcu)
 	int ops = atomic_xchg(&ev_fd->ops, 0);
 
 	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
-		eventfd_signal(ev_fd->cq_ev_fd, 1);
+		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
 
 	/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
 	 * ordering in a race but if references are 0 we know we have to free
@@ -531,7 +532,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 		goto out;
 
 	if (likely(eventfd_signal_allowed())) {
-		eventfd_signal(ev_fd->cq_ev_fd, 1);
+		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
 	} else {
 		atomic_inc(&ev_fd->refs);
 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
@@ -581,23 +582,21 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 		io_eventfd_flush_signal(ctx);
 }
 
-static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
-{
-	io_commit_cqring_flush(ctx);
-	io_cqring_wake(ctx);
-}
-
-static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
+/* keep it inlined for io_submit_flush_completions() */
+static inline void io_cq_unlock_post_inline(struct io_ring_ctx *ctx)
 	__releases(ctx->completion_lock)
 {
 	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
-	io_cqring_ev_posted(ctx);
+
+	io_commit_cqring_flush(ctx);
+	io_cqring_wake(ctx);
 }
 
 void io_cq_unlock_post(struct io_ring_ctx *ctx)
+	__releases(ctx->completion_lock)
 {
-	__io_cq_unlock_post(ctx);
+	io_cq_unlock_post_inline(ctx);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -778,11 +777,13 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 	return &rings->cqes[off];
 }
 
-bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
-		     bool allow_overflow)
+static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+			    bool allow_overflow)
 {
 	struct io_uring_cqe *cqe;
 
+	lockdep_assert_held(&ctx->completion_lock);
+
 	ctx->cq_extra++;
 
 	/*
@@ -811,9 +812,23 @@ bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
 	return false;
 }
 
-bool io_post_aux_cqe(struct io_ring_ctx *ctx,
-		     u64 user_data, s32 res, u32 cflags,
-		     bool allow_overflow)
+static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
+	__must_hold(&ctx->uring_lock)
+{
+	struct io_submit_state *state = &ctx->submit_state;
+	unsigned int i;
+
+	lockdep_assert_held(&ctx->uring_lock);
+	for (i = 0; i < state->cqes_count; i++) {
+		struct io_uring_cqe *cqe = &state->cqes[i];
+
+		io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags, true);
+	}
+	state->cqes_count = 0;
+}
+
+static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
+			      bool allow_overflow)
 {
 	bool filled;
 
@@ -823,15 +838,58 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
 	return filled;
 }
 
-static void __io_req_complete_put(struct io_kiocb *req)
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+{
+	return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
+}
+
+bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+		bool allow_overflow)
 {
+	struct io_uring_cqe *cqe;
+	unsigned int length;
+
+	if (!defer)
+		return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
+
+	length = ARRAY_SIZE(ctx->submit_state.cqes);
+
+	lockdep_assert_held(&ctx->uring_lock);
+
+	if (ctx->submit_state.cqes_count == length) {
+		io_cq_lock(ctx);
+		__io_flush_post_cqes(ctx);
+		/* no need to flush - flush is deferred */
+		spin_unlock(&ctx->completion_lock);
+	}
+
+	/* For defered completions this is not as strict as it is otherwise,
+	 * however it's main job is to prevent unbounded posted completions,
+	 * and in that it works just as well.
+	 */
+	if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+		return false;
+
+	cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
+	cqe->user_data = user_data;
+	cqe->res = res;
+	cqe->flags = cflags;
+	return true;
+}
+
+static void __io_req_complete_post(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_cq_lock(ctx);
+	if (!(req->flags & REQ_F_CQE_SKIP))
+		__io_fill_cqe_req(ctx, req);
+
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
 	 */
 	if (req_ref_put_and_test(req)) {
-		struct io_ring_ctx *ctx = req->ctx;
-
 		if (req->flags & IO_REQ_LINK_FLAGS) {
 			if (req->flags & IO_DISARM_MASK)
 				io_disarm_next(req);
@@ -852,38 +910,35 @@ static void __io_req_complete_put(struct io_kiocb *req)
 		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
 		ctx->locked_free_nr++;
 	}
-}
-
-void __io_req_complete_post(struct io_kiocb *req)
-{
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe_req(req->ctx, req);
-	__io_req_complete_put(req);
-}
-
-void io_req_complete_post(struct io_kiocb *req)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-
-	io_cq_lock(ctx);
-	__io_req_complete_post(req);
 	io_cq_unlock_post(ctx);
 }
 
-inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
+void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
-	io_req_complete_post(req);
+	if (!(issue_flags & IO_URING_F_UNLOCKED) ||
+	    !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
+		__io_req_complete_post(req);
+	} else {
+		struct io_ring_ctx *ctx = req->ctx;
+
+		mutex_lock(&ctx->uring_lock);
+		__io_req_complete_post(req);
+		mutex_unlock(&ctx->uring_lock);
+	}
 }
 
-void io_req_complete_failed(struct io_kiocb *req, s32 res)
+void io_req_defer_failed(struct io_kiocb *req, s32 res)
+	__must_hold(&ctx->uring_lock)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 
+	lockdep_assert_held(&req->ctx->uring_lock);
+
 	req_set_fail(req);
 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 	if (def->fail)
 		def->fail(req);
-	io_req_complete_post(req);
+	io_req_complete_defer(req);
 }
 
 /*
@@ -1105,6 +1160,20 @@ void tctx_task_work(struct callback_head *cb)
 	trace_io_uring_task_work_run(tctx, count, loops);
 }
 
+static __cold void io_fallback_tw(struct io_uring_task *tctx)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+	struct io_kiocb *req;
+
+	while (node) {
+		req = container_of(node, struct io_kiocb, io_task_work.node);
+		node = node->next;
+		if (llist_add(&req->io_task_work.node,
+			      &req->ctx->fallback_llist))
+			schedule_delayed_work(&req->ctx->fallback_work, 1);
+	}
+}
+
 static void io_req_local_work_add(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1127,11 +1196,10 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	__io_cqring_wake(ctx);
 }
 
-static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
+void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 {
 	struct io_uring_task *tctx = req->task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct llist_node *node;
 
 	if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
 		io_req_local_work_add(req);
@@ -1148,20 +1216,7 @@ static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local
 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
 		return;
 
-	node = llist_del_all(&tctx->task_list);
-
-	while (node) {
-		req = container_of(node, struct io_kiocb, io_task_work.node);
-		node = node->next;
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
-	}
-}
-
-void io_req_task_work_add(struct io_kiocb *req)
-{
-	__io_req_task_work_add(req, true);
+	io_fallback_tw(tctx);
 }
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
@@ -1237,23 +1292,10 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 	return ret;
 }
 
-static void io_req_tw_post(struct io_kiocb *req, bool *locked)
-{
-	io_req_complete_post(req);
-}
-
-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
-{
-	io_req_set_res(req, res, cflags);
-	req->io_task_work.func = io_req_tw_post;
-	io_req_task_work_add(req);
-}
-
 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
-	/* not needed for normal modes, but SQPOLL depends on it */
 	io_tw_lock(req->ctx, locked);
-	io_req_complete_failed(req, req->cqe.res);
+	io_req_defer_failed(req, req->cqe.res);
 }
 
 void io_req_task_submit(struct io_kiocb *req, bool *locked)
@@ -1263,7 +1305,7 @@ void io_req_task_submit(struct io_kiocb *req, bool *locked)
 	if (likely(!(req->task->flags & PF_EXITING)))
 		io_queue_sqe(req);
 	else
-		io_req_complete_failed(req, -EFAULT);
+		io_req_defer_failed(req, -EFAULT);
 }
 
 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
@@ -1344,6 +1386,9 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	io_cq_lock(ctx);
+	/* must come first to preserve CQE ordering in failure cases */
+	if (state->cqes_count)
+		__io_flush_post_cqes(ctx);
 	wq_list_for_each(node, prev, &state->compl_reqs) {
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 					    comp_list);
@@ -1351,10 +1396,12 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		if (!(req->flags & REQ_F_CQE_SKIP))
 			__io_fill_cqe_req(ctx, req);
 	}
-	__io_cq_unlock_post(ctx);
+	io_cq_unlock_post_inline(ctx);
 
-	io_free_batch_list(ctx, state->compl_reqs.first);
-	INIT_WQ_LIST(&state->compl_reqs);
+	if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
+		io_free_batch_list(ctx, state->compl_reqs.first);
+		INIT_WQ_LIST(&state->compl_reqs);
+	}
 }
 
 /*
@@ -1476,16 +1523,10 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 
 void io_req_task_complete(struct io_kiocb *req, bool *locked)
 {
-	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
-		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
-
-		req->cqe.flags |= io_put_kbuf(req, issue_flags);
-	}
-
 	if (*locked)
 		io_req_complete_defer(req);
 	else
-		io_req_complete_post(req);
+		io_req_complete_post(req, IO_URING_F_UNLOCKED);
 }
 
 /*
@@ -1635,6 +1676,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
 }
 
 static __cold void io_drain_req(struct io_kiocb *req)
+	__must_hold(&ctx->uring_lock)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_defer_entry *de;
@@ -1655,7 +1697,7 @@ queue:
 	ret = io_req_prep_async(req);
 	if (ret) {
 fail:
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
 	io_prep_async_link(req);
@@ -1752,7 +1794,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
 			io_req_complete_defer(req);
 		else
-			io_req_complete_post(req);
+			io_req_complete_post(req, issue_flags);
 	} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
 		return ret;
 
@@ -1768,7 +1810,8 @@ int io_poll_issue(struct io_kiocb *req, bool *locked)
 	io_tw_lock(req->ctx, locked);
 	if (unlikely(req->task->flags & PF_EXITING))
 		return -EFAULT;
-	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
+	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
+				 IO_URING_F_COMPLETE_DEFER);
 }
 
 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
@@ -1787,7 +1830,7 @@ void io_wq_submit_work(struct io_wq_work *work)
 	bool needs_poll = false;
 	int ret = 0, err = -ECANCELED;
 
-	/* one will be dropped by ->io_free_work() after returning to io-wq */
+	/* one will be dropped by ->io_wq_free_work() after returning to io-wq */
 	if (!(req->flags & REQ_F_REFCOUNT))
 		__io_req_set_refcount(req, 2);
 	else
@@ -1885,7 +1928,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 	struct io_kiocb *linked_timeout;
 
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
 
@@ -1935,14 +1978,14 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		 */
 		req->flags &= ~REQ_F_HARDLINK;
 		req->flags |= REQ_F_LINK;
-		io_req_complete_failed(req, req->cqe.res);
+		io_req_defer_failed(req, req->cqe.res);
 	} else if (unlikely(req->ctx->drain_active)) {
 		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
 
 		if (unlikely(ret))
-			io_req_complete_failed(req, ret);
+			io_req_defer_failed(req, ret);
 		else
 			io_queue_iowq(req, NULL);
 	}
@@ -2670,7 +2713,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	 *      lock(&ep->mtx);
 	 *
 	 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
-	 * pushs them to do the flush.
+	 * pushes them to do the flush.
	 */
 
 	if (io_cqring_events(ctx) || io_has_work(ctx))
@@ -2871,7 +2914,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 	while (!list_empty(&list)) {
 		de = list_first_entry(&list, struct io_defer_entry, list);
 		list_del_init(&de->list);
-		io_req_complete_failed(de->req, -ECANCELED);
+		io_req_task_queue_fail(de->req, -ECANCELED);
 		kfree(de);
 	}
 	return true;
@@ -4064,8 +4107,6 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 
 	ctx = f.file->private_data;
 
-	io_run_task_work_ctx(ctx);
-
 	mutex_lock(&ctx->uring_lock);
 	ret = __io_uring_register(ctx, opcode, arg, nr_args);
 	mutex_unlock(&ctx->uring_lock);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 50bc3af44953..062899b1fe86 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -4,6 +4,7 @@
 #include <linux/errno.h>
 #include <linux/lockdep.h>
 #include <linux/io_uring_types.h>
+#include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
 #include "slist.h"
 #include "filetable.h"
@@ -29,14 +30,11 @@ bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
-void io_req_complete_failed(struct io_kiocb *req, s32 res);
-void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
-void io_req_complete_post(struct io_kiocb *req);
-void __io_req_complete_post(struct io_kiocb *req);
-bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
-		     bool allow_overflow);
-bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
-		     bool allow_overflow);
+void io_req_defer_failed(struct io_kiocb *req, s32 res);
+void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+		bool allow_overflow);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
@@ -50,10 +48,9 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
 	return req->flags & REQ_F_FIXED_FILE;
 }
 
+void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
 bool io_is_uring_fops(struct file *file);
 bool io_alloc_async_data(struct io_kiocb *req);
-void io_req_task_work_add(struct io_kiocb *req);
-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
 void io_req_task_queue(struct io_kiocb *req);
 void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
 void io_req_task_complete(struct io_kiocb *req, bool *locked);
@@ -82,6 +79,11 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			bool cancel_all);
 
+static inline void io_req_task_work_add(struct io_kiocb *req)
+{
+	__io_req_task_work_add(req, true);
+}
+
 #define io_for_each_link(pos, head) \
 	for (pos = (head); pos; pos = pos->link)
 
@@ -207,12 +209,18 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 {
 	/*
-	 * wake_up_all() may seem excessive, but io_wake_function() and
-	 * io_should_wake() handle the termination of the loop and only
-	 * wake as many waiters as we need to.
+	 * Trigger waitqueue handler on all waiters on our waitqueue. This
+	 * won't necessarily wake up all the tasks, io_should_wake() will make
+	 * that decision.
+	 *
+	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
+	 * set in the mask so that if we recurse back into our own poll
+	 * waitqueue handlers, we know we have a dependency between eventfd or
+	 * epoll and should terminate multishot poll at that point.
	 */
 	if (waitqueue_active(&ctx->cq_wait))
-		wake_up_all(&ctx->cq_wait);
+		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
 static inline void io_cqring_wake(struct io_ring_ctx *ctx)
@@ -369,4 +377,11 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
 		      ctx->submitter_task == current);
 }
 
+static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
+{
+	io_req_set_res(req, res, 0);
+	req->io_task_work.func = io_req_task_complete;
+	io_req_task_work_add(req);
+}
+
 #endif
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index e2c46889d5fa..4a6401080c1f 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -306,14 +306,11 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		if (!bl->buf_nr_pages)
 			ret = __io_remove_buffers(ctx, bl, p->nbufs);
 	}
+	io_ring_submit_unlock(ctx, issue_flags);
 	if (ret < 0)
 		req_set_fail(req);
-
-	/* complete before unlock, IOPOLL may need the lock */
 	io_req_set_res(req, ret, 0);
-	__io_req_complete(req, issue_flags);
-	io_ring_submit_unlock(ctx, issue_flags);
-	return IOU_ISSUE_SKIP_COMPLETE;
+	return IOU_OK;
 }
 
 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -458,13 +455,12 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
 		ret = io_add_buffers(ctx, p, bl);
 err:
+	io_ring_submit_unlock(ctx, issue_flags);
+
 	if (ret < 0)
 		req_set_fail(req);
-	/* complete before unlock, IOPOLL may need the lock */
 	io_req_set_res(req, ret, 0);
-	__io_req_complete(req, issue_flags);
-	io_ring_submit_unlock(ctx, issue_flags);
-	return IOU_ISSUE_SKIP_COMPLETE;
+	return IOU_OK;
 }
 
 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 90d2fc6fd80e..afb543aab9f6 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -31,7 +31,7 @@ static int io_msg_ring_data(struct io_kiocb *req)
 	if (msg->src_fd || msg->dst_fd || msg->flags)
 		return -EINVAL;
 
-	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
 		return 0;
 
 	return -EOVERFLOW;
@@ -116,7 +116,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
-	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
 		ret = -EOVERFLOW;
 out_unlock:
 	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
diff --git a/io_uring/net.c b/io_uring/net.c
index 2818aeefea42..cb831326ea5b 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -125,13 +125,15 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 	struct io_cache_entry *entry;
 	struct io_async_msghdr *hdr;
 
-	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
-		hdr = container_of(entry, struct io_async_msghdr, cache);
-		hdr->free_iov = NULL;
-		req->flags |= REQ_F_ASYNC_DATA;
-		req->async_data = hdr;
-		return hdr;
+	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+		entry = io_alloc_cache_get(&ctx->netmsg_cache);
+		if (entry) {
+			hdr = container_of(entry, struct io_async_msghdr, cache);
+			hdr->free_iov = NULL;
+			req->flags |= REQ_F_ASYNC_DATA;
+			req->async_data = hdr;
+			return hdr;
+		}
 	}
 
 	if (!io_alloc_async_data(req)) {
@@ -599,16 +601,12 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	}
 
 	if (!mshot_finished) {
-		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
-				    cflags | IORING_CQE_F_MORE, false)) {
+		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
+			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
 			io_recv_prep_retry(req);
 			return false;
 		}
-		/*
-		 * Otherwise stop multishot but use the current result.
-		 * Probably will end up going into overflow, but this means
-		 * we cannot trust the ordering anymore
-		 */
+		/* Otherwise stop multishot but use the current result. */
 	}
 
 	io_req_set_res(req, *ret, cflags);
@@ -923,6 +921,9 @@ void io_send_zc_cleanup(struct io_kiocb *req)
 	}
 }
 
+#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
+#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
+
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -935,10 +936,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->flags & REQ_F_CQE_SKIP)
 		return -EINVAL;
 
-	zc->flags = READ_ONCE(sqe->ioprio);
-	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
-			  IORING_RECVSEND_FIXED_BUF))
-		return -EINVAL;
 	notif = zc->notif = io_alloc_notif(ctx);
 	if (!notif)
 		return -ENOMEM;
@@ -946,6 +943,17 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	notif->cqe.res = 0;
 	notif->cqe.flags = IORING_CQE_F_NOTIF;
 	req->flags |= REQ_F_NEED_CLEANUP;
+
+	zc->flags = READ_ONCE(sqe->ioprio);
+	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
+		if (zc->flags & ~IO_ZC_FLAGS_VALID)
+			return -EINVAL;
+		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
+			io_notif_set_extended(notif);
+			io_notif_to_data(notif)->zc_report = true;
+		}
+	}
+
 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
 		unsigned idx = READ_ONCE(sqe->buf_index);
 
@@ -1087,6 +1095,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			return ret;
 		msg.sg_from_iter = io_sg_from_iter;
 	} else {
+		io_notif_set_extended(zc->notif);
 		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
 					  &msg.msg_iter);
 		if (unlikely(ret))
@@ -1148,6 +1157,8 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	unsigned flags;
 	int ret, min_ret = 0;
 
+	io_notif_set_extended(sr->notif);
+
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
@@ -1307,12 +1318,13 @@ retry:
 		return IOU_OK;
 	}
 
-	if (ret >= 0 &&
-	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
+	if (ret < 0)
+		return ret;
+	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
+		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
 		goto retry;
 
-	io_req_set_res(req, ret, 0);
-	return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
+	return -ECANCELED;
 }
 
 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
diff --git a/io_uring/notif.c b/io_uring/notif.c
index e37c6569d82e..c4bb793ebf0e 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -9,11 +9,14 @@
 #include "notif.h"
 #include "rsrc.h"
 
-static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
+static void io_notif_complete_tw_ext(struct io_kiocb *notif, bool *locked)
 {
 	struct io_notif_data *nd = io_notif_to_data(notif);
 	struct io_ring_ctx *ctx = notif->ctx;
 
+	if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
+		notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
+
 	if (nd->account_pages && ctx->user) {
 		__io_unaccount_mem(ctx->user, nd->account_pages);
 		nd->account_pages = 0;
@@ -21,16 +24,41 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
 	io_req_task_complete(notif, locked);
 }
 
-static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
-					  struct ubuf_info *uarg,
-					  bool success)
+static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+				bool success)
 {
 	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
 	struct io_kiocb *notif = cmd_to_io_kiocb(nd);
 
-	if (refcount_dec_and_test(&uarg->refcnt)) {
-		notif->io_task_work.func = __io_notif_complete_tw;
+	if (refcount_dec_and_test(&uarg->refcnt))
 		io_req_task_work_add(notif);
+}
+
+static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg,
+				    bool success)
+{
+	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
+
+	if (nd->zc_report) {
+		if (success && !nd->zc_used && skb)
+			WRITE_ONCE(nd->zc_used, true);
+		else if (!success && !nd->zc_copied)
+			WRITE_ONCE(nd->zc_copied, true);
+	}
+	io_tx_ubuf_callback(skb, uarg, success);
+}
+
+void io_notif_set_extended(struct io_kiocb *notif)
+{
+	struct io_notif_data *nd = io_notif_to_data(notif);
+
+	if (nd->uarg.callback != io_tx_ubuf_callback_ext) {
+		nd->account_pages = 0;
+		nd->zc_report = false;
+		nd->zc_used = false;
+		nd->zc_copied = false;
+		nd->uarg.callback = io_tx_ubuf_callback_ext;
+		notif->io_task_work.func = io_notif_complete_tw_ext;
 	}
 }
 
@@ -49,24 +77,11 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
 	notif->task = current;
 	io_get_task_refs(1);
 	notif->rsrc_node = NULL;
-	io_req_set_rsrc_node(notif, ctx, 0);
+	notif->io_task_work.func = io_req_task_complete;
 
 	nd = io_notif_to_data(notif);
-	nd->account_pages = 0;
 	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
-	nd->uarg.callback = io_uring_tx_zerocopy_callback;
+	nd->uarg.callback = io_tx_ubuf_callback;
 	refcount_set(&nd->uarg.refcnt, 1);
 	return notif;
 }
-
-void io_notif_flush(struct io_kiocb *notif)
-	__must_hold(&slot->notif->ctx->uring_lock)
-{
-	struct io_notif_data *nd = io_notif_to_data(notif);
-
-	/* drop slot's master ref */
-	if (refcount_dec_and_test(&nd->uarg.refcnt)) {
-		notif->io_task_work.func = __io_notif_complete_tw;
-		io_req_task_work_add(notif);
-	}
-}
diff --git a/io_uring/notif.h b/io_uring/notif.h
index 5b4d710c8ca5..c88c800cd89d 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -13,16 +13,29 @@ struct io_notif_data {
 	struct file		*file;
 	struct ubuf_info	uarg;
 	unsigned long		account_pages;
+	bool			zc_report;
+	bool			zc_used;
+	bool			zc_copied;
 };
 
-void io_notif_flush(struct io_kiocb *notif);
 struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
+void io_notif_set_extended(struct io_kiocb *notif);
 
 static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
 {
 	return io_kiocb_to_cmd(notif, struct io_notif_data);
 }
 
+static inline void io_notif_flush(struct io_kiocb *notif)
+	__must_hold(&notif->ctx->uring_lock)
+{
+	struct io_notif_data *nd = io_notif_to_data(notif);
+
+	/* drop slot's master ref */
+	if (refcount_dec_and_test(&nd->uarg.refcnt))
+		io_req_task_work_add(notif);
+}
+
 static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
 {
 	struct io_ring_ctx *ctx = notif->ctx;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d9bf1767867e..599ba28c89b2 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -280,16 +280,14 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 			continue;
 		if (req->apoll_events & EPOLLONESHOT)
 			return IOU_POLL_DONE;
-		if (io_is_uring_fops(req->file))
-			return IOU_POLL_DONE;
 
 		/* multishot, just fill a CQE and proceed */
 		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
 			__poll_t mask = mangle_poll(req->cqe.res &
 						    req->apoll_events);
 
-			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
-					     mask, IORING_CQE_F_MORE, false)) {
+			if (!io_aux_cqe(ctx, *locked, req->cqe.user_data,
+					mask, IORING_CQE_F_MORE, false)) {
 				io_req_set_res(req, mask, 0);
 				return IOU_POLL_REMOVE_POLL_USE_RES;
 			}
@@ -345,26 +343,22 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	if (ret == IOU_POLL_NO_ACTION)
 		return;
 
+	io_tw_lock(req->ctx, locked);
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, locked);
 
 	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 	else if (ret == IOU_POLL_DONE)
 		io_req_task_submit(req, locked);
 	else
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 }
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
 {
 	io_req_set_res(req, mask, 0);
-	/*
-	 * This is useful for poll that is armed on behalf of another
-	 * request, and where the wakeup path could be on a different
-	 * CPU. We want to avoid pulling in req->apoll->events for that
-	 * case.
-	 */
+
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else
@@ -429,6 +423,14 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		return 0;
 
 	if (io_poll_get_ownership(req)) {
+		/*
+		 * If we trigger a multishot poll off our own wakeup path,
+		 * disable multishot as there is a circular dependency between
+		 * CQ posting and triggering the event.
+		 */
+		if (mask & EPOLL_URING_WAKE)
+			poll->events |= EPOLLONESHOT;
+
 		/* optional, saves extra locking for removal in tw handler */
 		if (mask && poll->events & EPOLLONESHOT) {
 			list_del_init(&poll->wait.entry);
@@ -648,10 +650,13 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 	if (req->flags & REQ_F_POLLED) {
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
-	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
+	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+		entry = io_alloc_cache_get(&ctx->apoll_cache);
+		if (entry == NULL)
+			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
 	} else {
+alloc_apoll:
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
 			return NULL;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 55d4ab96fb92..133608200769 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -170,10 +170,10 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 		if (prsrc->tag) {
 			if (ctx->flags & IORING_SETUP_IOPOLL) {
 				mutex_lock(&ctx->uring_lock);
-				io_post_aux_cqe(ctx, prsrc->tag, 0, 0, true);
+				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
 				mutex_unlock(&ctx->uring_lock);
 			} else {
-				io_post_aux_cqe(ctx, prsrc->tag, 0, 0, true);
+				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
 			}
 		}
 
@@ -321,6 +321,11 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 		if (atomic_dec_and_test(&data->refs))
 			break;
 		mutex_unlock(&ctx->uring_lock);
+
+		ret = io_run_task_work_sig(ctx);
+		if (ret < 0)
+			goto reinit;
+
 		flush_delayed_work(&ctx->rsrc_put_work);
 		ret = wait_for_completion_interruptible(&data->done);
 		if (!ret) {
@@ -336,12 +341,12 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 			}
 		}
 
+reinit:
 		atomic_inc(&data->refs);
 		/* wait for all works potentially completing data->done */
 		flush_delayed_work(&ctx->rsrc_put_work);
 		reinit_completion(&data->done);
 
-		ret = io_run_task_work_sig(ctx);
 		mutex_lock(&ctx->uring_lock);
 	} while (ret >= 0);
 	data->quiesce = false;
diff --git a/io_uring/rw.c b/io_uring/rw.c
index fed5570be335..77576835a848 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -286,6 +286,12 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
 {
 	io_req_io_end(req);
+
+	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
+		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
+
+		req->cqe.flags |= io_put_kbuf(req, issue_flags);
+	}
 	io_req_task_complete(req, locked);
 }
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index e8a8c2099480..5b4bc93fd6e0 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -63,7 +63,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&timeout->list);
-		io_req_tw_post_queue(req, status, 0);
+		io_req_queue_tw_complete(req, status);
 		return true;
 	}
 	return false;
@@ -159,7 +159,7 @@ void io_disarm_next(struct io_kiocb *req)
 		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
-			io_req_tw_post_queue(link, -ECANCELED, 0);
+			io_req_queue_tw_complete(link, -ECANCELED);
 		}
 	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;
@@ -168,7 +168,7 @@ void io_disarm_next(struct io_kiocb *req)
 		link = io_disarm_linked_timeout(req);
 		spin_unlock_irq(&ctx->timeout_lock);
 		if (link)
-			io_req_tw_post_queue(link, -ECANCELED, 0);
+			io_req_queue_tw_complete(link, -ECANCELED);
 	}
 	if (unlikely((req->flags & REQ_F_FAIL) &&
 		     !(req->flags & REQ_F_HARDLINK)))
@@ -282,11 +282,11 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 			ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
 		}
 		io_req_set_res(req, ret ?: -ETIME, 0);
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 		io_put_req(prev);
 	} else {
 		io_req_set_res(req, -ETIME, 0);
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 	}
 }
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index e50de0b6b9f8..446a189b78b0 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -56,7 +56,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
 		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
 		smp_store_release(&req->iopoll_completed, 1);
 	else
-		__io_req_complete(req, 0);
+		io_req_complete_post(req, 0);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
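
For the IORING_SEND_ZC_REPORT_USAGE flag added in the uapi header above,
here is a hedged userspace sketch. It assumes a liburing recent enough
to expose io_uring_prep_send_zc() and the new flag, plus a connected
socket sock_fd; the flag is passed as zc_flags, which liburing stores in
sqe->ioprio as the uapi comment describes:

#include <liburing.h>

static void send_zc_with_report(struct io_uring *ring, int sock_fd,
				const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	io_uring_prep_send_zc(sqe, sock_fd, buf, len, 0,
			      IORING_SEND_ZC_REPORT_USAGE);
	io_uring_submit(ring);

	/* first CQE carries the send result; the IORING_CQE_F_NOTIF one
	 * carries the usage report in cqe->res */
	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(ring, &cqe))
			return;
		if ((cqe->flags & IORING_CQE_F_NOTIF) &&
		    (cqe->res & IORING_NOTIF_USAGE_ZC_COPIED)) {
			/* data was copied at least partially; zerocopy
			 * did not pay off for this send */
		}
		io_uring_cqe_seen(ring, cqe);
	}
}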