author    | Pavel Begunkov <asml.silence@gmail.com> | 2023-09-12 16:57:06 +0300
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2023-09-19 13:27:54 +0300
commit    | 605d055452e7abed4aca012953aecc79c09baf12 (patch)
tree      | 01d316e8225d0d82924fc7d23df2c837d32117e9 /io_uring
parent    | b04f22b68643d35b3e0823ea92295b203ef83bf3 (diff)
download  | linux-605d055452e7abed4aca012953aecc79c09baf12.tar.xz
io_uring: break out of iowq iopoll on teardown
[ upstream commit 45500dc4e01c167ee063f3dcc22f51ced5b2b1e9 ]
io-wq will retry iopoll even when it fails with -EAGAIN. If that races
with task exit, which sets TIF_NOTIFY_SIGNAL for all of its workers,
such workers can spin indefinitely, retrying iopoll over and over and
failing each time on some allocation, wait, etc. Don't keep spinning if
io-wq is dying.
Fixes: 561fb04a6a225 ("io_uring: replace workqueue usage with io-wq")
Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
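To make the race easier to see, here is a minimal sketch of the io-wq issue/retry loop that the io_uring.c hunk below patches. It is modeled on io_wq_submit_work() but simplified and renamed: io_wq_submit_work_sketch, the initial issue_flags value, and the poll-fallback details are illustrative, not the literal stable code. The point is the IOPOLL branch: -EAGAIN there normally just means "reschedule and retry", so once the exiting task has set TIF_NOTIFY_SIGNAL and every retry keeps failing, nothing ever leaves the loop. The new io_wq_worker_stopped() check tests IO_WQ_BIT_EXIT, the flag io-wq already sets when teardown starts, and breaks out instead.

/* Simplified sketch of the io-wq issue/retry loop, not the literal source. */
static void io_wq_submit_work_sketch(struct io_kiocb *req)
{
	unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_NONBLOCK;
	bool needs_poll = false;
	int ret;

	do {
		ret = io_issue_sqe(req, issue_flags);
		if (ret != -EAGAIN)
			break;

		/*
		 * IOPOLL submissions can legitimately return -EAGAIN, so the
		 * loop normally just reschedules and retries...
		 */
		if (!needs_poll) {
			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
				break;
			/*
			 * ...but if io-wq is being torn down, every retry
			 * keeps failing; break out instead of spinning.
			 */
			if (io_wq_worker_stopped())
				break;
			cond_resched();
			continue;
		}

		/* Non-IOPOLL fallback: try to arm a poll handler, else retry blocking. */
		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
			return;
		needs_poll = false;
		issue_flags &= ~IO_URING_F_NONBLOCK;
	} while (1);
}

The check sits only on the IOPOLL retry path because that is the one branch of the loop that can keep going without ever returning from the function.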
Diffstat (limited to 'io_uring')
-rw-r--r-- | io_uring/io-wq.c    | 10
-rw-r--r-- | io_uring/io-wq.h    |  1
-rw-r--r-- | io_uring/io_uring.c |  2
3 files changed, 13 insertions, 0 deletions
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 411bb2d1acd4..dc3d4b835622 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -181,6 +181,16 @@ static void io_worker_ref_put(struct io_wq *wq)
 		complete(&wq->worker_done);
 }
 
+bool io_wq_worker_stopped(void)
+{
+	struct io_worker *worker = current->worker_private;
+
+	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
+		return true;
+
+	return test_bit(IO_WQ_BIT_EXIT, &worker->wqe->wq->state);
+}
+
 static void io_worker_cancel_cb(struct io_worker *worker)
 {
 	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 31228426d192..31cc5cc9048c 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -52,6 +52,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val);
 
 int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
 int io_wq_max_workers(struct io_wq *wq, int *new_count);
+bool io_wq_worker_stopped(void);
 
 static inline bool io_wq_is_hashed(struct io_wq_work *work)
 {
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6d455e2428b9..7c8e81057eb1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1823,6 +1823,8 @@ fail:
 		if (!needs_poll) {
 			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
 				break;
+			if (io_wq_worker_stopped())
+				break;
 			cond_resched();
 			continue;
 		}