Diffstat (limited to 'io_uring/sqpoll.c')
-rw-r--r--  io_uring/sqpoll.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------
1 file changed, 46 insertions(+), 37 deletions(-)
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index a26593979887..a3f11349ce06 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -16,11 +16,12 @@
#include <uapi/linux/io_uring.h>
#include "io_uring.h"
+#include "tctx.h"
#include "napi.h"
#include "sqpoll.h"
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
-#define IORING_TW_CAP_ENTRIES_VALUE 8
+#define IORING_TW_CAP_ENTRIES_VALUE 32
enum {
IO_SQ_THREAD_SHOULD_STOP = 0,
@@ -30,7 +31,7 @@ enum {
void io_sq_thread_unpark(struct io_sq_data *sqd)
__releases(&sqd->lock)
{
- WARN_ON_ONCE(sqd->thread == current);
+ WARN_ON_ONCE(sqpoll_task_locked(sqd) == current);
/*
* Do the dance but not conditional clear_bit() because it'd race with
@@ -40,29 +41,38 @@ void io_sq_thread_unpark(struct io_sq_data *sqd)
if (atomic_dec_return(&sqd->park_pending))
set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
mutex_unlock(&sqd->lock);
+ wake_up(&sqd->wait);
}
void io_sq_thread_park(struct io_sq_data *sqd)
__acquires(&sqd->lock)
{
- WARN_ON_ONCE(data_race(sqd->thread) == current);
+ struct task_struct *tsk;
atomic_inc(&sqd->park_pending);
set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
mutex_lock(&sqd->lock);
- if (sqd->thread)
- wake_up_process(sqd->thread);
+
+ tsk = sqpoll_task_locked(sqd);
+ if (tsk) {
+ WARN_ON_ONCE(tsk == current);
+ wake_up_process(tsk);
+ }
}
void io_sq_thread_stop(struct io_sq_data *sqd)
{
- WARN_ON_ONCE(sqd->thread == current);
+ struct task_struct *tsk;
+
WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
mutex_lock(&sqd->lock);
- if (sqd->thread)
- wake_up_process(sqd->thread);
+ tsk = sqpoll_task_locked(sqd);
+ if (tsk) {
+ WARN_ON_ONCE(tsk == current);
+ wake_up_process(tsk);
+ }
mutex_unlock(&sqd->lock);
wait_for_completion(&sqd->exited);
}
@@ -106,29 +116,21 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
struct io_ring_ctx *ctx_attach;
struct io_sq_data *sqd;
- struct fd f;
+ CLASS(fd, f)(p->wq_fd);
- f = fdget(p->wq_fd);
- if (!fd_file(f))
+ if (fd_empty(f))
return ERR_PTR(-ENXIO);
- if (!io_is_uring_fops(fd_file(f))) {
- fdput(f);
+ if (!io_is_uring_fops(fd_file(f)))
return ERR_PTR(-EINVAL);
- }
ctx_attach = fd_file(f)->private_data;
sqd = ctx_attach->sq_data;
- if (!sqd) {
- fdput(f);
+ if (!sqd)
return ERR_PTR(-EINVAL);
- }
- if (sqd->task_tgid != current->tgid) {
- fdput(f);
+ if (sqd->task_tgid != current->tgid)
return ERR_PTR(-EPERM);
- }
refcount_inc(&sqd->refs);
- fdput(f);
return sqd;
}
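
Note: the fdget()/fdput() pairs above are replaced by the scoped
CLASS(fd, ...) guard (the cleanup.h-based fd class from <linux/file.h>),
which drops the file reference automatically when f goes out of scope,
so the error paths no longer need explicit fdput() calls. A minimal
sketch of the resulting shape, assuming the standard fd class helpers:

	CLASS(fd, f)(p->wq_fd);			/* reference taken here */
	if (fd_empty(f))
		return ERR_PTR(-ENXIO);		/* reference dropped automatically */
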
@@ -215,7 +217,7 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
mutex_unlock(&sqd->lock);
if (signal_pending(current))
did_sig = get_signal(&ksig);
- cond_resched();
+ wait_event(sqd->wait, !atomic_read(&sqd->park_pending));
mutex_lock(&sqd->lock);
sqd->sq_cpu = raw_smp_processor_id();
}
@@ -271,12 +273,17 @@ static int io_sq_thread(void *data)
struct io_ring_ctx *ctx;
struct rusage start;
unsigned long timeout = 0;
- char buf[TASK_COMM_LEN];
+ char buf[TASK_COMM_LEN] = {};
DEFINE_WAIT(wait);
/* offload context creation failed, just exit */
- if (!current->io_uring)
+ if (!current->io_uring) {
+ mutex_lock(&sqd->lock);
+ rcu_assign_pointer(sqd->thread, NULL);
+ put_task_struct(current);
+ mutex_unlock(&sqd->lock);
goto err_out;
+ }
snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
set_task_comm(current, buf);
@@ -382,7 +389,8 @@ static int io_sq_thread(void *data)
io_sq_tw(&retry_list, UINT_MAX);
io_uring_cancel_generic(true, sqd);
- sqd->thread = NULL;
+ rcu_assign_pointer(sqd->thread, NULL);
+ put_task_struct(current);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
io_run_task_work();
@@ -417,16 +425,11 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
/* Retain compatibility with failing for an invalid attach attempt */
if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
IORING_SETUP_ATTACH_WQ) {
- struct fd f;
-
- f = fdget(p->wq_fd);
- if (!fd_file(f))
+ CLASS(fd, f)(p->wq_fd);
+ if (fd_empty(f))
return -ENXIO;
- if (!io_is_uring_fops(fd_file(f))) {
- fdput(f);
+ if (!io_is_uring_fops(fd_file(f)))
return -EINVAL;
- }
- fdput(f);
}
if (ctx->flags & IORING_SETUP_SQPOLL) {
struct task_struct *tsk;
@@ -491,7 +494,11 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
goto err_sqpoll;
}
- sqd->thread = tsk;
+ mutex_lock(&sqd->lock);
+ rcu_assign_pointer(sqd->thread, tsk);
+ mutex_unlock(&sqd->lock);
+
+ get_task_struct(tsk);
ret = io_uring_alloc_task_context(tsk, ctx);
wake_up_new_task(tsk);
if (ret)
@@ -501,7 +508,6 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
ret = -EINVAL;
goto err;
}
-
return 0;
err_sqpoll:
complete(&ctx->sq_data->exited);
@@ -517,10 +523,13 @@ __cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
int ret = -EINVAL;
if (sqd) {
+ struct task_struct *tsk;
+
io_sq_thread_park(sqd);
/* Don't set affinity for a dying thread */
- if (sqd->thread)
- ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+ tsk = sqpoll_task_locked(sqd);
+ if (tsk)
+ ret = io_wq_cpu_affinity(tsk->io_uring, mask);
io_sq_thread_unpark(sqd);
}
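
Note: sqpoll_task_locked() is used throughout this patch but not defined
in this file; with sqd->thread now assigned via rcu_assign_pointer(), it
is presumably a small accessor added to io_uring/sqpoll.h by the same
series. A sketch of the likely shape, assuming the caller holds
sqd->lock -- not necessarily the exact in-tree definition:

	static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd)
	{
		return rcu_dereference_protected(sqd->thread,
						 lockdep_is_held(&sqd->lock));
	}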