author    | Jens Axboe <axboe@kernel.dk> | 2021-02-16 21:41:41 +0300
committer | Jens Axboe <axboe@kernel.dk> | 2021-02-22 03:25:22 +0300
commit    | d25e3a3de0d6fb2f660dbc7d643b2c632beb1743 (patch)
tree      | f7f62ee8c420fb06a0dd0eb7f50f122c4a5b4384 /fs/io_uring.c
parent    | 1cbd9c2bcf02a3be91e14c7206d4b6c0346540ed (diff)
download  | linux-d25e3a3de0d6fb2f660dbc7d643b2c632beb1743.tar.xz
io_uring: disable io-wq attaching
We're moving towards making the io_wq per ring, per task, so we can no
longer share it between rings. That's fine, since we've now dropped some
of the fat from it.
Retain compatibility with how attaching works, so that any attempt to
attach to an fd that doesn't exist, or isn't an io_uring fd, will fail
like it did before.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
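
To illustrate the compatibility behaviour described above, here is a minimal userspace sketch (not part of the patch) of an attach attempt against a non-io_uring fd. It assumes liburing's io_uring_queue_init_params() is available; the /dev/null fd and the error handling are illustrative only. With this patch, such a request is still rejected (a missing fd now yields -ENXIO, a non-io_uring fd -EINVAL).

```c
/*
 * Illustrative sketch only, not part of the kernel patch: it assumes
 * liburing is installed and uses /dev/null as an arbitrary
 * non-io_uring file descriptor.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p;
	int fd, ret;

	/* Any fd that is not an io_uring instance will do here. */
	fd = open("/dev/null", O_RDONLY);
	if (fd < 0)
		return 1;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_ATTACH_WQ;	/* ask to share an io_wq... */
	p.wq_fd = fd;				/* ...of something that isn't io_uring */

	ret = io_uring_queue_init_params(8, &ring, &p);
	/* Expected to fail, e.g. with -EINVAL, just as before the patch. */
	printf("attach to non-io_uring fd: %d (%s)\n", ret, strerror(-ret));

	close(fd);
	return 0;
}
```

If the fd does refer to an io_uring instance, setup now succeeds but the new ring gets its own io_wq; the fd is only validated and then dropped again, as the io_sq_offload_create() hunk below shows.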
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 55
1 file changed, 22 insertions(+), 33 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e3eb37304e24..d6c2ff6124fd 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8105,12 +8105,9 @@ static struct io_wq_work *io_free_work(struct io_wq_work *work)
 	return req ? &req->work : NULL;
 }
 
-static int io_init_wq_offload(struct io_ring_ctx *ctx,
-			      struct io_uring_params *p)
+static int io_init_wq_offload(struct io_ring_ctx *ctx)
 {
 	struct io_wq_data data;
-	struct fd f;
-	struct io_ring_ctx *ctx_attach;
 	unsigned int concurrency;
 	int ret = 0;
 
@@ -8118,37 +8115,15 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx,
 	data.free_work = io_free_work;
 	data.do_work = io_wq_submit_work;
 
-	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
-		/* Do QD, or 4 * CPUS, whatever is smallest */
-		concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-
-		ctx->io_wq = io_wq_create(concurrency, &data);
-		if (IS_ERR(ctx->io_wq)) {
-			ret = PTR_ERR(ctx->io_wq);
-			ctx->io_wq = NULL;
-		}
-		return ret;
-	}
-
-	f = fdget(p->wq_fd);
-	if (!f.file)
-		return -EBADF;
-
-	if (f.file->f_op != &io_uring_fops) {
-		ret = -EINVAL;
-		goto out_fput;
-	}
+	/* Do QD, or 4 * CPUS, whatever is smallest */
+	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
 
-	ctx_attach = f.file->private_data;
-	/* @io_wq is protected by holding the fd */
-	if (!io_wq_get(ctx_attach->io_wq, &data)) {
-		ret = -EINVAL;
-		goto out_fput;
+	ctx->io_wq = io_wq_create(concurrency, &data);
+	if (IS_ERR(ctx->io_wq)) {
+		ret = PTR_ERR(ctx->io_wq);
+		ctx->io_wq = NULL;
 	}
 
-	ctx->io_wq = ctx_attach->io_wq;
-out_fput:
-	fdput(f);
 	return ret;
 }
 
@@ -8200,6 +8175,20 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 {
 	int ret;
 
+	/* Retain compatibility with failing for an invalid attach attempt */
+	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
+				IORING_SETUP_ATTACH_WQ) {
+		struct fd f;
+
+		f = fdget(p->wq_fd);
+		if (!f.file)
+			return -ENXIO;
+		if (f.file->f_op != &io_uring_fops) {
+			fdput(f);
+			return -EINVAL;
+		}
+		fdput(f);
+	}
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		struct io_sq_data *sqd;
 
@@ -8257,7 +8246,7 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 	}
 
 done:
-	ret = io_init_wq_offload(ctx, p);
+	ret = io_init_wq_offload(ctx);
 	if (ret)
 		goto err;
 