summaryrefslogtreecommitdiff
path: root/io_uring
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2024-10-29 03:43:13 +0300
committerJens Axboe <axboe@kernel.dk>2024-11-03 00:45:30 +0300
commitb16e920a1909da6799c43000db730d8fcdcae907 (patch)
treef7e2dd08aa3ba7502b82a9ee70fe4c27a0d2920f /io_uring
parentd50f94d761a5d9a34e03a86e512e19d88cbeaf06 (diff)
downloadlinux-b16e920a1909da6799c43000db730d8fcdcae907.tar.xz
io_uring/rsrc: allow cloning at an offset
Right now buffer cloning is an all-or-nothing kind of thing - either the whole table is cloned from a source to a destination ring, or nothing at all. However, it's not always desired to clone the whole thing. Allow for the application to specify a source and destination offset, and a number of buffers to clone. If the destination offset is non-zero, then allocate sparse nodes upfront. Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
-rw-r--r--io_uring/rsrc.c32
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 7ad91f180566..289866315ecf 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -927,10 +927,11 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
return 0;
}
-static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
+static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx,
+ struct io_uring_clone_buffers *arg)
{
+ int i, ret, nbufs, off, nr;
struct io_rsrc_data data;
- int i, ret, nbufs;
/*
* Drop our own lock here. We'll setup the data we need and reference
@@ -943,11 +944,29 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
nbufs = src_ctx->buf_table.nr;
if (!nbufs)
goto out_unlock;
- ret = io_rsrc_data_alloc(&data, nbufs);
+ ret = -EINVAL;
+ if (!arg->nr)
+ arg->nr = nbufs;
+ else if (arg->nr > nbufs)
+ goto out_unlock;
+ ret = -EOVERFLOW;
+ if (check_add_overflow(arg->nr, arg->src_off, &off))
+ goto out_unlock;
+ if (off > nbufs)
+ goto out_unlock;
+ if (check_add_overflow(arg->nr, arg->dst_off, &off))
+ goto out_unlock;
+ ret = -EINVAL;
+ if (off > IORING_MAX_REG_BUFFERS)
+ goto out_unlock;
+ ret = io_rsrc_data_alloc(&data, off);
if (ret)
goto out_unlock;
- for (i = 0; i < nbufs; i++) {
+ off = arg->dst_off;
+ i = arg->src_off;
+ nr = arg->nr;
+ while (nr--) {
struct io_rsrc_node *dst_node, *src_node;
src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
@@ -963,7 +982,8 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
refcount_inc(&src_node->buf->refs);
dst_node->buf = src_node->buf;
}
- data.nodes[i] = dst_node;
+ data.nodes[off++] = dst_node;
+ i++;
}
/* Have a ref on the bufs now, drop src lock and re-grab our own lock */
@@ -1018,7 +1038,7 @@ int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
file = io_uring_register_get_file(buf.src_fd, registered_src);
if (IS_ERR(file))
return PTR_ERR(file);
- ret = io_clone_buffers(ctx, file->private_data);
+ ret = io_clone_buffers(ctx, file->private_data, &buf);
if (!registered_src)
fput(file);
return ret;