author     Jens Axboe <axboe@kernel.dk>    2019-12-23 01:19:35 +0300
committer  Jens Axboe <axboe@kernel.dk>    2020-01-21 03:03:59 +0300
commit     3a6820f2bb8a079975109c25a5d1f29f46bce5d2 (patch)
tree       fd5269613133e7eb1db2977b3973f1d0616b2eb2
parent     e94f141bd248ebdadcb7351f1e70b31cee5add53 (diff)
download   linux-3a6820f2bb8a079975109c25a5d1f29f46bce5d2.tar.xz
io_uring: add non-vectored read/write commands
For use cases that don't already naturally have an iovec, it's easier (or more convenient) to just use a buffer address + length. This is particularly true if the use case is from languages that want to create a memory-safe abstraction on top of io_uring, and where introducing the need for the iovec may impose an ownership issue. For those cases, they currently need an indirection buffer, which means allocating data just for this purpose.

Add basic read/write commands that don't require the iovec.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  fs/io_uring.c                  | 23
-rw-r--r--  include/uapi/linux/io_uring.h  |  2
2 files changed, 25 insertions(+), 0 deletions(-)
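Before the kernel-side diff, here is a minimal user-space sketch of what the new opcodes enable. This is not part of the patch; it assumes a liburing new enough to provide io_uring_prep_read(), a kernel that already carries this change, and a file path chosen purely for illustration:

/* Minimal sketch: non-vectored read via IORING_OP_READ, assuming liburing. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[4096];
        int fd;

        fd = open("/etc/hostname", O_RDONLY);   /* any readable file */
        if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* Buffer address + length go straight into the SQE; no iovec needed. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);

        io_uring_submit(&ring);
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read returned %d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        close(fd);
        return 0;
}

The point of comparison is IORING_OP_READV, where the same request would have to carry a struct iovec that outlives the submission; here the buffer pointer and length live directly in the SQE.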
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c54a8bd37b54..407ba3388e14 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -654,6 +654,18 @@ static const struct io_op_def io_op_defs[] = {
                 .needs_file = 1,
                 .fd_non_neg = 1,
         },
+        {
+                /* IORING_OP_READ */
+                .needs_mm = 1,
+                .needs_file = 1,
+                .unbound_nonreg_file = 1,
+        },
+        {
+                /* IORING_OP_WRITE */
+                .needs_mm = 1,
+                .needs_file = 1,
+                .unbound_nonreg_file = 1,
+        },
 };
 
 static void io_wq_submit_work(struct io_wq_work **workptr);
@@ -1867,6 +1879,13 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
         if (req->rw.kiocb.private)
                 return -EINVAL;
 
+        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
+                ssize_t ret;
+                ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
+                *iovec = NULL;
+                return ret;
+        }
+
         if (req->io) {
                 struct io_async_rw *iorw = &req->io->rw;
 
@@ -3634,10 +3653,12 @@ static int io_req_defer_prep(struct io_kiocb *req,
                 break;
         case IORING_OP_READV:
         case IORING_OP_READ_FIXED:
+        case IORING_OP_READ:
                 ret = io_read_prep(req, sqe, true);
                 break;
         case IORING_OP_WRITEV:
         case IORING_OP_WRITE_FIXED:
+        case IORING_OP_WRITE:
                 ret = io_write_prep(req, sqe, true);
                 break;
         case IORING_OP_POLL_ADD:
@@ -3741,6 +3762,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                 break;
         case IORING_OP_READV:
         case IORING_OP_READ_FIXED:
+        case IORING_OP_READ:
                 if (sqe) {
                         ret = io_read_prep(req, sqe, force_nonblock);
                         if (ret < 0)
@@ -3750,6 +3772,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                 break;
         case IORING_OP_WRITEV:
         case IORING_OP_WRITE_FIXED:
+        case IORING_OP_WRITE:
                 if (sqe) {
                         ret = io_write_prep(req, sqe, force_nonblock);
                         if (ret < 0)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index d7ec50247a3a..7fdf994f3313 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -84,6 +84,8 @@ enum {
         IORING_OP_CLOSE,
         IORING_OP_FILES_UPDATE,
         IORING_OP_STATX,
+        IORING_OP_READ,
+        IORING_OP_WRITE,
 
         /* this goes last, obviously */
         IORING_OP_LAST,
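As the io_import_iovec() hunk above shows, the new opcodes feed sqe->addr and sqe->len straight into import_single_range(), so at the raw ABI level the buffer pointer, its length, and the file offset are all a submission needs. The sketch below makes that field mapping explicit; prep_read_sqe() is a made-up helper for illustration, and it assumes uapi headers that already define IORING_OP_READ:

#include <linux/io_uring.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper, illustration only: fill an SQE for the new
 * non-vectored read by hand instead of going through liburing. */
static void prep_read_sqe(struct io_uring_sqe *sqe, int fd,
                          void *buf, unsigned int nbytes, uint64_t offset)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_READ;   /* new opcode added by this patch */
        sqe->fd     = fd;               /* file to read from */
        sqe->addr   = (uintptr_t)buf;   /* buffer address, not an iovec array */
        sqe->len    = nbytes;           /* buffer length, not an iovec count */
        sqe->off    = offset;           /* file offset, same meaning as with READV */
}

This is the ownership property the commit message calls out: the submission carries no pointer to a separate iovec that must stay alive, which is what makes a memory-safe wrapper simpler.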