author    Jens Axboe <axboe@kernel.dk>  2024-09-11 19:42:40 +0300
committer Jens Axboe <axboe@kernel.dk>  2024-09-11 19:42:40 +0300
commit    6d0f8dcb3a634bbee46fcb028c5984c463f47812 (patch)
tree      e77e8999fa9d79363a9b11832a0d7795a0e8ef4f /include/uapi/linux
parent    318ad4283a6efea8ce5ec2b3c65b6cb19df6b07e (diff)
parent    84eacf177faa605853c58e5b1c0d9544b88c16fd (diff)
Merge branch 'for-6.12/io_uring' into for-6.12/io_uring-discard
* for-6.12/io_uring: (31 commits)
  io_uring/io-wq: inherit cpuset of cgroup in io worker
  io_uring/io-wq: do not allow pinning outside of cpuset
  io_uring/rw: drop -EOPNOTSUPP check in __io_complete_rw_common()
  io_uring/rw: treat -EOPNOTSUPP for IOCB_NOWAIT like -EAGAIN
  io_uring/sqpoll: do not allow pinning outside of cpuset
  io_uring/eventfd: move refs to refcount_t
  io_uring: remove unused rsrc_put_fn
  io_uring: add new line after variable declaration
  io_uring: add GCOV_PROFILE_URING Kconfig option
  io_uring/kbuf: add support for incremental buffer consumption
  io_uring/kbuf: pass in 'len' argument for buffer commit
  Revert "io_uring: Require zeroed sqe->len on provided-buffers send"
  io_uring/kbuf: move io_ring_head_to_buf() to kbuf.h
  io_uring/kbuf: add io_kbuf_commit() helper
  io_uring/kbuf: shrink nr_iovs/mode in struct buf_sel_arg
  io_uring: wire up min batch wake timeout
  io_uring: add support for batch wait timeout
  io_uring: implement our own schedule timeout handling
  io_uring: move schedule wait logic into helper
  io_uring: encapsulate extraneous wait flags into a separate struct
  ...
Diffstat (limited to 'include/uapi/linux')
 -rw-r--r-- include/uapi/linux/io_uring.h | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index adc2524fd8e3..a275f91d2ac0 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -440,11 +440,21 @@ struct io_uring_cqe {
* IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
 * IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinguish
 * them from sends.
+ * IORING_CQE_F_BUF_MORE If set, the buffer ID set in the completion will get
+ * more completions. In other words, the buffer is being
+ * partially consumed, and will be used by the kernel for
+ * more completions. This is only set for buffers used with
+ * incremental buffer consumption, as provided by a buffer
+ * ring set up with IOU_PBUF_RING_INC. For any other
+ * provided buffer type, every completion that passes a
+ * buffer back automatically returns it to the
+ * application.
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
#define IORING_CQE_F_NOTIF (1U << 3)
+#define IORING_CQE_F_BUF_MORE (1U << 4)
#define IORING_CQE_BUFFER_SHIFT 16
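
To make the flag handling concrete, here is a minimal sketch of how an application reaping CQEs from a ring backed by an incrementally consumed buffer ring (IOU_PBUF_RING_INC, further down in this diff) might interpret these bits. The handle_cqe() helper and the bookkeeping it alludes to are hypothetical:

    #include <linux/io_uring.h>

    /* Hypothetical handler for one completion against a provided-buffer
     * ring registered with IOU_PBUF_RING_INC. */
    static void handle_cqe(const struct io_uring_cqe *cqe)
    {
        if (!(cqe->flags & IORING_CQE_F_BUFFER))
            return; /* no buffer attached to this completion */

        /* The buffer ID travels in the upper bits of cqe->flags. */
        unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

        if (cqe->flags & IORING_CQE_F_BUF_MORE) {
            /* Partially consumed: the kernel keeps the buffer and will
             * post more completions for it. Advance the application's
             * read offset for 'bid' by cqe->res bytes; do not recycle. */
        } else {
            /* Fully returned: buffer 'bid' may be replenished/reused. */
        }
        (void)bid;
    }
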
@@ -507,6 +517,7 @@ struct io_cqring_offsets {
#define IORING_ENTER_SQ_WAIT (1U << 2)
#define IORING_ENTER_EXT_ARG (1U << 3)
#define IORING_ENTER_REGISTERED_RING (1U << 4)
+#define IORING_ENTER_ABS_TIMER (1U << 5)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
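
A minimal sketch of what IORING_ENTER_ABS_TIMER enables: waiting until an absolute deadline rather than for a relative duration, with the timespec passed through the extended argument. The deadline is assumed to be measured against the ring's clock (CLOCK_MONOTONIC unless changed via IORING_REGISTER_CLOCK, added further down); wait_until() is a hypothetical helper using the raw syscall:

    #include <linux/io_uring.h>
    #include <linux/time_types.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hypothetical: wait for completions until an absolute deadline. */
    static int wait_until(int ring_fd, unsigned int min_complete,
                          const struct __kernel_timespec *deadline)
    {
        struct io_uring_getevents_arg arg = {
            .ts = (__u64)(uintptr_t)deadline,
        };

        return syscall(__NR_io_uring_enter, ring_fd, 0, min_complete,
                       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG |
                       IORING_ENTER_ABS_TIMER,
                       &arg, sizeof(arg));
    }
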
@@ -542,6 +553,7 @@ struct io_uring_params {
#define IORING_FEAT_LINKED_FILE (1U << 12)
#define IORING_FEAT_REG_REG_RING (1U << 13)
#define IORING_FEAT_RECVSEND_BUNDLE (1U << 14)
+#define IORING_FEAT_MIN_TIMEOUT (1U << 15)
/*
* io_uring_register(2) opcodes and arguments
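
Since feature bits are reported back through io_uring_params at setup time, availability of the new minimum-timeout behavior can be probed before it is relied on. A short sketch with the raw setup syscall:

    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Returns non-zero if the kernel reports IORING_FEAT_MIN_TIMEOUT. */
    static int have_min_timeout(void)
    {
        struct io_uring_params p;
        int fd;

        memset(&p, 0, sizeof(p));
        fd = syscall(__NR_io_uring_setup, 8, &p);
        if (fd < 0)
            return 0;
        close(fd);
        return !!(p.features & IORING_FEAT_MIN_TIMEOUT);
    }
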
@@ -595,6 +607,8 @@ enum io_uring_register_op {
IORING_REGISTER_NAPI = 27,
IORING_UNREGISTER_NAPI = 28,
+ IORING_REGISTER_CLOCK = 29,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -675,6 +689,11 @@ struct io_uring_restriction {
__u32 resv2[3];
};
+struct io_uring_clock_register {
+ __u32 clockid;
+ __u32 __resv[3];
+};
+
struct io_uring_buf {
__u64 addr;
__u32 len;
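
IORING_REGISTER_CLOCK pairs with the struct io_uring_clock_register added above: the registered clockid is presumably the one future wait timeouts are measured against. A sketch; passing 0 for nr_args mirrors other single-struct registrations and is an assumption here:

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    /* Ask the ring to time waits against CLOCK_BOOTTIME. */
    static int register_boottime_clock(int ring_fd)
    {
        struct io_uring_clock_register reg = {
            .clockid = CLOCK_BOOTTIME,
        };

        /* nr_args of 0 is an assumption; see io_uring_register(2). */
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_CLOCK, &reg, 0);
    }
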
@@ -707,9 +726,17 @@ struct io_uring_buf_ring {
* mmap(2) with the offset set as:
* IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
* to get a virtual mapping for the ring.
+ * IOU_PBUF_RING_INC: If set, buffers consumed from this buffer ring can be
+ * consumed incrementally. Normally one (or more) buffers
+ * are fully consumed. With incremental consumption, it's
+ * feasible to register large ranges of buffers, and each
+ * use will consume only as much of a buffer as it needs.
+ * This requires that both the kernel and application keep
+ * track of where the current read/recv index is.
*/
enum io_uring_register_pbuf_ring_flags {
IOU_PBUF_RING_MMAP = 1,
+ IOU_PBUF_RING_INC = 2,
};
/* argument for IORING_(UN)REGISTER_PBUF_RING */
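
Registering an incrementally consumed ring goes through the same struct io_uring_buf_reg path as before, with the new flag set. A sketch, assuming application-provided ring memory (ring_mem must be suitably sized and page-aligned) and the usual nr_args of 1 for IORING_REGISTER_PBUF_RING:

    #include <linux/io_uring.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Register a provided-buffer ring whose buffers the kernel may hand
     * back piecemeal across multiple completions. */
    static int register_inc_buf_ring(int ring_fd, void *ring_mem,
                                     unsigned int entries,
                                     unsigned short bgid)
    {
        struct io_uring_buf_reg reg = {
            .ring_addr    = (__u64)(uintptr_t)ring_mem,
            .ring_entries = entries,
            .bgid         = bgid,
            .flags        = IOU_PBUF_RING_INC,
        };

        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_PBUF_RING, &reg, 1);
    }
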
@@ -758,7 +785,7 @@ enum io_uring_register_restriction_op {
struct io_uring_getevents_arg {
__u64 sigmask;
__u32 sigmask_sz;
- __u32 pad;
+ __u32 min_wait_usec;
__u64 ts;
};