/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/blk-mq.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)
/* io_uring_cmd is being issued again */
#define IORING_URING_CMD_REISSUE	(1U << 31)
typedef void (*io_uring_cmd_tw_t)(struct io_uring_cmd *cmd,
				  unsigned issue_flags);

struct io_uring_cmd {
	struct file *file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	io_uring_cmd_tw_t task_work_cb;
	u32 cmd_op;
	u32 flags;
	u8 pdu[32]; /* available inline for free use */
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}

#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)
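
/*
 * Illustrative sketch, not part of this header: a driver's ->uring_cmd()
 * handler usually keeps its per-command state in a struct that must fit the
 * 32-byte pdu area (io_uring_cmd_to_pdu() checks the size at build time) and
 * reads the driver-specific SQE payload via io_uring_sqe_cmd(). The names
 * my_cmd_pdu and my_uring_cmd below are hypothetical:
 *
 *	struct my_cmd_pdu {
 *		struct request *req;
 *		int status;
 *	};
 *
 *	static int my_uring_cmd(struct io_uring_cmd *ioucmd,
 *				unsigned int issue_flags)
 *	{
 *		struct my_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct my_cmd_pdu);
 *		const void *payload = io_uring_sqe_cmd(ioucmd->sqe);
 *
 *		(queue the work described by payload, stash state in pdu)
 *
 *		return -EIOCBQUEUED;	(completion will be posted later)
 *	}
 */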
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
			 unsigned issue_flags, bool is_cqe32);

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       io_uring_cmd_tw_t task_work_cb,
			       unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags);

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

/*
 * Select a buffer from the provided buffer group for multishot uring_cmd.
 * Returns the selected buffer address and size.
 */
struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
					    unsigned buf_group, size_t *len,
					    unsigned int issue_flags);

/*
 * Complete a multishot uring_cmd event. This will post a CQE to the completion
 * queue and update the provided buffer.
 */
bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
				 struct io_br_sel *sel,
				 unsigned int issue_flags);
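
/*
 * Illustrative sketch, not part of this header: a multishot uring_cmd
 * handler picks a buffer from its provided-buffer group, fills it with one
 * event's data, and posts a CQE for that event. The exact io_br_sel fields
 * and the return-value semantics of io_uring_mshot_cmd_post_cqe() are
 * defined by the core io_uring code; treat this only as a usage outline:
 *
 *	size_t len = 0;
 *	struct io_br_sel sel;
 *
 *	sel = io_uring_cmd_buffer_select(ioucmd, buf_group, &len, issue_flags);
 *	if (sel.val < 0)
 *		return sel.val;
 *
 *	(copy up to len bytes of event data into the selected buffer)
 *
 *	if (!io_uring_mshot_cmd_post_cqe(ioucmd, &sel, issue_flags))
 *		(stop posting further completions for this command)
 */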
#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
						const struct iovec __user *uvec,
						size_t uvec_segs,
						int ddir, struct iov_iter *iter,
						unsigned issue_flags)
{
	return -EOPNOTSUPP;
}

static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
					u64 ret2, unsigned issue_flags,
					bool is_cqe32)
{
}

static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
					     io_uring_cmd_tw_t task_work_cb,
					     unsigned flags)
{
}

static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
						unsigned int issue_flags)
{
}

static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}

static inline struct io_br_sel
io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, unsigned buf_group,
			   size_t *len, unsigned int issue_flags)
{
	return (struct io_br_sel) { .val = -EOPNOTSUPP };
}

static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
					       struct io_br_sel *sel,
					       unsigned int issue_flags)
{
	return true;
}
#endif
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
						io_uring_cmd_tw_t task_work_cb)
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
						 io_uring_cmd_tw_t task_work_cb)
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
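
/*
 * Illustrative sketch, not part of this header: a driver whose result shows
 * up in IRQ or other non-task context defers the completion to task context
 * with one of the helpers above and posts the CQE from the callback.
 * my_cmd_tw and my_cmd_pdu are hypothetical names:
 *
 *	static void my_cmd_tw(struct io_uring_cmd *ioucmd, unsigned issue_flags)
 *	{
 *		struct my_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct my_cmd_pdu);
 *
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 *
 *	(from the driver's IRQ/completion path)
 *	io_uring_cmd_complete_in_task(ioucmd, my_cmd_tw);
 */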
static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

/*
 * Return the uring_cmd's io_uring context reference as a context handle,
 * allowing drivers to track per-context resources such as registered kernel
 * I/O buffers.
 */
static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->ctx;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
				     u64 res2, unsigned issue_flags)
{
	__io_uring_cmd_done(ioucmd, ret, res2, issue_flags, false);
}

static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
				       u64 res2, unsigned issue_flags)
{
	__io_uring_cmd_done(ioucmd, ret, res2, issue_flags, true);
}
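
/*
 * Illustrative sketch, not part of this header: io_uring_cmd_done() posts a
 * regular CQE, while io_uring_cmd_done32() is the variant for rings using
 * the extended 32-byte CQE format, where the second result word can be
 * carried in the big CQE. A typical completion looks like:
 *
 *	io_uring_cmd_done(ioucmd, ret, 0, issue_flags);
 */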
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);
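
/*
 * Illustrative sketch, not part of this header: a driver can expose the bvecs
 * of a block request as a registered buffer at a fixed-buffer table index,
 * with a release callback invoked once io_uring no longer needs the buffer,
 * and later drop the registration again. my_buf_release is a hypothetical
 * name:
 *
 *	static void my_buf_release(void *priv)
 *	{
 *		(drop the driver's reference on the request backing the buffer)
 *	}
 *
 *	ret = io_buffer_register_bvec(ioucmd, rq, my_buf_release, index,
 *				      issue_flags);
 *	(...)
 *	ret = io_buffer_unregister_bvec(ioucmd, index, issue_flags);
 */
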
#endif /* _LINUX_IO_URING_CMD_H */