Diffstat (limited to 'io_uring/kbuf.c')
 io_uring/kbuf.c | 67 ++++++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 36 insertions(+), 31 deletions(-)
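
The diff below moves buffer-selection state out of the request (req->buf_list) and into a struct io_br_sel that is passed around explicitly. The structure itself is defined outside this file and is not part of this diff; a minimal sketch, assuming only the two members the hunks below actually use, would be:

struct io_br_sel {
        struct io_buffer_list *buf_list;   /* buffer list the selection came from, if still valid */
        void __user *addr;                 /* user address of the selected buffer, NULL if none */
};
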
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 19a8bde5e1e1..aad655e38672 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -155,19 +155,19 @@ static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
return 1;
}
-static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
- struct io_buffer_list *bl,
- unsigned int issue_flags)
+static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ struct io_buffer_list *bl,
+ unsigned int issue_flags)
{
struct io_uring_buf_ring *br = bl->buf_ring;
__u16 tail, head = bl->head;
+ struct io_br_sel sel = { };
struct io_uring_buf *buf;
- void __user *ret;
u32 buf_len;
tail = smp_load_acquire(&br->tail);
if (unlikely(tail == head))
- return NULL;
+ return sel;
if (head + 1 == tail)
req->flags |= REQ_F_BL_EMPTY;
@@ -177,9 +177,9 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
if (*len == 0 || *len > buf_len)
*len = buf_len;
req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
- req->buf_list = bl;
req->buf_index = buf->bid;
- ret = u64_to_user_ptr(buf->addr);
+ sel.buf_list = bl;
+ sel.addr = u64_to_user_ptr(buf->addr);
if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
/*
@@ -192,30 +192,30 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
* the transfer completes (or if we get -EAGAIN and must poll or
* retry).
*/
- io_kbuf_commit(req, bl, *len, 1);
- req->buf_list = NULL;
+ io_kbuf_commit(req, sel.buf_list, *len, 1);
+ sel.buf_list = NULL;
}
- return ret;
+ return sel;
}
-void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
- unsigned buf_group, unsigned int issue_flags)
+struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
+ unsigned buf_group, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
+ struct io_br_sel sel = { };
struct io_buffer_list *bl;
- void __user *ret = NULL;
io_ring_submit_lock(req->ctx, issue_flags);
bl = io_buffer_get_list(ctx, buf_group);
if (likely(bl)) {
if (bl->flags & IOBL_BUF_RING)
- ret = io_ring_buffer_select(req, len, bl, issue_flags);
+ sel = io_ring_buffer_select(req, len, bl, issue_flags);
else
- ret = io_provided_buffer_select(req, len, bl);
+ sel.addr = io_provided_buffer_select(req, len, bl);
}
io_ring_submit_unlock(req->ctx, issue_flags);
- return ret;
+ return sel;
}
/* cap it at a reasonable 256, will be one page even for 4K */
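
With io_buffer_select() returning a struct io_br_sel by value, callers check sel.addr rather than the old NULL return to detect that no buffer was available. A hypothetical call site, with names and error handling that are illustrative rather than taken from this diff:

        struct io_br_sel sel;
        size_t len = 0;

        /* buf_group and issue_flags come from the request being issued */
        sel = io_buffer_select(req, &len, buf_group, issue_flags);
        if (!sel.addr)
                return -ENOBUFS;
        /*
         * sel.buf_list is non-NULL only if the buffer has not yet been
         * committed, i.e. the ring lock is still held and the request can poll.
         */
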
@@ -300,24 +300,22 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
req->flags |= REQ_F_BL_EMPTY;
req->flags |= REQ_F_BUFFER_RING;
- req->buf_list = bl;
return iov - arg->iovs;
}
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
- unsigned int issue_flags)
+ struct io_br_sel *sel, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_buffer_list *bl;
int ret = -ENOENT;
io_ring_submit_lock(ctx, issue_flags);
- bl = io_buffer_get_list(ctx, arg->buf_group);
- if (unlikely(!bl))
+ sel->buf_list = io_buffer_get_list(ctx, arg->buf_group);
+ if (unlikely(!sel->buf_list))
goto out_unlock;
- if (bl->flags & IOBL_BUF_RING) {
- ret = io_ring_buffers_peek(req, arg, bl);
+ if (sel->buf_list->flags & IOBL_BUF_RING) {
+ ret = io_ring_buffers_peek(req, arg, sel->buf_list);
/*
* Don't recycle these buffers if we need to go through poll.
* Nobody else can use them anyway, and holding on to provided
@@ -327,17 +325,21 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
*/
if (ret > 0) {
req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
- io_kbuf_commit(req, bl, arg->out_len, ret);
+ io_kbuf_commit(req, sel->buf_list, arg->out_len, ret);
}
} else {
- ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
+ ret = io_provided_buffers_select(req, &arg->out_len, sel->buf_list, arg->iovs);
}
out_unlock:
- io_ring_submit_unlock(ctx, issue_flags);
+ if (issue_flags & IO_URING_F_UNLOCKED) {
+ sel->buf_list = NULL;
+ mutex_unlock(&ctx->uring_lock);
+ }
return ret;
}
-int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
+int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
+ struct io_br_sel *sel)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer_list *bl;
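
Note that the out_unlock path in io_buffers_select() above no longer calls io_ring_submit_unlock(); it open-codes the same conditional unlock so that sel->buf_list can be cleared when ctx->uring_lock is dropped, since the buffer list must not be referenced once the lock is released. For comparison, io_ring_submit_unlock() behaves roughly like the following sketch (not part of this diff):

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
                                         unsigned int issue_flags)
{
        /* only drop the lock if this call path had to take it itself */
        if (issue_flags & IO_URING_F_UNLOCKED)
                mutex_unlock(&ctx->uring_lock);
}
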
@@ -353,16 +355,18 @@ int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
ret = io_ring_buffers_peek(req, arg, bl);
if (ret > 0)
req->flags |= REQ_F_BUFFERS_COMMIT;
+ sel->buf_list = bl;
return ret;
}
/* don't support multiple buffer selections for legacy */
+ sel->buf_list = NULL;
return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}
-static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
+static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr)
{
- struct io_buffer_list *bl = req->buf_list;
bool ret = true;
if (bl)
@@ -372,7 +376,8 @@ static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
return ret;
}
-unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
+unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
+ int len, int nbufs)
{
unsigned int ret;
@@ -383,7 +388,7 @@ unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
return ret;
}
- if (!__io_put_kbuf_ring(req, len, nbufs))
+ if (!__io_put_kbuf_ring(req, bl, len, nbufs))
ret |= IORING_CQE_F_BUF_MORE;
return ret;
}
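
With req->buf_list no longer carrying the selection on this path, the completion-side helpers take the buffer list as an explicit argument. Continuing the hypothetical caller sketched earlier (again illustrative, not taken from this diff), releasing a single buffer after the transfer would look roughly like:

        unsigned int cflags;

        /*
         * Release the one buffer selected earlier. If sel.buf_list is NULL
         * (the buffer was already committed when the lock was dropped), no
         * ring-head commit happens here and only the CQE flags are built.
         */
        cflags = __io_put_kbufs(req, sel.buf_list, len, 1);
        io_req_set_res(req, len, cflags);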