author	Jens Axboe <axboe@kernel.dk>	2025-06-03 16:42:28 +0300
committer	Jens Axboe <axboe@kernel.dk>	2025-06-04 00:32:44 +0300
commit	607d09d1a01e9f29e91733e3a08b63ed240aacb2 (patch)
tree	d06e429f7822708192e37602063b73640536f321
parent	e931d3a9d5200bae9d938be2582072b2898e37f7 (diff)
download	linux-607d09d1a01e9f29e91733e3a08b63ed240aacb2.tar.xz
io_uring/kbuf: limit legacy provided buffer lists to USHRT_MAX
The buffer ID for a provided buffer is an unsigned short, and hence there can only be 64k added to any given buffer list before having duplicate BIDs. Cap the legacy provided buffers at 64k in the list.

This is mostly to prevent silly stall reports from syzbot, which likes to dump tons of buffers into a list and then have kernels with lockdep and kasan churning through them and hitting long wait times for buffer pruning at ring exit time.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
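For context, a minimal userspace sketch of how the cap surfaces to an application, assuming liburing is available; the group id, buffer size, pool layout, and the provide() wrapper are arbitrary illustration choices, not part of this patch. Filling a legacy buffer list up to USHRT_MAX entries still succeeds, while the next IORING_OP_PROVIDE_BUFFERS submission on a patched kernel completes with -EOVERFLOW instead of silently adding buffers with duplicate BIDs.

/*
 * Sketch only: fill one legacy buffer group up to the USHRT_MAX cap,
 * then try to add one more buffer. BGID and BUF_SIZE are arbitrary.
 */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

#define BGID		0
#define BUF_SIZE	64
#define NR_BUFS		65535	/* USHRT_MAX, the new per-list cap */

/* hypothetical helper: submit one provide-buffers SQE and return cqe->res */
static int provide(struct io_uring *ring, void *base, int nr, int bid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_provide_buffers(sqe, base, BUF_SIZE, nr, BGID, bid);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}

int main(void)
{
	struct io_uring ring;
	char *pool;
	int ret;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;
	/* one extra buffer slot for the overflow attempt */
	pool = malloc((size_t)(NR_BUFS + 1) * BUF_SIZE);
	if (!pool)
		return 1;

	/* filling the list up to the cap still succeeds (res == 0) */
	ret = provide(&ring, pool, NR_BUFS, 0);
	printf("provide %d buffers: %d\n", NR_BUFS, ret);

	/* one more buffer exceeds the cap; a patched kernel returns -EOVERFLOW */
	ret = provide(&ring, pool + (size_t)NR_BUFS * BUF_SIZE, 1, 0);
	printf("provide 1 extra buffer: %d\n", ret);

	free(pool);
	io_uring_queue_exit(&ring);
	return 0;
}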
-rw-r--r--	io_uring/kbuf.c	17
-rw-r--r--	io_uring/kbuf.h	3
2 files changed, 18 insertions, 2 deletions
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 8cce3ebd813f..2ea65f3cef72 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -108,6 +108,7 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
buf = req->kbuf;
bl = io_buffer_get_list(ctx, buf->bgid);
list_add(&buf->list, &bl->buf_list);
+ bl->nbufs++;
req->flags &= ~REQ_F_BUFFER_SELECTED;
io_ring_submit_unlock(ctx, issue_flags);
@@ -122,6 +123,7 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
list_del(&kbuf->list);
+ bl->nbufs--;
if (*len == 0 || *len > kbuf->len)
*len = kbuf->len;
if (list_empty(&bl->buf_list))
@@ -390,6 +392,7 @@ static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
list_del(&nxt->list);
+ bl->nbufs--;
kfree(nxt);
cond_resched();
}
@@ -491,14 +494,24 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
{
struct io_buffer *buf;
u64 addr = pbuf->addr;
- int i, bid = pbuf->bid;
+ int ret = -ENOMEM, i, bid = pbuf->bid;
for (i = 0; i < pbuf->nbufs; i++) {
+ /*
+ * Nonsensical to have more than sizeof(bid) buffers in a
+ * buffer list, as the application then has no way of knowing
+ * which duplicate bid refers to what buffer.
+ */
+ if (bl->nbufs == USHRT_MAX) {
+ ret = -EOVERFLOW;
+ break;
+ }
buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
if (!buf)
break;
list_add_tail(&buf->list, &bl->buf_list);
+ bl->nbufs++;
buf->addr = addr;
buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
buf->bid = bid;
@@ -508,7 +521,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
cond_resched();
}
- return i ? 0 : -ENOMEM;
+ return i ? 0 : ret;
}
static int __io_manage_buffers_legacy(struct io_kiocb *req,
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 4d2c209d1a41..5d83c7adc739 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -21,6 +21,9 @@ struct io_buffer_list {
struct list_head buf_list;
struct io_uring_buf_ring *buf_ring;
};
+ /* count of classic/legacy buffers in buffer list */
+ int nbufs;
+
__u16 bgid;
/* below is for ring provided buffers */