author     Jens Axboe <axboe@kernel.dk>    2020-09-05 07:36:52 +0300
committer  Jens Axboe <axboe@kernel.dk>    2020-09-05 18:02:47 +0300
commit     c183edff33fdcd639d222a8f473bf44602adc655
tree       9a72c031a6b97bb8699c123e458a9b7daeda273a /fs
parent     355afaeb578abac907217c256a844cfafb0337b2
io_uring: fix explicit async read/write mapping for large segments
If a request exceeds UIO_FASTIOV segments, we don't correctly handle the
transition from the inline fast_iov to an allocated vec for requests that
are queued with IOSQE_ASYNC. Store the imported iovec in a local pointer
and re-set it in the iter iov afterwards, in case it changed.
Fixes: ff6165b2d7f6 ("io_uring: retain iov_iter state over io_read/io_write calls")
Reported-by: Nick Hill <nick@nickhill.org>
Tested-by: Norman Maurer <norman.maurer@googlemail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
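
For context, the affected path is a vectored read or write with more than
UIO_FASTIOV (8) segments submitted with IOSQE_ASYNC. Below is a minimal
userspace sketch that exercises that path, assuming liburing is available;
the file name, segment count, and buffer sizes are illustrative, not taken
from the original report.

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/uio.h>
	#include <liburing.h>

	#define NR_SEGS  16	/* > UIO_FASTIOV, so the kernel allocates an iovec */
	#define SEG_SIZE 4096

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct iovec iov[NR_SEGS];
		int fd, i;

		fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
		if (fd < 0)
			return 1;

		for (i = 0; i < NR_SEGS; i++) {
			iov[i].iov_base = malloc(SEG_SIZE);
			iov[i].iov_len = SEG_SIZE;
		}

		if (io_uring_queue_init(8, &ring, 0))
			return 1;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_readv(sqe, fd, iov, NR_SEGS, 0);
		sqe->flags |= IOSQE_ASYNC;	/* force explicit async, as in the report */

		io_uring_submit(&ring);
		if (!io_uring_wait_cqe(&ring, &cqe)) {
			printf("readv returned %d\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		return 0;
	}

Before the fix, the vector imported for such a request was not carried back
into the iter correctly; the patch stores it in a local pointer and re-sets
iter.iov once the import has completed.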
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 64c5a52fad12..f703182df2d4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2980,14 +2980,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
 				   bool force_nonblock)
 {
 	struct io_async_rw *iorw = &req->io->rw;
+	struct iovec *iov;
 	ssize_t ret;
 
-	iorw->iter.iov = iorw->fast_iov;
-	ret = __io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
-				&iorw->iter, !force_nonblock);
+	iorw->iter.iov = iov = iorw->fast_iov;
+	ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
 	if (unlikely(ret < 0))
 		return ret;
 
+	iorw->iter.iov = iov;
 	io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
 	return 0;
 }
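
The heart of the fix is the pointer handoff: the import may replace the
inline fast_iov with an allocated iovec once the segment count exceeds
UIO_FASTIOV, so the caller has to re-read which vector ended up in use.
The following is a self-contained userspace model of that handoff, not
kernel code; the helper name and constants are made up for illustration.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/uio.h>

	#define FASTIOV 8	/* mirrors UIO_FASTIOV */

	/* Hypothetical importer: uses the inline array when it is big enough,
	 * otherwise allocates, and reports the vector actually in use via *iovp. */
	static int import_sketch(const struct iovec *src, unsigned nr,
				 struct iovec *fast, struct iovec **iovp)
	{
		struct iovec *iov = fast;

		if (nr > FASTIOV) {
			iov = calloc(nr, sizeof(*iov));
			if (!iov)
				return -1;
		}
		memcpy(iov, src, nr * sizeof(*iov));
		*iovp = iov;	/* may differ from 'fast' for large segment counts */
		return 0;
	}

	int main(void)
	{
		struct iovec src[16] = {0};	/* 16 > FASTIOV: forces the allocation */
		struct iovec fast[FASTIOV];
		struct iovec *iov = fast;

		if (import_sketch(src, 16, fast, &iov))
			return 1;

		/* Using a stale pointer here is the class of bug being fixed;
		 * re-reading 'iov' after the import is the cure. */
		printf("using %s vector\n", iov == fast ? "inline" : "allocated");
		if (iov != fast)
			free(iov);
		return 0;
	}

In the patch, the added "iorw->iter.iov = iov;" plays exactly this
re-reading role, so io_req_map_rw() maps whichever vector the import
actually left in use.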