author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-07-19 01:30:00 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-12-09 12:31:45 +0300
commit		2266fe580adf5cc928d2ffd68949547f7952eab1 (patch)
tree		00860681c1ae7559304af1d49da16a1eabbc6943 /fs/ext4
parent		fbd359a2ee201f81119fa56eaf86e5b499220fc3 (diff)
download	linux-2266fe580adf5cc928d2ffd68949547f7952eab1.tar.xz
ext4: pipeline buffer reads in mext_page_mkuptodate()
[ Upstream commit 368a83cebbb949adbcc20877c35367178497d9cc ]

Instead of synchronously reading one buffer at a time, submit reads as we
walk the buffers in the first loop, then wait for them in the second loop.
This should be significantly more efficient, particularly on HDDs, but I
have not measured.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20240718223005.568869-2-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Stable-dep-of: 2f3d93e210b9 ("ext4: fix race in buffer_head read fault injection")
Signed-off-by: Sasha Levin <sashal@kernel.org>
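For readers skimming the diff below, the change boils down to the submit-then-wait
pattern sketched here. This is an illustrative sketch only, not code from the patch:
the helper name read_buffers_pipelined() is invented for this example, while
lock_buffer(), unlock_buffer(), buffer_uptodate(), wait_on_buffer() and
ext4_read_bh_nowait() are the real helpers the patch itself uses.

/*
 * Illustrative sketch (not ext4 code) of the pipelined read pattern this
 * patch applies: pass 1 submits a read for every buffer that is not yet
 * uptodate, pass 2 waits for all of them.
 */
#include <linux/buffer_head.h>
#include "ext4.h"		/* ext4_read_bh_nowait() */

static int read_buffers_pipelined(struct buffer_head **arr, int nr)
{
	int i;

	/* Pass 1: lock each stale buffer and queue an asynchronous read. */
	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = arr[i];

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);	/* already uptodate, nothing to read */
			continue;
		}
		ext4_read_bh_nowait(bh, 0, NULL);	/* completion handler unlocks bh */
	}

	/* Pass 2: wait for the reads queued above; any failure becomes -EIO. */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(arr[i]);
		if (!buffer_uptodate(arr[i]))
			return -EIO;
	}

	return 0;
}

With every read in flight before the first wait, the block layer and the device can
service the requests back to back instead of paying one full I/O round trip per
buffer, which is where the HDD win mentioned in the commit message comes from.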
Diffstat (limited to 'fs/ext4')
-rw-r--r--	fs/ext4/move_extent.c | 23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index a3aa85795d4a..28d59548770d 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -173,7 +173,9 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
 	sector_t block;
 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
 	unsigned int blocksize, block_start, block_end;
-	int i, err, nr = 0, partial = 0;
+	int i, nr = 0;
+	bool partial = false;
+
 	BUG_ON(!folio_test_locked(folio));
 	BUG_ON(folio_test_writeback(folio));
@@ -193,13 +195,13 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
 		block_end = block_start + blocksize;
 		if (block_end <= from || block_start >= to) {
 			if (!buffer_uptodate(bh))
-				partial = 1;
+				partial = true;
 			continue;
 		}
 		if (buffer_uptodate(bh))
 			continue;
 		if (!buffer_mapped(bh)) {
-			err = ext4_get_block(inode, block, bh, 0);
+			int err = ext4_get_block(inode, block, bh, 0);
 			if (err)
 				return err;
 			if (!buffer_mapped(bh)) {
@@ -208,6 +210,12 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
 				continue;
 			}
 		}
+		lock_buffer(bh);
+		if (buffer_uptodate(bh)) {
+			unlock_buffer(bh);
+			continue;
+		}
+		ext4_read_bh_nowait(bh, 0, NULL);
 		BUG_ON(nr >= MAX_BUF_PER_PAGE);
 		arr[nr++] = bh;
 	}
@@ -217,11 +225,10 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
 	for (i = 0; i < nr; i++) {
 		bh = arr[i];
-		if (!bh_uptodate_or_lock(bh)) {
-			err = ext4_read_bh(bh, 0, NULL);
-			if (err)
-				return err;
-		}
+		wait_on_buffer(bh);
+		if (buffer_uptodate(bh))
+			continue;
+		return -EIO;
 	}
 out:
 	if (!partial)