summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorZhang Yi <yi.zhang@huawei.com>2026-03-27 13:29:27 +0300
committerTheodore Ts'o <tytso@mit.edu>2026-04-10 04:57:51 +0300
commit5447c8b9de7581ca7254d712652678cc460a18c2 (patch)
tree68472d51b617aa1a05c937aba76d1be84b3d5d3c
parent6ea3b34d8625ef5544d1c619bd67e2c6080ea4c2 (diff)
downloadlinux-5447c8b9de7581ca7254d712652678cc460a18c2.tar.xz
ext4: add did_zero output parameter to ext4_block_zero_page_range()
Add a bool *did_zero output parameter to ext4_block_zero_page_range() and __ext4_block_zero_page_range(). The parameter reports whether a partial block was zeroed out, which is needed for the upcoming iomap buffered I/O conversion. Signed-off-by: Zhang Yi <yi.zhang@huawei.com> Reviewed-by: Jan Kara <jack@suse.cz> Link: https://patch.msgid.link/20260327102939.1095257-2-yi.zhang@huaweicloud.com Signed-off-by: Theodore Ts'o <tytso@mit.edu>
-rw-r--r--fs/ext4/inode.c23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 13cd564f89e1..f0c9c63f618b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4033,7 +4033,8 @@ void ext4_set_aops(struct inode *inode)
* racing writeback can come later and flush the stale pagecache to disk.
*/
static int __ext4_block_zero_page_range(handle_t *handle,
- struct address_space *mapping, loff_t from, loff_t length)
+ struct address_space *mapping, loff_t from, loff_t length,
+ bool *did_zero)
{
unsigned int offset, blocksize, pos;
ext4_lblk_t iblock;
@@ -4121,6 +4122,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
err = ext4_jbd2_inode_add_write(handle, inode, from,
length);
}
+ if (!err && did_zero)
+ *did_zero = true;
unlock:
folio_unlock(folio);
@@ -4136,7 +4139,8 @@ unlock:
* that corresponds to 'from'
*/
static int ext4_block_zero_page_range(handle_t *handle,
- struct address_space *mapping, loff_t from, loff_t length)
+ struct address_space *mapping, loff_t from, loff_t length,
+ bool *did_zero)
{
struct inode *inode = mapping->host;
unsigned blocksize = inode->i_sb->s_blocksize;
@@ -4150,10 +4154,11 @@ static int ext4_block_zero_page_range(handle_t *handle,
length = max;
if (IS_DAX(inode)) {
- return dax_zero_range(inode, from, length, NULL,
+ return dax_zero_range(inode, from, length, did_zero,
&ext4_iomap_ops);
}
- return __ext4_block_zero_page_range(handle, mapping, from, length);
+ return __ext4_block_zero_page_range(handle, mapping, from, length,
+ did_zero);
}
/*
@@ -4176,7 +4181,7 @@ static int ext4_block_truncate_page(handle_t *handle,
blocksize = i_blocksize(inode);
length = blocksize - (from & (blocksize - 1));
- return ext4_block_zero_page_range(handle, mapping, from, length);
+ return ext4_block_zero_page_range(handle, mapping, from, length, NULL);
}
int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
@@ -4199,13 +4204,13 @@ int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
if (start == end &&
(partial_start || (partial_end != sb->s_blocksize - 1))) {
err = ext4_block_zero_page_range(handle, mapping,
- lstart, length);
+ lstart, length, NULL);
return err;
}
/* Handle partial zero out on the start of the range */
if (partial_start) {
- err = ext4_block_zero_page_range(handle, mapping,
- lstart, sb->s_blocksize);
+ err = ext4_block_zero_page_range(handle, mapping, lstart,
+ sb->s_blocksize, NULL);
if (err)
return err;
}
@@ -4213,7 +4218,7 @@ int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
if (partial_end != sb->s_blocksize - 1)
err = ext4_block_zero_page_range(handle, mapping,
byte_end - partial_end,
- partial_end + 1);
+ partial_end + 1, NULL);
return err;
}