diff options
author | Chao Yu <yuchao0@huawei.com> | 2020-04-08 14:55:17 +0300 |
---|---|---|
committer | Jaegeuk Kim <jaegeuk@kernel.org> | 2020-05-12 06:37:13 +0300 |
commit | 4fec3fc026717f81e34fca59937b0acbfb05642d (patch) | |
tree | 3fe7043d02e277ce19ca2decd22f4912cc65a5ca /fs/f2fs | |
parent | c75488fb4d82b697f381f855bf5b16779df440aa (diff) | |
download | linux-4fec3fc026717f81e34fca59937b0acbfb05642d.tar.xz |
f2fs: use round_up to enhance calculation
.i_cluster_size should be a power of 2, so we can use round_up() instead
of roundup() to simplify the calculation.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Diffstat (limited to 'fs/f2fs')
-rw-r--r-- | fs/f2fs/file.c | 17 |
1 file changed, 5 insertions, 12 deletions
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 48d908811807..97cc95a4a8ed 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -742,16 +742,9 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) * for compressed file, only support cluster size * aligned truncation. */ - if (f2fs_compressed_file(inode)) { - size_t cluster_shift = PAGE_SHIFT + - F2FS_I(inode)->i_log_cluster_size; - size_t cluster_mask = (1 << cluster_shift) - 1; - - free_from = from >> cluster_shift; - if (from & cluster_mask) - free_from++; - free_from <<= cluster_shift; - } + if (f2fs_compressed_file(inode)) + free_from = round_up(from, + F2FS_I(inode)->i_cluster_size << PAGE_SHIFT); #endif err = f2fs_do_truncate_blocks(inode, free_from, lock); @@ -3563,7 +3556,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) end_offset = ADDRS_PER_PAGE(dn.node_page, inode); count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); - count = roundup(count, F2FS_I(inode)->i_cluster_size); + count = round_up(count, F2FS_I(inode)->i_cluster_size); ret = release_compress_blocks(&dn, count); @@ -3715,7 +3708,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) end_offset = ADDRS_PER_PAGE(dn.node_page, inode); count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); - count = roundup(count, F2FS_I(inode)->i_cluster_size); + count = round_up(count, F2FS_I(inode)->i_cluster_size); ret = reserve_compress_blocks(&dn, count); |