author    Chao Yu <chao2.yu@samsung.com>  2014-12-08 09:56:19 +0300
committer Jaegeuk Kim <jaegeuk@kernel.org>  2014-12-09 01:19:07 +0300
commit    13da549460d549aec78a943e589f4ffc3fdc716c (patch)
tree      d8dbfe7096ce7895db2ead3f9ca030fd599ea086 /fs/f2fs
parent    03e14d522eb1fdf9c0ce37085cb56749342a842c (diff)
download  linux-13da549460d549aec78a943e589f4ffc3fdc716c.tar.xz
f2fs: fix to enable readahead for SSA/CP blocks
1. We use zero as the upper boundary value when doing readahead of SSA/CP blocks, so the verification against the maximum block number always fails and readahead is skipped, which causes low performance.
2. The lower boundary value is not accurate for SSA/CP/POR region verification, so these values need to be redefined.

This patch fixes the above issues.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
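The first point comes from get_max_meta_blks() returning 0 for META_SSA/META_CP before this patch: the upper-boundary check "blkno >= max_blks" in ra_meta_pages() then holds for every block number, so the loop bails out before any page is read ahead. Below is a minimal standalone sketch of that failure mode, not the kernel code itself; the start/nrpages values are hypothetical.

#include <stdio.h>

typedef unsigned int block_t;

int main(void)
{
	block_t start = 512;	/* hypothetical first SSA/CP block to read ahead */
	int nrpages = 8;
	block_t max_blks = 0;	/* pre-patch get_max_meta_blks() result for META_SSA/META_CP */

	for (block_t blkno = start; blkno < start + nrpages; blkno++) {
		if (blkno >= max_blks) {
			/* always true when max_blks == 0, so nothing is ever read ahead */
			printf("readahead skipped at blkno %u\n", blkno);
			break;
		}
		/* ... would submit readahead for blkno here ... */
	}
	return 0;
}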
Diffstat (limited to 'fs/f2fs')
-rw-r--r--  fs/f2fs/checkpoint.c | 17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 6a81b73add06..f3ebfb5a8431 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -95,8 +95,9 @@ static inline block_t get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
case META_SIT:
return SIT_BLK_CNT(sbi);
case META_SSA:
+ return MAIN_BLKADDR(sbi);
case META_CP:
- return 0;
+ return SM_I(sbi)->sit_info->sit_base_addr;
case META_POR:
return MAX_BLKADDR(sbi);
default:
@@ -141,11 +142,23 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
prev_blk_addr = blk_addr;
break;
case META_SSA:
+ if (unlikely(blkno >= max_blks))
+ goto out;
+ if (unlikely(blkno < SM_I(sbi)->ssa_blkaddr))
+ goto out;
+ blk_addr = blkno;
+ break;
case META_CP:
+ if (unlikely(blkno >= max_blks))
+ goto out;
+ if (unlikely(blkno < __start_cp_addr(sbi)))
+ goto out;
+ blk_addr = blkno;
+ break;
case META_POR:
if (unlikely(blkno >= max_blks))
goto out;
- if (unlikely(blkno < SEG0_BLKADDR(sbi)))
+ if (unlikely(blkno < MAIN_BLKADDR(sbi)))
goto out;
blk_addr = blkno;
break;
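Taken together, the two hunks give each readahead type its own valid block-address window: the upper bound comes from get_max_meta_blks(), the lower bound from the per-type check inside the switch. The following standalone sketch models those windows, assuming the usual f2fs on-disk ordering (CP, SIT, NAT, SSA, main area); the struct and helper are hypothetical stand-ins for the kernel macros (__start_cp_addr(), sit_base_addr, ssa_blkaddr, MAIN_BLKADDR(), MAX_BLKADDR()), not part of f2fs itself.

#include <stdbool.h>

typedef unsigned int block_t;

enum meta_type { META_SSA, META_CP, META_POR };

/* hypothetical flattened view of the boundaries used by the patched checks */
struct meta_layout {
	block_t cp_start;	/* __start_cp_addr(sbi)               */
	block_t sit_start;	/* SM_I(sbi)->sit_info->sit_base_addr */
	block_t ssa_start;	/* SM_I(sbi)->ssa_blkaddr             */
	block_t main_start;	/* MAIN_BLKADDR(sbi)                  */
	block_t max_blkaddr;	/* MAX_BLKADDR(sbi)                   */
};

/* after the patch, each type accepts only block numbers inside its own region */
static bool ra_blkno_in_range(const struct meta_layout *l, enum meta_type type,
			      block_t blkno)
{
	switch (type) {
	case META_SSA:	/* SSA area: [ssa_blkaddr, MAIN_BLKADDR)      */
		return blkno >= l->ssa_start && blkno < l->main_start;
	case META_CP:	/* CP area:  [__start_cp_addr, sit_base_addr) */
		return blkno >= l->cp_start && blkno < l->sit_start;
	case META_POR:	/* POR:      [MAIN_BLKADDR, MAX_BLKADDR)      */
		return blkno >= l->main_start && blkno < l->max_blkaddr;
	}
	return false;
}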