Diffstat (limited to 'fs/f2fs/segment.c')
-rw-r--r--	fs/f2fs/segment.c	444
1 file changed, 278 insertions, 166 deletions
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index e1065ba70207..4fd76e867e0a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -192,6 +192,9 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
 	if (!f2fs_is_atomic_file(inode))
 		return;
 
+	if (clean)
+		truncate_inode_pages_final(inode->i_mapping);
+
 	release_atomic_write_cnt(inode);
 	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
 	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
@@ -201,7 +204,6 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
 	F2FS_I(inode)->atomic_write_task = NULL;
 
 	if (clean) {
-		truncate_inode_pages_final(inode->i_mapping);
 		f2fs_i_size_write(inode, fi->original_i_size);
 		fi->original_i_size = 0;
 	}
@@ -248,7 +250,7 @@ retry:
 	} else {
 		blkcnt_t count = 1;
 
-		err = inc_valid_block_count(sbi, inode, &count);
+		err = inc_valid_block_count(sbi, inode, &count, true);
 		if (err) {
 			f2fs_put_dnode(&dn);
 			return err;
@@ -334,8 +336,6 @@ static int __f2fs_commit_atomic_write(struct inode *inode)
 					DATA_GENERIC_ENHANCE)) {
 				f2fs_put_dnode(&dn);
 				ret = -EFSCORRUPTED;
-				f2fs_handle_error(sbi,
-						ERROR_INVALID_BLKADDR);
 				goto out;
 			}
 
@@ -400,6 +400,9 @@ int f2fs_commit_atomic_write(struct inode *inode)
  */
 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
+	if (f2fs_cp_error(sbi))
+		return;
+
 	if (time_to_inject(sbi, FAULT_CHECKPOINT))
 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);
 
@@ -448,8 +451,8 @@ static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
 	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
 	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
 	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
-	unsigned int threshold = sbi->blocks_per_seg * factor *
-					DEFAULT_DIRTY_THRESHOLD;
+	unsigned int threshold =
+		SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD));
 	unsigned int global_threshold = threshold * 3 / 2;
 
 	if (dents >= threshold || qdata >= threshold ||
@@ -872,7 +875,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
 {
 	int ovp_hole_segs =
 		(overprovision_segments(sbi) - reserved_segments(sbi));
-	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
+	block_t ovp_holes = SEGS_TO_BLKS(sbi, ovp_hole_segs);
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 	block_t holes[2] = {0, 0};	/* DATA and NODE */
 	block_t unusable;
@@ -901,11 +904,16 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
 {
 	int ovp_hole_segs =
 		(overprovision_segments(sbi) - reserved_segments(sbi));
+
+	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
+		return 0;
 	if (unusable > F2FS_OPTION(sbi).unusable_cap)
 		return -EAGAIN;
 	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
 		dirty_segments(sbi) > ovp_hole_segs)
 		return -EAGAIN;
+	if (has_not_enough_free_secs(sbi, 0, 0))
+		return -EAGAIN;
 	return 0;
 }
 
@@ -1132,8 +1140,7 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
 	struct seg_entry *sentry;
 	unsigned int segno;
 	block_t blk = start;
-	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
-	unsigned long *map;
+	unsigned long offset, size, *map;
 
 	while (blk < end) {
 		segno = GET_SEGNO(sbi, blk);
@@ -1143,7 +1150,7 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
 		if (end < START_BLOCK(sbi, segno + 1))
 			size = GET_BLKOFF_FROM_SEG0(sbi, end);
 		else
-			size = max_blocks;
+			size = BLKS_PER_SEG(sbi);
 		map = (unsigned long *)(sentry->cur_valid_map);
 		offset = __find_rev_next_bit(map, size, offset);
 		f2fs_bug_on(sbi, offset != size);
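
Note: the hunks above are part of a tree-wide cleanup that replaces open-coded
shifts by sbi->log_blocks_per_seg with sizing helpers such as SEGS_TO_BLKS()
and BLKS_PER_SEG(). The standalone sketch below (plain C, not kernel code)
shows what those helpers amount to; the macro bodies are assumptions inferred
from the expressions they replace, and struct f2fs_sb_info_model is a local
stand-in for the real superblock info.

#include <assert.h>
#include <stdio.h>

struct f2fs_sb_info_model {
	unsigned int log_blocks_per_seg;	/* e.g. 9 -> 512 blocks/segment */
	unsigned int blocks_per_seg;
	unsigned int segs_per_sec;
};

/* Assumed definitions, equivalent to the open-coded forms in the old code. */
#define BLKS_PER_SEG(sbi)	((sbi)->blocks_per_seg)
#define SEGS_PER_SEC(sbi)	((sbi)->segs_per_sec)
#define SEGS_TO_BLKS(sbi, segs)	((segs) << (sbi)->log_blocks_per_seg)
#define BLKS_PER_SEC(sbi)	SEGS_TO_BLKS(sbi, (sbi)->segs_per_sec)

int main(void)
{
	struct f2fs_sb_info_model sbi = {
		.log_blocks_per_seg = 9,
		.blocks_per_seg = 1 << 9,
		.segs_per_sec = 4,
	};

	/* The macros are drop-in equivalents of the replaced shifts. */
	assert(SEGS_TO_BLKS(&sbi, 7) == (7 << sbi.log_blocks_per_seg));
	assert(BLKS_PER_SEC(&sbi) ==
	       (sbi.segs_per_sec << sbi.log_blocks_per_seg));
	printf("blocks in 7 segments: %u\n", SEGS_TO_BLKS(&sbi, 7));
	return 0;
}
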
@@ -2048,7 +2055,6 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
 							bool check_only)
 {
 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
-	int max_blocks = sbi->blocks_per_seg;
 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
@@ -2060,8 +2066,9 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
 	int i;
 
-	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
-			!f2fs_block_unit_discard(sbi))
+	if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
+	    !f2fs_hw_support_discard(sbi) ||
+	    !f2fs_block_unit_discard(sbi))
 		return false;
 
 	if (!force) {
@@ -2078,13 +2085,14 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
 	while (force || SM_I(sbi)->dcc_info->nr_discards <=
 				SM_I(sbi)->dcc_info->max_discards) {
-		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
-		if (start >= max_blocks)
+		start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
+		if (start >= BLKS_PER_SEG(sbi))
 			break;
 
-		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
-		if (force && start && end != max_blocks
-					&& (end - start) < cpc->trim_minlen)
+		end = __find_rev_next_zero_bit(dmap,
+						BLKS_PER_SEG(sbi), start + 1);
+		if (force && start && end != BLKS_PER_SEG(sbi) &&
+		    (end - start) < cpc->trim_minlen)
 			continue;
 
 		if (check_only)
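
Note: add_discard_addrs() above keeps its run-extraction loop, only swapping
max_blocks for BLKS_PER_SEG(sbi): it alternates find-next-set-bit and
find-next-zero-bit to turn a validity bitmap into [start, end) discard
candidates. A minimal user-space model of that loop, with a naive bit scanner
standing in for the kernel's optimized __find_rev_next_bit() helpers:

#include <stdio.h>

#define NBITS 32

/* Return the first bit offset >= off whose value equals 'want',
 * or 'size' when no such bit exists. */
static unsigned int next_bit(const unsigned char *map, unsigned int size,
			     unsigned int off, int want)
{
	for (; off < size; off++)
		if (((map[off / 8] >> (off % 8)) & 1) == want)
			break;
	return off;
}

int main(void)
{
	/* blocks 3..5 and 9..10 are discard candidates */
	unsigned char dmap[NBITS / 8] = { 0x38, 0x06, 0x00, 0x00 };
	unsigned int start, end = (unsigned int)-1;

	for (;;) {
		start = next_bit(dmap, NBITS, end + 1, 1);
		if (start >= NBITS)
			break;
		end = next_bit(dmap, NBITS, start + 1, 0);
		printf("discard run: [%u, %u) len=%u\n",
		       start, end, end - start);
	}
	return 0;
}
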
@@ -2166,8 +2174,8 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
 								start + 1);
 
 		if (section_alignment) {
-			start = rounddown(start, sbi->segs_per_sec);
-			end = roundup(end, sbi->segs_per_sec);
+			start = rounddown(start, SEGS_PER_SEC(sbi));
+			end = roundup(end, SEGS_PER_SEC(sbi));
 		}
 
 		for (i = start; i < end; i++) {
@@ -2186,7 +2194,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
 		if (!f2fs_sb_has_blkzoned(sbi) &&
 		    (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
-				(end - start) << sbi->log_blocks_per_seg);
+				SEGS_TO_BLKS(sbi, end - start));
 			continue;
 		}
 next:
@@ -2195,9 +2203,9 @@ next:
 		if (!IS_CURSEC(sbi, secno) &&
 			!get_valid_blocks(sbi, start, true))
 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
-				sbi->segs_per_sec << sbi->log_blocks_per_seg);
+						BLKS_PER_SEC(sbi));
 
-		start = start_segno + sbi->segs_per_sec;
+		start = start_segno + SEGS_PER_SEC(sbi);
 		if (start < end)
 			goto next;
 		else
@@ -2216,7 +2224,7 @@ next:
 find_next:
 		if (is_valid) {
 			next_pos = find_next_zero_bit_le(entry->discard_map,
-					sbi->blocks_per_seg, cur_pos);
+						BLKS_PER_SEG(sbi), cur_pos);
 			len = next_pos - cur_pos;
 
 			if (f2fs_sb_has_blkzoned(sbi) ||
@@ -2228,13 +2236,13 @@ find_next:
 			total_len += len;
 		} else {
 			next_pos = find_next_bit_le(entry->discard_map,
-					sbi->blocks_per_seg, cur_pos);
+						BLKS_PER_SEG(sbi), cur_pos);
 		}
skip:
 		cur_pos = next_pos;
 		is_valid = !is_valid;
 
-		if (cur_pos < sbi->blocks_per_seg)
+		if (cur_pos < BLKS_PER_SEG(sbi))
 			goto find_next;
 
 		release_discard_addr(entry);
@@ -2251,6 +2259,12 @@ int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 	int err = 0;
 
+	if (f2fs_sb_has_readonly(sbi)) {
+		f2fs_info(sbi,
+			"Skip to start discard thread for readonly image");
+		return 0;
+	}
+
 	if (!f2fs_realtime_discard_enable(sbi))
 		return 0;
 
@@ -2283,7 +2297,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 	dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
 	dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
 	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
-		dcc->discard_granularity = sbi->blocks_per_seg;
+		dcc->discard_granularity = BLKS_PER_SEG(sbi);
 	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
 		dcc->discard_granularity = BLKS_PER_SEC(sbi);
 
@@ -2297,7 +2311,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
 	atomic_set(&dcc->queued_discard, 0);
 	atomic_set(&dcc->discard_cmd_cnt, 0);
 	dcc->nr_discards = 0;
-	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
+	dcc->max_discards = SEGS_TO_BLKS(sbi, MAIN_SEGS(sbi));
 	dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
 	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
 	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
@@ -2405,6 +2419,8 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 #endif
 
 	segno = GET_SEGNO(sbi, blkaddr);
+	if (segno == NULL_SEGNO)
+		return;
 
 	se = get_seg_entry(sbi, segno);
 	new_vblocks = se->valid_blocks + del;
@@ -2546,7 +2562,7 @@ static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int typ
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
 
 	if (sbi->ckpt->alloc_type[type] == SSR)
-		return sbi->blocks_per_seg;
+		return BLKS_PER_SEG(sbi);
 	return curseg->next_blkoff;
 }
 
@@ -2634,7 +2650,7 @@ static int is_next_segment_free(struct f2fs_sb_info *sbi,
 	unsigned int segno = curseg->segno + 1;
 	struct free_segmap_info *free_i = FREE_I(sbi);
 
-	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
+	if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
 		return !test_bit(segno, free_i->free_segmap);
 	return 0;
 }
@@ -2643,54 +2659,51 @@ static int is_next_segment_free(struct f2fs_sb_info *sbi,
  * Find a new segment from the free segments bitmap to right order
  * This function should be returned with success, otherwise BUG
  */
-static void get_new_segment(struct f2fs_sb_info *sbi,
-			unsigned int *newseg, bool new_sec, int dir)
+static int get_new_segment(struct f2fs_sb_info *sbi,
+			unsigned int *newseg, bool new_sec, bool pinning)
 {
 	struct free_segmap_info *free_i = FREE_I(sbi);
 	unsigned int segno, secno, zoneno;
 	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
 	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
 	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
-	unsigned int left_start = hint;
 	bool init = true;
-	int go_left = 0;
 	int i;
+	int ret = 0;
 
 	spin_lock(&free_i->segmap_lock);
 
-	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
+	if (time_to_inject(sbi, FAULT_NO_SEGMENT)) {
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
+	if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
 		segno = find_next_zero_bit(free_i->free_segmap,
 			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
 		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
 			goto got_it;
 	}
+
+	/*
+	 * If we format f2fs on zoned storage, let's try to get pinned sections
+	 * from beginning of the storage, which should be a conventional one.
+	 */
+	if (f2fs_sb_has_blkzoned(sbi)) {
+		segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
+		hint = GET_SEC_FROM_SEG(sbi, segno);
+	}
+
find_other_zone:
 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
 	if (secno >= MAIN_SECS(sbi)) {
-		if (dir == ALLOC_RIGHT) {
-			secno = find_first_zero_bit(free_i->free_secmap,
+		secno = find_first_zero_bit(free_i->free_secmap,
 							MAIN_SECS(sbi));
-			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
-		} else {
-			go_left = 1;
-			left_start = hint - 1;
+		if (secno >= MAIN_SECS(sbi)) {
+			ret = -ENOSPC;
+			goto out_unlock;
 		}
 	}
-	if (go_left == 0)
-		goto skip_left;
-
-	while (test_bit(left_start, free_i->free_secmap)) {
-		if (left_start > 0) {
-			left_start--;
-			continue;
-		}
-		left_start = find_first_zero_bit(free_i->free_secmap,
-							MAIN_SECS(sbi));
-		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
-		break;
-	}
-	secno = left_start;
-skip_left:
 	segno = GET_SEG_FROM_SEC(sbi, secno);
 	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
 
@@ -2701,21 +2714,13 @@ skip_left:
 		goto got_it;
 	if (zoneno == old_zoneno)
 		goto got_it;
-	if (dir == ALLOC_LEFT) {
-		if (!go_left && zoneno + 1 >= total_zones)
-			goto got_it;
-		if (go_left && zoneno == 0)
-			goto got_it;
-	}
 	for (i = 0; i < NR_CURSEG_TYPE; i++)
 		if (CURSEG_I(sbi, i)->zone == zoneno)
 			break;
 
 	if (i < NR_CURSEG_TYPE) {
 		/* zone is in user, try another */
-		if (go_left)
-			hint = zoneno * sbi->secs_per_zone - 1;
-		else if (zoneno + 1 >= total_zones)
+		if (zoneno + 1 >= total_zones)
 			hint = 0;
 		else
 			hint = (zoneno + 1) * sbi->secs_per_zone;
@@ -2725,9 +2730,23 @@
 got_it:
 	/* set it as dirty segment in free segmap */
 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
+
+	/* no free section in conventional zone */
+	if (new_sec && pinning &&
+		!f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
 	__set_inuse(sbi, segno);
 	*newseg = segno;
+out_unlock:
 	spin_unlock(&free_i->segmap_lock);
+
+	if (ret == -ENOSPC) {
+		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
+		f2fs_bug_on(sbi, 1);
+	}
+	return ret;
 }
 
 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
@@ -2736,6 +2755,10 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
 	struct summary_footer *sum_footer;
 	unsigned short seg_type = curseg->seg_type;
 
+	/* only happen when get_new_segment() fails */
+	if (curseg->next_segno == NULL_SEGNO)
+		return;
+
 	curseg->inited = true;
 	curseg->segno = curseg->next_segno;
 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
@@ -2761,9 +2784,8 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
 	sanity_check_seg_type(sbi, seg_type);
 
 	if (f2fs_need_rand_seg(sbi))
-		return get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
+		return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
 
-	/* if segs_per_sec is large than 1, we need to keep original policy. */
 	if (__is_large_section(sbi))
 		return curseg->segno;
 
@@ -2774,8 +2796,7 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
 		return 0;
 
-	if (test_opt(sbi, NOHEAP) &&
-		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
+	if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
 		return 0;
 
 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
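
Note: the rework above changes get_new_segment()'s contract. Its comment used
to promise "success, otherwise BUG"; it can now fail with -ENOSPC (no free
section; the checkpoint is stopped rather than hitting an immediate BUG) or
-EAGAIN (a pinned allocation found no conventional-zone section), and callers
must propagate the error. A simplified stand-alone model of that control flow
(all names, types, and numbers are stand-ins, not kernel code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NULL_SEGNO	((unsigned int)~0)

/* Stand-in allocator: fails with -ENOSPC when no segment is free, and
 * with -EAGAIN when a pinned request cannot be served. */
static int get_new_segment_model(unsigned int *free_segs,
				 unsigned int *newseg, bool pinning)
{
	if (*free_segs == 0)
		return -ENOSPC;		/* was: guaranteed success or BUG */
	if (pinning)
		return -EAGAIN;		/* no free conventional section */
	*newseg += 1;
	*free_segs -= 1;
	return 0;
}

static int new_curseg_model(unsigned int *free_segs, unsigned int *segno,
			    bool pinning)
{
	unsigned int candidate = *segno;
	int ret = get_new_segment_model(free_segs, &candidate, pinning);

	if (ret) {
		/* propagate instead of crashing; -ENOSPC poisons the curseg */
		if (ret == -ENOSPC)
			*segno = NULL_SEGNO;
		return ret;
	}
	*segno = candidate;
	return 0;
}

int main(void)
{
	unsigned int free_segs = 1, segno = 10;

	printf("normal: %d (segno=%u)\n",
	       new_curseg_model(&free_segs, &segno, false), segno);
	printf("exhausted: %d (expect %d, segno poisoned to %u)\n",
	       new_curseg_model(&free_segs, &segno, false), -ENOSPC, segno);
	return 0;
}
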
@@ -2792,30 +2813,31 @@
  * Allocate a current working segment.
  * This function always allocates a free segment in LFS manner.
  */
-static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
+static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
-	unsigned short seg_type = curseg->seg_type;
 	unsigned int segno = curseg->segno;
-	int dir = ALLOC_LEFT;
+	bool pinning = type == CURSEG_COLD_DATA_PINNED;
+	int ret;
 
 	if (curseg->inited)
-		write_sum_page(sbi, curseg->sum_blk,
-				GET_SUM_BLOCK(sbi, segno));
-	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
-		dir = ALLOC_RIGHT;
-
-	if (test_opt(sbi, NOHEAP))
-		dir = ALLOC_RIGHT;
+		write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
 
 	segno = __get_next_segno(sbi, type);
-	get_new_segment(sbi, &segno, new_sec, dir);
+	ret = get_new_segment(sbi, &segno, new_sec, pinning);
+	if (ret) {
+		if (ret == -ENOSPC)
+			curseg->segno = NULL_SEGNO;
+		return ret;
+	}
+
 	curseg->next_segno = segno;
 	reset_curseg(sbi, type, 1);
 	curseg->alloc_type = LFS;
 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
 		curseg->fragment_remained_chunk =
 				get_random_u32_inclusive(1, sbi->max_fragment_chunk);
+	return 0;
 }
 
 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2831,7 +2853,7 @@ static int __next_free_blkoff(struct f2fs_sb_info *sbi,
 	for (i = 0; i < entries; i++)
 		target_map[i] = ckpt_map[i] | cur_map[i];
 
-	return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
+	return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
 }
 
 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
@@ -2842,14 +2864,14 @@ static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
 
 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
 {
-	return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
+	return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
 }
 
 /*
  * This function always allocates a used segment(from dirty seglist) by SSR
  * manner, so it should recover the existing segment information of valid blocks
  */
-static void change_curseg(struct f2fs_sb_info *sbi, int type)
+static int change_curseg(struct f2fs_sb_info *sbi, int type)
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -2874,21 +2896,23 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type)
 	if (IS_ERR(sum_page)) {
 		/* GC won't be able to use stale summary pages by cp_error */
 		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
-		return;
+		return PTR_ERR(sum_page);
 	}
 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
 	f2fs_put_page(sum_page, 1);
+	return 0;
 }
 
 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
 				int alloc_mode, unsigned long long age);
 
-static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
+static int get_atssr_segment(struct f2fs_sb_info *sbi, int type,
 					int target_type, int alloc_mode,
 					unsigned long long age)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
+	int ret = 0;
 
 	curseg->seg_type = target_type;
 
@@ -2896,38 +2920,41 @@ static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
 		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
 
 		curseg->seg_type = se->type;
-		change_curseg(sbi, type);
+		ret = change_curseg(sbi, type);
 	} else {
 		/* allocate cold segment by default */
 		curseg->seg_type = CURSEG_COLD_DATA;
-		new_curseg(sbi, type, true);
+		ret = new_curseg(sbi, type, true);
 	}
 	stat_inc_seg_type(sbi, curseg);
+	return ret;
 }
 
-static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
+static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
+	int ret = 0;
 
 	if (!sbi->am.atgc_enabled)
-		return;
+		return 0;
 
 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 
 	mutex_lock(&curseg->curseg_mutex);
 	down_write(&SIT_I(sbi)->sentry_lock);
 
-	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
+	ret = get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC,
+					CURSEG_COLD_DATA, SSR, 0);
 
 	up_write(&SIT_I(sbi)->sentry_lock);
 	mutex_unlock(&curseg->curseg_mutex);
 
 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
-
+	return ret;
 }
 
-void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
+int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
 {
-	__f2fs_init_atgc_curseg(sbi);
+	return __f2fs_init_atgc_curseg(sbi);
 }
 
 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
@@ -3055,11 +3082,12 @@ static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
 	return false;
 }
 
-void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
 					unsigned int start, unsigned int end)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
 	unsigned int segno;
+	int ret = 0;
 
 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 	mutex_lock(&curseg->curseg_mutex);
@@ -3070,9 +3098,9 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
 		goto unlock;
 
 	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
-		change_curseg(sbi, type);
+		ret = change_curseg(sbi, type);
 	else
-		new_curseg(sbi, type, true);
+		ret = new_curseg(sbi, type, true);
 
 	stat_inc_seg_type(sbi, curseg);
 
@@ -3086,45 +3114,84 @@ unlock:
 	mutex_unlock(&curseg->curseg_mutex);
 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
+	return ret;
 }
 
-static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
+static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
 						bool new_sec, bool force)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
 	unsigned int old_segno;
+	int err = 0;
+
+	if (type == CURSEG_COLD_DATA_PINNED && !curseg->inited)
+		goto allocate;
 
 	if (!force && curseg->inited &&
 	    !curseg->next_blkoff &&
 	    !get_valid_blocks(sbi, curseg->segno, new_sec) &&
 	    !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
-		return;
+		return 0;
 
+allocate:
 	old_segno = curseg->segno;
-	new_curseg(sbi, type, true);
+	err = new_curseg(sbi, type, true);
+	if (err)
+		return err;
 	stat_inc_seg_type(sbi, curseg);
 	locate_dirty_segment(sbi, old_segno);
+	return 0;
 }
 
-void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
+int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
 {
+	int ret;
+
 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 	down_write(&SIT_I(sbi)->sentry_lock);
-	__allocate_new_segment(sbi, type, true, force);
+	ret = __allocate_new_segment(sbi, type, true, force);
 	up_write(&SIT_I(sbi)->sentry_lock);
 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
+
+	return ret;
}
 
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
+{
+	int err;
+	bool gc_required = true;
+
+retry:
+	f2fs_lock_op(sbi);
+	err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
+	f2fs_unlock_op(sbi);
+
+	if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
+		f2fs_down_write(&sbi->gc_lock);
+		err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1);
+		f2fs_up_write(&sbi->gc_lock);
+
+		gc_required = false;
+		if (!err)
+			goto retry;
+	}
+
+	return err;
+}
+
+int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 {
 	int i;
+	int err = 0;
 
 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 	down_write(&SIT_I(sbi)->sentry_lock);
 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
-		__allocate_new_segment(sbi, i, false, false);
+		err += __allocate_new_segment(sbi, i, false, false);
 	up_write(&SIT_I(sbi)->sentry_lock);
 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
+
+	return err;
 }
 
 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
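
Note: f2fs_allocate_pinning_section() above encodes a retry-once policy: on
zoned setups, if the pinned allocation comes back with -EAGAIN (no free
conventional section), run one round of GC and retry a single time. A compact
stand-alone model of that pattern (stub functions, not kernel code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int conventional_secs_free;	/* stand-in for free conventional zones */

static int allocate_new_section_model(void)
{
	return conventional_secs_free > 0 ? 0 : -EAGAIN;
}

static int gc_range_model(void)
{
	conventional_secs_free++;	/* pretend GC reclaimed a section */
	return 0;
}

static int allocate_pinning_section_model(bool blkzoned)
{
	bool gc_required = true;
	int err;

retry:
	err = allocate_new_section_model();

	if (blkzoned && err == -EAGAIN && gc_required) {
		err = gc_range_model();
		gc_required = false;	/* retry at most once */
		if (!err)
			goto retry;
	}
	return err;
}

int main(void)
{
	printf("with GC fallback: %d\n", allocate_pinning_section_model(true));
	return 0;
}
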
@@ -3242,8 +3309,8 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
 						GET_SEGNO(sbi, end);
 	if (need_align) {
-		start_segno = rounddown(start_segno, sbi->segs_per_sec);
-		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+		start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
+		end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
 	}
 
 	cpc.reason = CP_DISCARD;
@@ -3416,7 +3483,14 @@ static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
 		get_random_u32_inclusive(1, sbi->max_fragment_hole);
 }
 
-void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+static void reset_curseg_fields(struct curseg_info *curseg)
+{
+	curseg->inited = false;
+	curseg->segno = NULL_SEGNO;
+	curseg->next_segno = 0;
+}
+
+int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 		block_t old_blkaddr, block_t *new_blkaddr,
 		struct f2fs_summary *sum, int type,
 		struct f2fs_io_info *fio)
@@ -3427,12 +3501,18 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
 	struct seg_entry *se = NULL;
 	bool segment_full = false;
+	int ret = 0;
 
 	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 
 	mutex_lock(&curseg->curseg_mutex);
 	down_write(&sit_i->sentry_lock);
 
+	if (curseg->segno == NULL_SEGNO) {
+		ret = -ENOSPC;
+		goto out_err;
+	}
+
 	if (from_gc) {
 		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
 		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
@@ -3441,7 +3521,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	}
 
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
-	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
+	f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
 
 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
 
@@ -3470,25 +3550,35 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	 * since SSR needs latest valid block information.
 	 */
 	update_sit_entry(sbi, *new_blkaddr, 1);
-	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
-		update_sit_entry(sbi, old_blkaddr, -1);
+	update_sit_entry(sbi, old_blkaddr, -1);
 
 	/*
 	 * If the current segment is full, flush it out and replace it with a
	 * new segment.
 	 */
 	if (segment_full) {
+		if (type == CURSEG_COLD_DATA_PINNED &&
+		    !((curseg->segno + 1) % sbi->segs_per_sec)) {
+			reset_curseg_fields(curseg);
+			goto skip_new_segment;
+		}
+
 		if (from_gc) {
-			get_atssr_segment(sbi, type, se->type,
+			ret = get_atssr_segment(sbi, type, se->type,
 						AT_SSR, se->mtime);
 		} else {
 			if (need_new_seg(sbi, type))
-				new_curseg(sbi, type, false);
+				ret = new_curseg(sbi, type, false);
 			else
-				change_curseg(sbi, type);
+				ret = change_curseg(sbi, type);
 			stat_inc_seg_type(sbi, curseg);
 		}
+
+		if (ret)
+			goto out_err;
 	}
+
+skip_new_segment:
 	/*
 	 * segment dirty status should be updated after segment allocation,
 	 * so we just need to update status only one time after previous
@@ -3497,12 +3587,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
 	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
 
-	if (IS_DATASEG(type))
+	if (IS_DATASEG(curseg->seg_type))
 		atomic64_inc(&sbi->allocated_data_blocks);
 
 	up_write(&sit_i->sentry_lock);
 
-	if (page && IS_NODESEG(type)) {
+	if (page && IS_NODESEG(curseg->seg_type)) {
 		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
 
 		f2fs_inode_chksum_set(sbi, page);
@@ -3511,9 +3601,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	if (fio) {
 		struct f2fs_bio_info *io;
 
-		if (F2FS_IO_ALIGNED(sbi))
-			fio->retry = 0;
-
 		INIT_LIST_HEAD(&fio->list);
 		fio->in_list = 1;
 		io = sbi->write_io[fio->type] + fio->temp;
@@ -3523,8 +3610,15 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	}
 
 	mutex_unlock(&curseg->curseg_mutex);
-
 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
+	return 0;
+out_err:
+	*new_blkaddr = NULL_ADDR;
+	up_write(&sit_i->sentry_lock);
+	mutex_unlock(&curseg->curseg_mutex);
+	f2fs_up_read(&SM_I(sbi)->curseg_lock);
+	return ret;
+
 }
 
 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
@@ -3561,21 +3655,25 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 
 	if (keep_order)
 		f2fs_down_read(&fio->sbi->io_order_lock);
-reallocate:
-	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
-			&fio->new_blkaddr, sum, type, fio);
+
+	if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+			&fio->new_blkaddr, sum, type, fio)) {
+		if (fscrypt_inode_uses_fs_layer_crypto(fio->page->mapping->host))
+			fscrypt_finalize_bounce_page(&fio->encrypted_page);
+		if (PageWriteback(fio->page))
+			end_page_writeback(fio->page);
+		if (f2fs_in_warm_node_list(fio->sbi, fio->page))
+			f2fs_del_fsync_node_entry(fio->sbi, fio->page);
+		goto out;
+	}
 
 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
 		f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
 
 	/* writeout dirty page into bdev */
 	f2fs_submit_page_write(fio);
-	if (fio->retry) {
-		fio->old_blkaddr = fio->new_blkaddr;
-		goto reallocate;
-	}
 
 	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
-
+out:
 	if (keep_order)
 		f2fs_up_read(&fio->sbi->io_order_lock);
 }
@@ -3659,8 +3757,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 	}
 
 	if (fio->post_read)
-		invalidate_mapping_pages(META_MAPPING(sbi),
-				fio->new_blkaddr, fio->new_blkaddr);
+		f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
 
 	stat_inc_inplace_blocks(fio->sbi);
 
@@ -3749,7 +3846,8 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	/* change the current segment */
 	if (segno != curseg->segno) {
 		curseg->next_segno = segno;
-		change_curseg(sbi, type);
+		if (change_curseg(sbi, type))
+			goto out_unlock;
 	}
 
 	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
@@ -3775,12 +3873,14 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	if (recover_curseg) {
 		if (old_cursegno != curseg->segno) {
 			curseg->next_segno = old_cursegno;
-			change_curseg(sbi, type);
+			if (change_curseg(sbi, type))
+				goto out_unlock;
 		}
 		curseg->next_blkoff = old_blkoff;
 		curseg->alloc_type = old_alloc_type;
 	}
 
+out_unlock:
 	up_write(&sit_i->sentry_lock);
 	mutex_unlock(&curseg->curseg_mutex);
 	f2fs_up_write(&SM_I(sbi)->curseg_lock);
@@ -3850,7 +3950,7 @@ void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
 	for (i = 0; i < len; i++)
 		f2fs_wait_on_block_writeback(inode, blkaddr + i);
 
-	invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
+	f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
 }
 
 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
@@ -3892,7 +3992,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 		seg_i->next_blkoff = blk_off;
 
 		if (seg_i->alloc_type == SSR)
-			blk_off = sbi->blocks_per_seg;
+			blk_off = BLKS_PER_SEG(sbi);
 
 		for (j = 0; j < blk_off; j++) {
 			struct f2fs_summary *s;
@@ -3960,7 +4060,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
 			struct f2fs_summary *ns = &sum->entries[0];
 			int i;
 
-			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
+			for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
 				ns->version = 0;
 				ns->ofs_in_node = 0;
 			}
@@ -4466,7 +4566,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 #endif
 
 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
-	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
+	sit_i->sit_blocks = SEGS_TO_BLKS(sbi, sit_segs);
 	sit_i->written_valid_blocks = 0;
 	sit_i->bitmap_size = sit_bitmap_size;
 	sit_i->dirty_sentries = 0;
@@ -4539,9 +4639,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
 			array[i].seg_type = CURSEG_COLD_DATA;
 		else if (i == CURSEG_ALL_DATA_ATGC)
 			array[i].seg_type = CURSEG_COLD_DATA;
-		array[i].segno = NULL_SEGNO;
-		array[i].next_blkoff = 0;
-		array[i].inited = false;
+		reset_curseg_fields(&array[i]);
 	}
 	return restore_curseg_summaries(sbi);
 }
@@ -4593,21 +4691,20 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
 
 			sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
 
-			if (f2fs_block_unit_discard(sbi)) {
-				/* build discard map only one time */
-				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
-					memset(se->discard_map, 0xff,
+			if (!f2fs_block_unit_discard(sbi))
+				goto init_discard_map_done;
+
+			/* build discard map only one time */
+			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+				memset(se->discard_map, 0xff,
 						SIT_VBLOCK_MAP_SIZE);
-				} else {
-					memcpy(se->discard_map,
-						se->cur_valid_map,
+				goto init_discard_map_done;
+			}
+			memcpy(se->discard_map, se->cur_valid_map,
 						SIT_VBLOCK_MAP_SIZE);
-					sbi->discard_blks +=
-						sbi->blocks_per_seg -
+			sbi->discard_blks += BLKS_PER_SEG(sbi) -
 						se->valid_blocks;
-				}
-			}
-
+init_discard_map_done:
 			if (__is_large_section(sbi))
 				get_sec_entry(sbi, start)->valid_blocks +=
 							se->valid_blocks;
@@ -4747,7 +4844,7 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
 		return;
 
 	mutex_lock(&dirty_i->seglist_lock);
-	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
+	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
 		valid_blocks = get_valid_blocks(sbi, segno, true);
 		secno = GET_SEC_FROM_SEG(sbi, segno);
 
@@ -4846,7 +4943,7 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
 		if (curseg->alloc_type == SSR)
 			continue;
 
-		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
+		for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
 				continue;
 out:
@@ -4862,6 +4959,16 @@ out:
 }
 
 #ifdef CONFIG_BLK_DEV_ZONED
+static const char *f2fs_zone_status[BLK_ZONE_COND_OFFLINE + 1] = {
+	[BLK_ZONE_COND_NOT_WP]		= "NOT_WP",
+	[BLK_ZONE_COND_EMPTY]		= "EMPTY",
+	[BLK_ZONE_COND_IMP_OPEN]	= "IMPLICIT_OPEN",
+	[BLK_ZONE_COND_EXP_OPEN]	= "EXPLICIT_OPEN",
+	[BLK_ZONE_COND_CLOSED]		= "CLOSED",
+	[BLK_ZONE_COND_READONLY]	= "READONLY",
+	[BLK_ZONE_COND_FULL]		= "FULL",
+	[BLK_ZONE_COND_OFFLINE]		= "OFFLINE",
+};
 
 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
 				    struct f2fs_dev_info *fdev,
@@ -4883,14 +4990,19 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
 	 * Skip check of zones cursegs point to, since
 	 * fix_curseg_write_pointer() checks them.
 	 */
-	if (zone_segno >= MAIN_SEGS(sbi) ||
-	    IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno)))
+	if (zone_segno >= MAIN_SEGS(sbi))
 		return 0;
 
 	/*
 	 * Get # of valid block of the zone.
 	 */
 	valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
+	if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
+		f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
+				zone_segno, valid_block_cnt,
+				f2fs_zone_status[zone->cond]);
+		return 0;
+	}
 
 	if ((!valid_block_cnt && zone->cond == BLK_ZONE_COND_EMPTY) ||
 	    (valid_block_cnt && zone->cond == BLK_ZONE_COND_FULL))
@@ -4898,8 +5010,8 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
 
 	if (!valid_block_cnt) {
 		f2fs_notice(sbi, "Zone without valid block has non-zero write "
-			    "pointer. Reset the write pointer: cond[0x%x]",
-			    zone->cond);
+			    "pointer. Reset the write pointer: cond[%s]",
+			    f2fs_zone_status[zone->cond]);
 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
 					zone->len >> log_sectors_per_block);
 		if (ret)
@@ -4916,8 +5028,8 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
 	 * selected for write operation until it get discarded.
 	 */
 	f2fs_notice(sbi, "Valid blocks are not aligned with write "
-		    "pointer: valid block[0x%x,0x%x] cond[0x%x]",
-		    zone_segno, valid_block_cnt, zone->cond);
+		    "pointer: valid block[0x%x,0x%x] cond[%s]",
+		    zone_segno, valid_block_cnt, f2fs_zone_status[zone->cond]);
 
 	nofs_flags = memalloc_nofs_save();
 	ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
@@ -5128,7 +5240,7 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
 	unsigned int secno;
 
 	if (!sbi->unusable_blocks_per_sec)
-		return sbi->blocks_per_seg;
+		return BLKS_PER_SEG(sbi);
 
 	secno = GET_SEC_FROM_SEG(sbi, segno);
 	seg_start = START_BLOCK(sbi, segno);
@@ -5143,10 +5255,10 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
 	 */
 	if (seg_start >= sec_cap_blkaddr)
 		return 0;
-	if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
+	if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
 		return sec_cap_blkaddr - seg_start;
 
-	return sbi->blocks_per_seg;
+	return BLKS_PER_SEG(sbi);
 }
 #else
 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
@@ -5172,7 +5284,7 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
 	if (f2fs_sb_has_blkzoned(sbi))
 		return f2fs_usable_zone_blks_in_seg(sbi, segno);
 
-	return sbi->blocks_per_seg;
+	return BLKS_PER_SEG(sbi);
 }
 
 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
@@ -5181,7 +5293,7 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
 	if (f2fs_sb_has_blkzoned(sbi))
 		return CAP_SEGS_PER_SEC(sbi);
 
-	return sbi->segs_per_sec;
+	return SEGS_PER_SEC(sbi);
 }
 
 /*
@@ -5196,14 +5308,14 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi)
 
 	sit_i->min_mtime = ULLONG_MAX;
 
-	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
+	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
 		unsigned int i;
 		unsigned long long mtime = 0;
 
-		for (i = 0; i < sbi->segs_per_sec; i++)
+		for (i = 0; i < SEGS_PER_SEC(sbi); i++)
 			mtime += get_seg_entry(sbi, segno + i)->mtime;
 
-		mtime = div_u64(mtime, sbi->segs_per_sec);
+		mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
 
 		if (sit_i->min_mtime > mtime)
 			sit_i->min_mtime = mtime;
@@ -5242,7 +5354,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
 		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
-	sm_info->min_seq_blocks = sbi->blocks_per_seg;
+	sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
 	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
 	sm_info->min_ssr_sections = reserved_sections(sbi);
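
Note: the f2fs_zone_status[] table added in this patch turns raw
BLK_ZONE_COND_* values into readable strings for the zone write-pointer
notices. The designated-initializer idiom leaves gaps (the condition codes are
sparse) as NULL pointers, so out-of-table values need a guard. A stand-alone
illustration; the enum values below mirror <linux/blkzoned.h> but are
redeclared locally so the sketch builds anywhere:

#include <stdio.h>

enum blk_zone_cond_model {
	ZONE_COND_NOT_WP	= 0x0,
	ZONE_COND_EMPTY		= 0x1,
	ZONE_COND_IMP_OPEN	= 0x2,
	ZONE_COND_EXP_OPEN	= 0x3,
	ZONE_COND_CLOSED	= 0x4,
	ZONE_COND_READONLY	= 0xd,
	ZONE_COND_FULL		= 0xe,
	ZONE_COND_OFFLINE	= 0xf,
};

/* Sparse table: entries 0x5..0xc are implicitly NULL. */
static const char *zone_status[ZONE_COND_OFFLINE + 1] = {
	[ZONE_COND_NOT_WP]	= "NOT_WP",
	[ZONE_COND_EMPTY]	= "EMPTY",
	[ZONE_COND_IMP_OPEN]	= "IMPLICIT_OPEN",
	[ZONE_COND_EXP_OPEN]	= "EXPLICIT_OPEN",
	[ZONE_COND_CLOSED]	= "CLOSED",
	[ZONE_COND_READONLY]	= "READONLY",
	[ZONE_COND_FULL]	= "FULL",
	[ZONE_COND_OFFLINE]	= "OFFLINE",
};

int main(void)
{
	enum blk_zone_cond_model cond = ZONE_COND_EXP_OPEN;

	/* guard against gaps before printing */
	printf("cond[%s]\n", zone_status[cond] ? zone_status[cond] : "?");
	return 0;
}
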
