author     Linus Torvalds <torvalds@linux-foundation.org>   2019-07-13 03:28:24 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-07-13 03:28:24 +0300
commit     a641a88e5d6864f20b2608cb01165c756794e645 (patch)
tree       5acf37ca592a87d705169174b51feb47bd253fa9 /fs
parent     4ce9d181ebe53abbca5f450b8a2984b8c3a38f26 (diff)
parent     2d008835ec2fcf6eef3285e41e62a5eabd1fe76b (diff)
download   linux-a641a88e5d6864f20b2608cb01165c756794e645.tar.xz
Merge tag 'f2fs-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
"In this round, we've introduced native swap file support which can
exploit DIO, enhanced existing checkpoint=disable feature with
additional mount option to tune the triggering condition, and allowed
user to preallocate physical blocks in a pinned file which will be
useful to avoid f2fs fragmentation in append-only workloads. In
addition, we've fixed subtle quota corruption issue.
Enhancements:
- add swap file support which uses DIO
- allocate blocks for pinned file
- allow SSR and mount option to enhance checkpoint=disable
- enhance IPU IOs
- add more sanity checks, such as for out-of-range memory access
Bug fixes:
- quota corruption in a rare corner case with error-injected SPO (sudden power-off)
- fix root_reserved on remount and some wrong counts
- add missing fsck flag
Some patches were also introduced to clean up the ambiguous i_flags
definitions and the debug message code"
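
The pinned-file item above is easiest to see from userspace: once a file is pinned through F2FS_IOC_SET_PIN_FILE (which this series also opens up to unprivileged file owners), a subsequent fallocate() preallocates physical blocks instead of merely reserving space, which is what append-only workloads need to avoid fragmentation. The following is a minimal sketch under stated assumptions: the ioctl number _IOW(magic, 13, __u32) is copied from the patch, while the 0xf5 magic value and the exact pin-flag semantics are assumptions, since there was no uapi header for these ioctls at the time.

```c
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/*
 * Userspace copy of the definitions in fs/f2fs/f2fs.h.  The ioctl number
 * (13, __u32) is taken from the patch; the 0xf5 magic is assumed.
 */
#define F2FS_IOCTL_MAGIC        0xf5
#define F2FS_IOC_SET_PIN_FILE   _IOW(F2FS_IOCTL_MAGIC, 13, __u32)

int main(int argc, char **argv)
{
    __u32 pin = 1;                     /* non-zero pins, zero unpins (assumed) */
    off_t len = 128 * 1024 * 1024;     /* preallocate 128 MiB */
    int fd, err;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <file-on-f2fs>\n", argv[0]);
        return 1;
    }

    fd = open(argv[1], O_RDWR | O_CREAT, 0600);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Pin first so the following preallocation maps real blocks. */
    if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0) {
        perror("F2FS_IOC_SET_PIN_FILE");
        return 1;
    }

    /* On a pinned file this now allocates physical blocks up front. */
    err = posix_fallocate(fd, 0, len);
    if (err) {
        fprintf(stderr, "posix_fallocate: %s\n", strerror(err));
        return 1;
    }

    close(fd);
    return 0;
}
```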
* tag 'f2fs-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (33 commits)
f2fs: improve print log in f2fs_sanity_check_ckpt()
f2fs: avoid out-of-range memory access
f2fs: fix to avoid long latency during umount
f2fs: allow all the users to pin a file
f2fs: support swap file w/ DIO
f2fs: allocate blocks for pinned file
f2fs: fix is_idle() check for discard type
f2fs: add a rw_sem to cover quota flag changes
f2fs: set SBI_NEED_FSCK for xattr corruption case
f2fs: use generic EFSBADCRC/EFSCORRUPTED
f2fs: Use DIV_ROUND_UP() instead of open-coding
f2fs: print kernel message if filesystem is inconsistent
f2fs: introduce f2fs_<level> macros to wrap f2fs_printk()
f2fs: avoid get_valid_blocks() for cleanup
f2fs: ioctl for removing a range from F2FS
f2fs: only set project inherit bit for directory
f2fs: separate f2fs i_flags from fs_flags and ext4 i_flags
f2fs: replace ktype default_attrs with default_groups
f2fs: Add option to limit required GC for checkpoint=disable
f2fs: Fix accounting for unusable blocks
...
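
The "ioctl for removing a range from F2FS" commit above is the new online shrink interface, F2FS_IOC_RESIZE_FS, whose kernel side (f2fs_resize_fs() and its helpers) appears in the file.c and gc.c hunks below. It takes the desired total block count as a __u64, accepts only a smaller, section-aligned value, and requires CAP_SYS_ADMIN. A hedged userspace sketch follows; the ioctl number comes from the patch, while the 0xf5 magic and the surrounding boilerplate are assumptions.

```c
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Mirrors fs/f2fs/f2fs.h: _IOW(magic, 16, __u64); the 0xf5 magic is assumed. */
#define F2FS_IOCTL_MAGIC    0xf5
#define F2FS_IOC_RESIZE_FS  _IOW(F2FS_IOCTL_MAGIC, 16, __u64)

int main(int argc, char **argv)
{
    __u64 block_count;
    int fd;

    if (argc != 3) {
        fprintf(stderr, "usage: %s <file-on-f2fs> <new-block-count>\n",
                argv[0]);
        return 1;
    }
    block_count = strtoull(argv[2], NULL, 0);

    fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /*
     * Shrink only: the kernel rejects a count larger than the current
     * size or one that is not aligned to the section size.
     */
    if (ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count) < 0) {
        perror("F2FS_IOC_RESIZE_FS");
        close(fd);
        return 1;
    }

    close(fd);
    return 0;
}
```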
Diffstat (limited to 'fs')
-rw-r--r--   fs/f2fs/checkpoint.c    |  107
-rw-r--r--   fs/f2fs/data.c          |  249
-rw-r--r--   fs/f2fs/debug.c         |    7
-rw-r--r--   fs/f2fs/dir.c           |   16
-rw-r--r--   fs/f2fs/extent_cache.c  |    7
-rw-r--r--   fs/f2fs/f2fs.h          |  129
-rw-r--r--   fs/f2fs/file.c          |  302
-rw-r--r--   fs/f2fs/gc.c            |  196
-rw-r--r--   fs/f2fs/inline.c        |   16
-rw-r--r--   fs/f2fs/inode.c         |   78
-rw-r--r--   fs/f2fs/namei.c         |   10
-rw-r--r--   fs/f2fs/node.c          |   38
-rw-r--r--   fs/f2fs/recovery.c      |   43
-rw-r--r--   fs/f2fs/segment.c       |  170
-rw-r--r--   fs/f2fs/segment.h       |   16
-rw-r--r--   fs/f2fs/super.c         |  610
-rw-r--r--   fs/f2fs/sysfs.c         |   22
-rw-r--r--   fs/f2fs/xattr.c         |   10
18 files changed, 1268 insertions(+), 758 deletions(-)
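
A large share of the textual churn in the hunks below is mechanical: f2fs_msg(sbi->sb, KERN_LEVEL, ...) calls are replaced by the new f2fs_err()/f2fs_warn()/f2fs_info() wrappers, which concatenate a KERN_* prefix onto the format string at compile time and hand everything to f2fs_printk(). A simplified userspace model of that pattern follows; the macro shape matches the patch, but the prefix strings, the struct contents, and the printing body are stand-ins rather than the kernel implementation.

```c
#include <stdarg.h>
#include <stdio.h>

/* Stand-ins for the kernel's KERN_* level prefixes (the real ones differ). */
#define KERN_ERR        "<3>"
#define KERN_WARNING    "<4>"
#define KERN_INFO       "<6>"

struct f2fs_sb_info {
    const char *sb_id;  /* stand-in for sbi->sb->s_id */
};

/*
 * Simplified model of f2fs_printk(): callers hand in a format string that
 * begins with a level prefix; the function peels that prefix off and prints
 * "F2FS-fs (<device>): <message>" at the requested level.
 */
static void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
    va_list args;

    printf("%.3s F2FS-fs (%s): ", fmt, sbi->sb_id);  /* 3-byte level prefix */
    va_start(args, fmt);
    vprintf(fmt + 3, args);                          /* message body */
    va_end(args);
    putchar('\n');
}

/*
 * The wrappers from the patch: string-literal concatenation glues the level
 * onto the caller's format, so each call site shrinks to a single line.
 */
#define f2fs_err(sbi, fmt, ...)  f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
#define f2fs_warn(sbi, fmt, ...) f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
#define f2fs_info(sbi, fmt, ...) f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)

int main(void)
{
    struct f2fs_sb_info sbi = { .sb_id = "sdb1" };

    f2fs_warn(&sbi, "invalid crc_offset: %zu", (size_t)4092);
    f2fs_err(&sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
             1234u, 1, 2);
    return 0;
}
```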
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index ed70b68b2b38..a0eef95b9e0e 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -146,8 +146,8 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, exist = f2fs_test_bit(offset, se->cur_valid_map); if (!exist && type == DATA_GENERIC_ENHANCE) { - f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " - "blkaddr:%u, sit bitmap:%d", blkaddr, exist); + f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", + blkaddr, exist); set_sbi_flag(sbi, SBI_NEED_FSCK); WARN_ON(1); } @@ -184,8 +184,8 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, case DATA_GENERIC_ENHANCE_READ: if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || blkaddr < MAIN_BLKADDR(sbi))) { - f2fs_msg(sbi->sb, KERN_WARNING, - "access invalid blkaddr:%u", blkaddr); + f2fs_warn(sbi, "access invalid blkaddr:%u", + blkaddr); set_sbi_flag(sbi, SBI_NEED_FSCK); WARN_ON(1); return false; @@ -657,9 +657,8 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) err_out: set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: orphan failed (ino=%x), run fsck to fix.", - __func__, ino); + f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.", + __func__, ino); return err; } @@ -676,13 +675,12 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi) return 0; if (bdev_read_only(sbi->sb->s_bdev)) { - f2fs_msg(sbi->sb, KERN_INFO, "write access " - "unavailable, skipping orphan cleanup"); + f2fs_info(sbi, "write access unavailable, skipping orphan cleanup"); return 0; } if (s_flags & SB_RDONLY) { - f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs"); + f2fs_info(sbi, "orphan cleanup on readonly fs"); sbi->sb->s_flags &= ~SB_RDONLY; } @@ -827,26 +825,14 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, if (crc_offset < CP_MIN_CHKSUM_OFFSET || crc_offset > CP_CHKSUM_OFFSET) { f2fs_put_page(*cp_page, 1); - f2fs_msg(sbi->sb, KERN_WARNING, - "invalid crc_offset: %zu", crc_offset); + f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset); return -EINVAL; } - if (__is_set_ckpt_flags(*cp_block, CP_LARGE_NAT_BITMAP_FLAG)) { - if (crc_offset != CP_MIN_CHKSUM_OFFSET) { - f2fs_put_page(*cp_page, 1); - f2fs_msg(sbi->sb, KERN_WARNING, - "layout of large_nat_bitmap is deprecated, " - "run fsck to repair, chksum_offset: %zu", - crc_offset); - return -EINVAL; - } - } - crc = f2fs_checkpoint_chksum(sbi, *cp_block); if (crc != cur_cp_crc(*cp_block)) { f2fs_put_page(*cp_page, 1); - f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value"); + f2fs_warn(sbi, "invalid crc value"); return -EINVAL; } @@ -869,9 +855,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (le32_to_cpu(cp_block->cp_pack_total_block_count) > sbi->blocks_per_seg) { - f2fs_msg(sbi->sb, KERN_WARNING, - "invalid cp_pack_total_block_count:%u", - le32_to_cpu(cp_block->cp_pack_total_block_count)); + f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u", + le32_to_cpu(cp_block->cp_pack_total_block_count)); goto invalid_cp; } pre_version = *version; @@ -905,6 +890,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) unsigned int cp_blks = 1 + __cp_payload(sbi); block_t cp_blk_no; int i; + int err; sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), GFP_KERNEL); @@ -932,6 +918,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) } else if (cp2) { cur_page = cp2; } else { + err = -EFSCORRUPTED; goto fail_no_cp; } @@ -944,8 +931,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) 
sbi->cur_cp_pack = 2; /* Sanity checking of checkpoint */ - if (f2fs_sanity_check_ckpt(sbi)) + if (f2fs_sanity_check_ckpt(sbi)) { + err = -EFSCORRUPTED; goto free_fail_no_cp; + } if (cp_blks <= 1) goto done; @@ -959,8 +948,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) unsigned char *ckpt = (unsigned char *)sbi->ckpt; cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i); - if (IS_ERR(cur_page)) + if (IS_ERR(cur_page)) { + err = PTR_ERR(cur_page); goto free_fail_no_cp; + } sit_bitmap_ptr = page_address(cur_page); memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size); f2fs_put_page(cur_page, 1); @@ -975,7 +966,7 @@ free_fail_no_cp: f2fs_put_page(cp2, 1); fail_no_cp: kvfree(sbi->ckpt); - return -EINVAL; + return err; } static void __add_dirty_inode(struct inode *inode, enum inode_type type) @@ -1142,17 +1133,24 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi) static bool __need_flush_quota(struct f2fs_sb_info *sbi) { + bool ret = false; + if (!is_journalled_quota(sbi)) return false; - if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) - return false; - if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) - return false; - if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) - return true; - if (get_pages(sbi, F2FS_DIRTY_QDATA)) - return true; - return false; + + down_write(&sbi->quota_sem); + if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) { + ret = false; + } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) { + ret = false; + } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) { + clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); + ret = true; + } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) { + ret = true; + } + up_write(&sbi->quota_sem); + return ret; } /* @@ -1171,26 +1169,22 @@ static int block_operations(struct f2fs_sb_info *sbi) blk_start_plug(&plug); retry_flush_quotas: + f2fs_lock_all(sbi); if (__need_flush_quota(sbi)) { int locked; if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) { set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); - f2fs_lock_all(sbi); + set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); goto retry_flush_dents; } - clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); + f2fs_unlock_all(sbi); /* only failed during mount/umount/freeze/quotactl */ locked = down_read_trylock(&sbi->sb->s_umount); f2fs_quota_sync(sbi->sb, -1); if (locked) up_read(&sbi->sb->s_umount); - } - - f2fs_lock_all(sbi); - if (__need_flush_quota(sbi)) { - f2fs_unlock_all(sbi); cond_resched(); goto retry_flush_quotas; } @@ -1212,12 +1206,6 @@ retry_flush_dents: */ down_write(&sbi->node_change); - if (__need_flush_quota(sbi)) { - up_write(&sbi->node_change); - f2fs_unlock_all(sbi); - goto retry_flush_quotas; - } - if (get_pages(sbi, F2FS_DIRTY_IMETA)) { up_write(&sbi->node_change); f2fs_unlock_all(sbi); @@ -1313,7 +1301,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) else __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); - if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) + if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || + is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) __set_ckpt_flags(ckpt, CP_FSCK_FLAG); if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) @@ -1328,10 +1317,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG); - /* - * TODO: we count on fsck.f2fs to clear this flag until we figure out - * missing cases which clear it incorrectly. 
- */ + else + __clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG); if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG); @@ -1571,8 +1558,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { if (cpc->reason != CP_PAUSE) return 0; - f2fs_msg(sbi->sb, KERN_WARNING, - "Start checkpoint disabled!"); + f2fs_warn(sbi, "Start checkpoint disabled!"); } mutex_lock(&sbi->cp_mutex); @@ -1638,8 +1624,7 @@ stop: stat_inc_cp_count(sbi->stat_info); if (cpc->reason & CP_RECOVERY) - f2fs_msg(sbi->sb, KERN_NOTICE, - "checkpoint: version = %llx", ckpt_ver); + f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver); /* do checkpoint periodically */ f2fs_update_time(sbi, CP_TIME); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index a546ac8685ea..0ca530afc684 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -14,6 +14,7 @@ #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/bio.h> +#include <linux/swap.h> #include <linux/prefetch.h> #include <linux/uio.h> #include <linux/cleancache.h> @@ -54,7 +55,7 @@ static bool __is_cp_guaranteed(struct page *page) static enum count_type __read_io_type(struct page *page) { - struct address_space *mapping = page->mapping; + struct address_space *mapping = page_file_mapping(page); if (mapping) { struct inode *inode = mapping->host; @@ -347,20 +348,20 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) io->bio = NULL; } -static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, +static bool __has_merged_page(struct bio *bio, struct inode *inode, struct page *page, nid_t ino) { struct bio_vec *bvec; struct page *target; struct bvec_iter_all iter_all; - if (!io->bio) + if (!bio) return false; if (!inode && !page && !ino) return true; - bio_for_each_segment_all(bvec, io->bio, iter_all) { + bio_for_each_segment_all(bvec, bio, iter_all) { target = bvec->bv_page; if (fscrypt_is_bounce_page(target)) @@ -410,7 +411,7 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi, struct f2fs_bio_info *io = sbi->write_io[btype] + temp; down_read(&io->io_rwsem); - ret = __has_merged_page(io, inode, page, ino); + ret = __has_merged_page(io->bio, inode, page, ino); up_read(&io->io_rwsem); } if (ret) @@ -454,7 +455,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, fio->is_por ? META_POR : (__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC_ENHANCE))) - return -EFAULT; + return -EFSCORRUPTED; trace_f2fs_submit_page_bio(page, fio); f2fs_trace_ios(fio, 0); @@ -480,6 +481,61 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) return 0; } +int f2fs_merge_page_bio(struct f2fs_io_info *fio) +{ + struct bio *bio = *fio->bio; + struct page *page = fio->encrypted_page ? + fio->encrypted_page : fio->page; + + if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, + __is_meta_io(fio) ? 
META_GENERIC : DATA_GENERIC)) + return -EFSCORRUPTED; + + trace_f2fs_submit_page_bio(page, fio); + f2fs_trace_ios(fio, 0); + + if (bio && (*fio->last_block + 1 != fio->new_blkaddr || + !__same_bdev(fio->sbi, fio->new_blkaddr, bio))) { + __submit_bio(fio->sbi, bio, fio->type); + bio = NULL; + } +alloc_new: + if (!bio) { + bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc, + BIO_MAX_PAGES, false, fio->type, fio->temp); + bio_set_op_attrs(bio, fio->op, fio->op_flags); + } + + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { + __submit_bio(fio->sbi, bio, fio->type); + bio = NULL; + goto alloc_new; + } + + if (fio->io_wbc) + wbc_account_io(fio->io_wbc, page, PAGE_SIZE); + + inc_page_count(fio->sbi, WB_DATA_TYPE(page)); + + *fio->last_block = fio->new_blkaddr; + *fio->bio = bio; + + return 0; +} + +static void f2fs_submit_ipu_bio(struct f2fs_sb_info *sbi, struct bio **bio, + struct page *page) +{ + if (!bio) + return; + + if (!__has_merged_page(*bio, NULL, page, 0)) + return; + + __submit_bio(sbi, *bio, DATA); + *bio = NULL; +} + void f2fs_submit_page_write(struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = fio->sbi; @@ -733,7 +789,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, dn.data_blkaddr = ei.blk + index - ei.fofs; if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, DATA_GENERIC_ENHANCE_READ)) { - err = -EFAULT; + err = -EFSCORRUPTED; goto put_err; } goto got_it; @@ -753,7 +809,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, !f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, DATA_GENERIC_ENHANCE)) { - err = -EFAULT; + err = -EFSCORRUPTED; goto put_err; } got_it: @@ -1099,7 +1155,7 @@ next_block: if (__is_valid_data_blkaddr(blkaddr) && !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { - err = -EFAULT; + err = -EFSCORRUPTED; goto sync_out; } @@ -1529,7 +1585,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page, sector_t block_nr; int ret = 0; - block_in_file = (sector_t)page->index; + block_in_file = (sector_t)page_index(page); last_block = block_in_file + nr_pages; last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; @@ -1562,14 +1618,15 @@ got_it: block_nr = map->m_pblk + block_in_file - map->m_lblk; SetPageMappedToDisk(page); - if (!PageUptodate(page) && !cleancache_get_page(page)) { + if (!PageUptodate(page) && (!PageSwapCache(page) && + !cleancache_get_page(page))) { SetPageUptodate(page); goto confused; } if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, DATA_GENERIC_ENHANCE_READ)) { - ret = -EFAULT; + ret = -EFSCORRUPTED; goto out; } } else { @@ -1660,7 +1717,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping, prefetchw(&page->flags); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, - page->index, + page_index(page), readahead_gfp_mask(mapping))) goto next_page; } @@ -1684,7 +1741,7 @@ next_page: static int f2fs_read_data_page(struct file *file, struct page *page) { - struct inode *inode = page->mapping->host; + struct inode *inode = page_file_mapping(page)->host; int ret = -EAGAIN; trace_f2fs_readpage(page, DATA); @@ -1693,7 +1750,8 @@ static int f2fs_read_data_page(struct file *file, struct page *page) if (f2fs_has_inline_data(inode)) ret = f2fs_read_inline_data(inode, page); if (ret == -EAGAIN) - ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false); + ret = f2fs_mpage_readpages(page_file_mapping(page), + NULL, page, 1, false); return ret; } @@ -1851,7 +1909,7 @@ int 
f2fs_do_write_data_page(struct f2fs_io_info *fio) if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, DATA_GENERIC_ENHANCE)) - return -EFAULT; + return -EFSCORRUPTED; ipu_force = true; fio->need_lock = LOCK_DONE; @@ -1878,7 +1936,7 @@ got_it: if (__is_valid_data_blkaddr(fio->old_blkaddr) && !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, DATA_GENERIC_ENHANCE)) { - err = -EFAULT; + err = -EFSCORRUPTED; goto out_writepage; } /* @@ -1946,6 +2004,8 @@ out: } static int __write_data_page(struct page *page, bool *submitted, + struct bio **bio, + sector_t *last_block, struct writeback_control *wbc, enum iostat_type io_type) { @@ -1971,6 +2031,8 @@ static int __write_data_page(struct page *page, bool *submitted, .need_lock = LOCK_RETRY, .io_type = io_type, .io_wbc = wbc, + .bio = bio, + .last_block = last_block, }; trace_f2fs_writepage(page, DATA); @@ -2069,10 +2131,13 @@ out: unlock_page(page); if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && - !F2FS_I(inode)->cp_task) + !F2FS_I(inode)->cp_task) { + f2fs_submit_ipu_bio(sbi, bio, page); f2fs_balance_fs(sbi, need_balance_fs); + } if (unlikely(f2fs_cp_error(sbi))) { + f2fs_submit_ipu_bio(sbi, bio, page); f2fs_submit_merged_write(sbi, DATA); submitted = NULL; } @@ -2099,7 +2164,7 @@ redirty_out: static int f2fs_write_data_page(struct page *page, struct writeback_control *wbc) { - return __write_data_page(page, NULL, wbc, FS_DATA_IO); + return __write_data_page(page, NULL, NULL, NULL, wbc, FS_DATA_IO); } /* @@ -2115,6 +2180,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping, int done = 0; struct pagevec pvec; struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); + struct bio *bio = NULL; + sector_t last_block; int nr_pages; pgoff_t uninitialized_var(writeback_index); pgoff_t index; @@ -2191,17 +2258,20 @@ continue_unlock: } if (PageWriteback(page)) { - if (wbc->sync_mode != WB_SYNC_NONE) + if (wbc->sync_mode != WB_SYNC_NONE) { f2fs_wait_on_page_writeback(page, DATA, true, true); - else + f2fs_submit_ipu_bio(sbi, &bio, page); + } else { goto continue_unlock; + } } if (!clear_page_dirty_for_io(page)) goto continue_unlock; - ret = __write_data_page(page, &submitted, wbc, io_type); + ret = __write_data_page(page, &submitted, &bio, + &last_block, wbc, io_type); if (unlikely(ret)) { /* * keep nr_to_write, since vfs uses this to @@ -2250,6 +2320,9 @@ continue_unlock: if (nwritten) f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host, NULL, 0, DATA); + /* submit cached bio of IPU write */ + if (bio) + __submit_bio(sbi, bio, DATA); return ret; } @@ -2261,6 +2334,9 @@ static inline bool __should_serialize_io(struct inode *inode, return false; if (IS_NOQUOTA(inode)) return false; + /* to avoid deadlock in path of data flush */ + if (F2FS_I(inode)->cp_task) + return false; if (wbc->sync_mode != WB_SYNC_ALL) return true; if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) @@ -2532,7 +2608,7 @@ repeat: } else { if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ)) { - err = -EFAULT; + err = -EFSCORRUPTED; goto fail; } err = f2fs_submit_page_read(inode, page, blkaddr); @@ -2777,13 +2853,14 @@ int f2fs_release_page(struct page *page, gfp_t wait) static int f2fs_set_data_page_dirty(struct page *page) { - struct address_space *mapping = page->mapping; - struct inode *inode = mapping->host; + struct inode *inode = page_file_mapping(page)->host; trace_f2fs_set_page_dirty(page, DATA); if (!PageUptodate(page)) SetPageUptodate(page); + if (PageSwapCache(page)) + return __set_page_dirty_nobuffers(page); if 
(f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) { if (!IS_ATOMIC_WRITTEN_PAGE(page)) { @@ -2875,6 +2952,126 @@ int f2fs_migrate_page(struct address_space *mapping, } #endif +#ifdef CONFIG_SWAP +/* Copied from generic_swapfile_activate() to check any holes */ +static int check_swap_activate(struct file *swap_file, unsigned int max) +{ + struct address_space *mapping = swap_file->f_mapping; + struct inode *inode = mapping->host; + unsigned blocks_per_page; + unsigned long page_no; + unsigned blkbits; + sector_t probe_block; + sector_t last_block; + sector_t lowest_block = -1; + sector_t highest_block = 0; + + blkbits = inode->i_blkbits; + blocks_per_page = PAGE_SIZE >> blkbits; + + /* + * Map all the blocks into the extent list. This code doesn't try + * to be very smart. + */ + probe_block = 0; + page_no = 0; + last_block = i_size_read(inode) >> blkbits; + while ((probe_block + blocks_per_page) <= last_block && page_no < max) { + unsigned block_in_page; + sector_t first_block; + + cond_resched(); + + first_block = bmap(inode, probe_block); + if (first_block == 0) + goto bad_bmap; + + /* + * It must be PAGE_SIZE aligned on-disk + */ + if (first_block & (blocks_per_page - 1)) { + probe_block++; + goto reprobe; + } + + for (block_in_page = 1; block_in_page < blocks_per_page; + block_in_page++) { + sector_t block; + + block = bmap(inode, probe_block + block_in_page); + if (block == 0) + goto bad_bmap; + if (block != first_block + block_in_page) { + /* Discontiguity */ + probe_block++; + goto reprobe; + } + } + + first_block >>= (PAGE_SHIFT - blkbits); + if (page_no) { /* exclude the header page */ + if (first_block < lowest_block) + lowest_block = first_block; + if (first_block > highest_block) + highest_block = first_block; + } + + page_no++; + probe_block += blocks_per_page; +reprobe: + continue; + } + return 0; + +bad_bmap: + pr_err("swapon: swapfile has holes\n"); + return -EINVAL; +} + +static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file, + sector_t *span) +{ + struct inode *inode = file_inode(file); + int ret; + + if (!S_ISREG(inode->i_mode)) + return -EINVAL; + + if (f2fs_readonly(F2FS_I_SB(inode)->sb)) + return -EROFS; + + ret = f2fs_convert_inline_inode(inode); + if (ret) + return ret; + + ret = check_swap_activate(file, sis->max); + if (ret) + return ret; + + set_inode_flag(inode, FI_PIN_FILE); + f2fs_precache_extents(inode); + f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); + return 0; +} + +static void f2fs_swap_deactivate(struct file *file) +{ + struct inode *inode = file_inode(file); + + clear_inode_flag(inode, FI_PIN_FILE); +} +#else +static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file, + sector_t *span) +{ + return -EOPNOTSUPP; +} + +static void f2fs_swap_deactivate(struct file *file) +{ +} +#endif + const struct address_space_operations f2fs_dblock_aops = { .readpage = f2fs_read_data_page, .readpages = f2fs_read_data_pages, @@ -2887,6 +3084,8 @@ const struct address_space_operations f2fs_dblock_aops = { .releasepage = f2fs_release_page, .direct_IO = f2fs_direct_IO, .bmap = f2fs_bmap, + .swap_activate = f2fs_swap_activate, + .swap_deactivate = f2fs_swap_deactivate, #ifdef CONFIG_MIGRATION .migratepage = f2fs_migrate_page, #endif diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 99e9a5c37b71..7706049d23bf 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -27,8 +27,15 @@ static DEFINE_MUTEX(f2fs_stat_mutex); static void update_general_status(struct f2fs_sb_info *sbi) { struct f2fs_stat_info *si = 
F2FS_STAT(sbi); + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); int i; + /* these will be changed if online resize is done */ + si->main_area_segs = le32_to_cpu(raw_super->segment_count_main); + si->main_area_sections = le32_to_cpu(raw_super->section_count); + si->main_area_zones = si->main_area_sections / + le32_to_cpu(raw_super->secs_per_zone); + /* validation check of the segment numbers */ si->hit_largest = atomic64_read(&sbi->read_hit_largest); si->hit_cached = atomic64_read(&sbi->read_hit_cached); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 59bc46017855..85a1528f319f 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -218,9 +218,8 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, max_depth = F2FS_I(dir)->i_current_depth; if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) { - f2fs_msg(F2FS_I_SB(dir)->sb, KERN_WARNING, - "Corrupted max_depth of %lu: %u", - dir->i_ino, max_depth); + f2fs_warn(F2FS_I_SB(dir), "Corrupted max_depth of %lu: %u", + dir->i_ino, max_depth); max_depth = MAX_DIR_HASH_DEPTH; f2fs_i_depth_write(dir, max_depth); } @@ -816,11 +815,10 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); if (unlikely(bit_pos > d->max || le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) { - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: corrupted namelen=%d, run fsck to fix.", - __func__, le16_to_cpu(de->name_len)); + f2fs_warn(sbi, "%s: corrupted namelen=%d, run fsck to fix.", + __func__, le16_to_cpu(de->name_len)); set_sbi_flag(sbi, SBI_NEED_FSCK); - err = -EINVAL; + err = -EFSCORRUPTED; goto out; } @@ -828,8 +826,8 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, int save_len = fstr->len; err = fscrypt_fname_disk_to_usr(d->inode, - (u32)de->hash_code, 0, - &de_name, fstr); + (u32)le32_to_cpu(de->hash_code), + 0, &de_name, fstr); if (err) goto out; diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index caf77fe8ac07..e60078460ad1 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -184,10 +184,9 @@ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, next_re = rb_entry(next, struct rb_entry, rb_node); if (cur_re->ofs + cur_re->len > next_re->ofs) { - f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, " - "cur(%u, %u) next(%u, %u)", - cur_re->ofs, cur_re->len, - next_re->ofs, next_re->len); + f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)", + cur_re->ofs, cur_re->len, + next_re->ofs, next_re->len); return false; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 06b89a9862ab..17382da7f0bd 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -136,6 +136,9 @@ struct f2fs_mount_info { int alloc_mode; /* segment allocation policy */ int fsync_mode; /* fsync policy */ bool test_dummy_encryption; /* test dummy encryption */ + block_t unusable_cap; /* Amount of space allowed to be + * unusable when disabling checkpoint + */ }; #define F2FS_FEATURE_ENCRYPT 0x0001 @@ -412,6 +415,7 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, #define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32) #define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32) #define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15) +#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64) #define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY #define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY @@ -476,8 +480,8 @@ static inline int get_inline_xattr_addrs(struct inode *inode); #define 
NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ BITS_PER_BYTE + 1)) -#define INLINE_DENTRY_BITMAP_SIZE(inode) ((NR_INLINE_DENTRY(inode) + \ - BITS_PER_BYTE - 1) / BITS_PER_BYTE) +#define INLINE_DENTRY_BITMAP_SIZE(inode) \ + DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE) #define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ NR_INLINE_DENTRY(inode) + \ @@ -1052,6 +1056,8 @@ struct f2fs_io_info { bool retry; /* need to reallocate block address */ enum iostat_type io_type; /* io type */ struct writeback_control *io_wbc; /* writeback control */ + struct bio **bio; /* bio for ipu */ + sector_t *last_block; /* last block number in bio */ unsigned char version; /* version of the node */ }; @@ -1111,6 +1117,7 @@ enum { SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */ SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */ SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */ + SBI_IS_RESIZEFS, /* resizefs is in process */ }; enum { @@ -1207,6 +1214,7 @@ struct f2fs_sb_info { /* for inode management */ struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */ spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */ + struct mutex flush_lock; /* for flush exclusion */ /* for extent tree cache */ struct radix_tree_root extent_tree_root;/* cache extent cache entries */ @@ -1230,6 +1238,7 @@ struct f2fs_sb_info { unsigned int segs_per_sec; /* segments per section */ unsigned int secs_per_zone; /* sections per zone */ unsigned int total_sections; /* total section count */ + struct mutex resize_mutex; /* for resize exclusion */ unsigned int total_node_count; /* total node block count */ unsigned int total_valid_node_count; /* valid node block count */ loff_t max_file_blocks; /* max block index of file */ @@ -1247,6 +1256,7 @@ struct f2fs_sb_info { block_t unusable_block_count; /* # of blocks saved by last cp */ unsigned int nquota_files; /* # of quota sysfile */ + struct rw_semaphore quota_sem; /* blocking cp for flags */ /* # of pages, see count_type */ atomic_t nr_pages[NR_COUNT_TYPE]; @@ -1488,7 +1498,7 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) { - return F2FS_M_SB(page->mapping); + return F2FS_M_SB(page_file_mapping(page)); } static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) @@ -1766,8 +1776,12 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, if (!__allow_reserved_blocks(sbi, inode, true)) avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; - if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) - avail_user_block_count -= sbi->unusable_block_count; + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { + if (avail_user_block_count > sbi->unusable_block_count) + avail_user_block_count -= sbi->unusable_block_count; + else + avail_user_block_count = 0; + } if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { diff = sbi->total_valid_block_count - avail_user_block_count; if (diff > *count) @@ -1795,7 +1809,20 @@ enospc: return -ENOSPC; } -void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); +__printf(2, 3) +void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); + +#define f2fs_err(sbi, fmt, ...) \ + f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) +#define f2fs_warn(sbi, fmt, ...) 
\ + f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) +#define f2fs_notice(sbi, fmt, ...) \ + f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) +#define f2fs_info(sbi, fmt, ...) \ + f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) +#define f2fs_debug(sbi, fmt, ...) \ + f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) + static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, struct inode *inode, block_t count) @@ -1811,11 +1838,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, sbi->current_reserved_blocks + count); spin_unlock(&sbi->stat_lock); if (unlikely(inode->i_blocks < sectors)) { - f2fs_msg(sbi->sb, KERN_WARNING, - "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", - inode->i_ino, - (unsigned long long)inode->i_blocks, - (unsigned long long)sectors); + f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", + inode->i_ino, + (unsigned long long)inode->i_blocks, + (unsigned long long)sectors); set_sbi_flag(sbi, SBI_NEED_FSCK); return; } @@ -1967,7 +1993,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, struct inode *inode, bool is_inode) { block_t valid_block_count; - unsigned int valid_node_count; + unsigned int valid_node_count, user_block_count; int err; if (is_inode) { @@ -1994,10 +2020,11 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, if (!__allow_reserved_blocks(sbi, inode, false)) valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; + user_block_count = sbi->user_block_count; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) - valid_block_count += sbi->unusable_block_count; + user_block_count -= sbi->unusable_block_count; - if (unlikely(valid_block_count > sbi->user_block_count)) { + if (unlikely(valid_block_count > user_block_count)) { spin_unlock(&sbi->stat_lock); goto enospc; } @@ -2052,10 +2079,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, dquot_free_inode(inode); } else { if (unlikely(inode->i_blocks == 0)) { - f2fs_msg(sbi->sb, KERN_WARNING, - "Inconsistent i_blocks, ino:%lu, iblocks:%llu", - inode->i_ino, - (unsigned long long)inode->i_blocks); + f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu", + inode->i_ino, + (unsigned long long)inode->i_blocks); set_sbi_flag(sbi, SBI_NEED_FSCK); return; } @@ -2191,6 +2217,9 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, static inline bool is_idle(struct f2fs_sb_info *sbi, int type) { + if (sbi->gc_mode == GC_URGENT) + return true; + if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || get_pages(sbi, F2FS_WB_CP_DATA) || @@ -2198,7 +2227,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type) get_pages(sbi, F2FS_DIO_WRITE)) return false; - if (SM_I(sbi) && SM_I(sbi)->dcc_info && + if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) return false; @@ -2320,57 +2349,23 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr) } /* - * Inode flags + * On-disk inode flags (f2fs_inode::i_flags) */ -#define F2FS_SECRM_FL 0x00000001 /* Secure deletion */ -#define F2FS_UNRM_FL 0x00000002 /* Undelete */ -#define F2FS_COMPR_FL 0x00000004 /* Compress file */ #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ #define 
F2FS_NOATIME_FL 0x00000080 /* do not update atime */ -/* Reserved for compression usage... */ -#define F2FS_DIRTY_FL 0x00000100 -#define F2FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ -#define F2FS_NOCOMPR_FL 0x00000400 /* Don't compress */ -#define F2FS_ENCRYPT_FL 0x00000800 /* encrypted file */ -/* End compression flags --- maybe not all used */ #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ -#define F2FS_IMAGIC_FL 0x00002000 /* AFS directory */ -#define F2FS_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */ -#define F2FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ -#define F2FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ -#define F2FS_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ -#define F2FS_EXTENTS_FL 0x00080000 /* Inode uses extents */ -#define F2FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ -#define F2FS_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */ -#define F2FS_NOCOW_FL 0x00800000 /* Do not cow file */ -#define F2FS_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */ #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ -#define F2FS_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ - -#define F2FS_FL_USER_VISIBLE 0x30CBDFFF /* User visible flags */ -#define F2FS_FL_USER_MODIFIABLE 0x204BC0FF /* User modifiable flags */ - -/* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */ -#define F2FS_FL_XFLAG_VISIBLE (F2FS_SYNC_FL | \ - F2FS_IMMUTABLE_FL | \ - F2FS_APPEND_FL | \ - F2FS_NODUMP_FL | \ - F2FS_NOATIME_FL | \ - F2FS_PROJINHERIT_FL) /* Flags that should be inherited by new inodes from their parent. */ -#define F2FS_FL_INHERITED (F2FS_SECRM_FL | F2FS_UNRM_FL | F2FS_COMPR_FL |\ - F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL |\ - F2FS_NOCOMPR_FL | F2FS_JOURNAL_DATA_FL |\ - F2FS_NOTAIL_FL | F2FS_DIRSYNC_FL |\ - F2FS_PROJINHERIT_FL) +#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ + F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL) /* Flags that are appropriate for regular files (all but dir-specific ones). */ -#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_TOPDIR_FL)) +#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL)) /* Flags that are appropriate for non-directories/regular files. 
*/ #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) @@ -2856,9 +2851,8 @@ static inline void verify_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { - f2fs_msg(sbi->sb, KERN_ERR, - "invalid blkaddr: %u, type: %d, run fsck to fix.", - blkaddr, type); + f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", + blkaddr, type); f2fs_bug_on(sbi, 1); } } @@ -2989,8 +2983,6 @@ int f2fs_quota_sync(struct super_block *sb, int type); void f2fs_quota_off_umount(struct super_block *sb); int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); int f2fs_sync_fs(struct super_block *sb, int sync); -extern __printf(3, 4) -void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); /* @@ -3074,9 +3066,12 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); -int f2fs_disable_cp_again(struct f2fs_sb_info *sbi); +block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); +int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); +void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, + unsigned int start, unsigned int end); void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, @@ -3169,6 +3164,7 @@ void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, nid_t ino, enum page_type type); void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); int f2fs_submit_page_bio(struct f2fs_io_info *fio); +int f2fs_merge_page_bio(struct f2fs_io_info *fio); void f2fs_submit_page_write(struct f2fs_io_info *fio); struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, block_t blk_addr, struct bio *bio); @@ -3214,6 +3210,7 @@ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, unsigned int segno); void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); +int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count); /* * recovery.c @@ -3686,7 +3683,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, if (test_opt(sbi, LFS) && (rw == WRITE) && block_unaligned_IO(inode, iocb, iter)) return true; - if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED)) + if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) && + !(inode->i_flags & S_SWAPFILE)) return true; return false; @@ -3712,4 +3710,7 @@ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi) return false; } +#define EFSBADCRC EBADMSG /* Bad CRC detected */ +#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ + #endif /* _LINUX_F2FS_H */ diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 45b45f37d347..f8d46df8fa9e 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -707,11 +707,9 @@ int f2fs_getattr(const struct path *path, struct kstat *stat, stat->btime.tv_nsec = fi->i_crtime.tv_nsec; } - flags = fi->i_flags & F2FS_FL_USER_VISIBLE; + flags = fi->i_flags; if (flags & F2FS_APPEND_FL) stat->attributes |= STATX_ATTR_APPEND; - if (flags & F2FS_COMPR_FL) - stat->attributes |= STATX_ATTR_COMPRESSED; if (IS_ENCRYPTED(inode)) stat->attributes |= 
STATX_ATTR_ENCRYPTED; if (flags & F2FS_IMMUTABLE_FL) @@ -720,7 +718,6 @@ int f2fs_getattr(const struct path *path, struct kstat *stat, stat->attributes |= STATX_ATTR_NODUMP; stat->attributes_mask |= (STATX_ATTR_APPEND | - STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED | STATX_ATTR_IMMUTABLE | STATX_ATTR_NODUMP); @@ -1026,7 +1023,7 @@ next_dnode: !f2fs_is_valid_blkaddr(sbi, *blkaddr, DATA_GENERIC_ENHANCE)) { f2fs_put_dnode(&dn); - return -EFAULT; + return -EFSCORRUPTED; } if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) { @@ -1214,7 +1211,7 @@ roll_back: static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE; + pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); pgoff_t start = offset >> PAGE_SHIFT; pgoff_t end = (offset + len) >> PAGE_SHIFT; int ret; @@ -1467,7 +1464,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) pg_start = offset >> PAGE_SHIFT; pg_end = (offset + len) >> PAGE_SHIFT; delta = pg_end - pg_start; - idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE; + idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); /* avoid gc operation during block exchange */ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); @@ -1531,7 +1528,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset, if (off_end) map.m_len++; - err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO); + if (f2fs_is_pinned_file(inode)) + map.m_seg_type = CURSEG_COLD_DATA; + + err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ? + F2FS_GET_BLOCK_PRE_DIO : + F2FS_GET_BLOCK_PRE_AIO)); if (err) { pgoff_t last_off; @@ -1648,44 +1650,22 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id) return 0; } -static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) +static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) { - struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); - unsigned int flags = fi->i_flags; - - if (IS_ENCRYPTED(inode)) - flags |= F2FS_ENCRYPT_FL; - if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) - flags |= F2FS_INLINE_DATA_FL; - if (is_inode_flag_set(inode, FI_PIN_FILE)) - flags |= F2FS_NOCOW_FL; - - flags &= F2FS_FL_USER_VISIBLE; - - return put_user(flags, (int __user *)arg); -} - -static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags) -{ - struct f2fs_inode_info *fi = F2FS_I(inode); - unsigned int oldflags; + u32 oldflags; /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) return -EPERM; - flags = f2fs_mask_flags(inode->i_mode, flags); - oldflags = fi->i_flags; - if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL)) + if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL)) if (!capable(CAP_LINUX_IMMUTABLE)) return -EPERM; - flags = flags & F2FS_FL_USER_MODIFIABLE; - flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE; - fi->i_flags = flags; + fi->i_flags = iflags | (oldflags & ~mask); if (fi->i_flags & F2FS_PROJINHERIT_FL) set_inode_flag(inode, FI_PROJ_INHERIT); @@ -1698,26 +1678,124 @@ static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags) return 0; } +/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */ + +/* + * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry + * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to + * F2FS_GETTABLE_FS_FL. 
To also make it settable via FS_IOC_SETFLAGS, also add + * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL. + */ + +static const struct { + u32 iflag; + u32 fsflag; +} f2fs_fsflags_map[] = { + { F2FS_SYNC_FL, FS_SYNC_FL }, + { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL }, + { F2FS_APPEND_FL, FS_APPEND_FL }, + { F2FS_NODUMP_FL, FS_NODUMP_FL }, + { F2FS_NOATIME_FL, FS_NOATIME_FL }, + { F2FS_INDEX_FL, FS_INDEX_FL }, + { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL }, + { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL }, +}; + +#define F2FS_GETTABLE_FS_FL ( \ + FS_SYNC_FL | \ + FS_IMMUTABLE_FL | \ + FS_APPEND_FL | \ + FS_NODUMP_FL | \ + FS_NOATIME_FL | \ + FS_INDEX_FL | \ + FS_DIRSYNC_FL | \ + FS_PROJINHERIT_FL | \ + FS_ENCRYPT_FL | \ + FS_INLINE_DATA_FL | \ + FS_NOCOW_FL) + +#define F2FS_SETTABLE_FS_FL ( \ + FS_SYNC_FL | \ + FS_IMMUTABLE_FL | \ + FS_APPEND_FL | \ + FS_NODUMP_FL | \ + FS_NOATIME_FL | \ + FS_DIRSYNC_FL | \ + FS_PROJINHERIT_FL) + +/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */ +static inline u32 f2fs_iflags_to_fsflags(u32 iflags) +{ + u32 fsflags = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) + if (iflags & f2fs_fsflags_map[i].iflag) + fsflags |= f2fs_fsflags_map[i].fsflag; + + return fsflags; +} + +/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */ +static inline u32 f2fs_fsflags_to_iflags(u32 fsflags) +{ + u32 iflags = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) + if (fsflags & f2fs_fsflags_map[i].fsflag) + iflags |= f2fs_fsflags_map[i].iflag; + + return iflags; +} + +static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_inode_info *fi = F2FS_I(inode); + u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags); + + if (IS_ENCRYPTED(inode)) + fsflags |= FS_ENCRYPT_FL; + if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) + fsflags |= FS_INLINE_DATA_FL; + if (is_inode_flag_set(inode, FI_PIN_FILE)) + fsflags |= FS_NOCOW_FL; + + fsflags &= F2FS_GETTABLE_FS_FL; + + return put_user(fsflags, (int __user *)arg); +} + static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - unsigned int flags; + u32 fsflags; + u32 iflags; int ret; if (!inode_owner_or_capable(inode)) return -EACCES; - if (get_user(flags, (int __user *)arg)) + if (get_user(fsflags, (int __user *)arg)) return -EFAULT; + if (fsflags & ~F2FS_GETTABLE_FS_FL) + return -EOPNOTSUPP; + fsflags &= F2FS_SETTABLE_FS_FL; + + iflags = f2fs_fsflags_to_iflags(fsflags); + if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) + return -EOPNOTSUPP; + ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); - ret = __f2fs_ioc_setflags(inode, flags); - + ret = f2fs_setflags_common(inode, iflags, + f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL)); inode_unlock(inode); mnt_drop_write_file(filp); return ret; @@ -1764,9 +1842,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) * f2fs_is_atomic_file. 
*/ if (get_dirty_pages(inode)) - f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, - "Unexpected flush for atomic writes: ino=%lu, npages=%u", - inode->i_ino, get_dirty_pages(inode)); + f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u", + inode->i_ino, get_dirty_pages(inode)); ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); if (ret) { up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); @@ -2201,8 +2278,7 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg) return -EROFS; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { - f2fs_msg(sbi->sb, KERN_INFO, - "Skipping Checkpoint. Checkpoints currently disabled."); + f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled."); return -EINVAL; } @@ -2291,7 +2367,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, if (!fragmented) goto out; - sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi); + sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi)); /* * make sure there are enough free section for LFS allocation, this can @@ -2587,10 +2663,8 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || __is_large_section(sbi)) { - f2fs_msg(sbi->sb, KERN_WARNING, - "Can't flush %u in %d for segs_per_sec %u != 1", - range.dev_num, sbi->s_ndevs, - sbi->segs_per_sec); + f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1", + range.dev_num, sbi->s_ndevs, sbi->segs_per_sec); return -EINVAL; } @@ -2727,47 +2801,56 @@ static int f2fs_ioc_setproject(struct file *filp, __u32 projid) } #endif -/* Transfer internal flags to xflags */ -static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags) -{ - __u32 xflags = 0; - - if (iflags & F2FS_SYNC_FL) - xflags |= FS_XFLAG_SYNC; - if (iflags & F2FS_IMMUTABLE_FL) - xflags |= FS_XFLAG_IMMUTABLE; - if (iflags & F2FS_APPEND_FL) - xflags |= FS_XFLAG_APPEND; - if (iflags & F2FS_NODUMP_FL) - xflags |= FS_XFLAG_NODUMP; - if (iflags & F2FS_NOATIME_FL) - xflags |= FS_XFLAG_NOATIME; - if (iflags & F2FS_PROJINHERIT_FL) - xflags |= FS_XFLAG_PROJINHERIT; +/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */ + +/* + * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable + * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its + * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS. 
+ */ + +static const struct { + u32 iflag; + u32 xflag; +} f2fs_xflags_map[] = { + { F2FS_SYNC_FL, FS_XFLAG_SYNC }, + { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE }, + { F2FS_APPEND_FL, FS_XFLAG_APPEND }, + { F2FS_NODUMP_FL, FS_XFLAG_NODUMP }, + { F2FS_NOATIME_FL, FS_XFLAG_NOATIME }, + { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT }, +}; + +#define F2FS_SUPPORTED_XFLAGS ( \ + FS_XFLAG_SYNC | \ + FS_XFLAG_IMMUTABLE | \ + FS_XFLAG_APPEND | \ + FS_XFLAG_NODUMP | \ + FS_XFLAG_NOATIME | \ + FS_XFLAG_PROJINHERIT) + +/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */ +static inline u32 f2fs_iflags_to_xflags(u32 iflags) +{ + u32 xflags = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) + if (iflags & f2fs_xflags_map[i].iflag) + xflags |= f2fs_xflags_map[i].xflag; + return xflags; } -#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \ - FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \ - FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT) - -/* Transfer xflags flags to internal */ -static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags) +/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */ +static inline u32 f2fs_xflags_to_iflags(u32 xflags) { - unsigned long iflags = 0; + u32 iflags = 0; + int i; - if (xflags & FS_XFLAG_SYNC) - iflags |= F2FS_SYNC_FL; - if (xflags & FS_XFLAG_IMMUTABLE) - iflags |= F2FS_IMMUTABLE_FL; - if (xflags & FS_XFLAG_APPEND) - iflags |= F2FS_APPEND_FL; - if (xflags & FS_XFLAG_NODUMP) - iflags |= F2FS_NODUMP_FL; - if (xflags & FS_XFLAG_NOATIME) - iflags |= F2FS_NOATIME_FL; - if (xflags & FS_XFLAG_PROJINHERIT) - iflags |= F2FS_PROJINHERIT_FL; + for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) + if (xflags & f2fs_xflags_map[i].xflag) + iflags |= f2fs_xflags_map[i].iflag; return iflags; } @@ -2779,8 +2862,7 @@ static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) struct fsxattr fa; memset(&fa, 0, sizeof(struct fsxattr)); - fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags & - F2FS_FL_USER_VISIBLE); + fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags); if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) fa.fsx_projid = (__u32)from_kprojid(&init_user_ns, @@ -2818,9 +2900,8 @@ static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa) static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - struct f2fs_inode_info *fi = F2FS_I(inode); struct fsxattr fa; - unsigned int flags; + u32 iflags; int err; if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa))) @@ -2830,11 +2911,11 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) if (!inode_owner_or_capable(inode)) return -EACCES; - if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS) + if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS) return -EOPNOTSUPP; - flags = f2fs_xflags_to_iflags(fa.fsx_xflags); - if (f2fs_mask_flags(inode->i_mode, flags) != flags) + iflags = f2fs_xflags_to_iflags(fa.fsx_xflags); + if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) return -EOPNOTSUPP; err = mnt_want_write_file(filp); @@ -2845,9 +2926,8 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) err = f2fs_ioctl_check_project(inode, &fa); if (err) goto out; - flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) | - (flags & F2FS_FL_XFLAG_VISIBLE); - err = __f2fs_ioc_setflags(inode, flags); + err = f2fs_setflags_common(inode, iflags, + f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); if (err) goto out; @@ -2869,10 +2949,9 @@ int f2fs_pin_file_control(struct inode *inode, bool inc) 
fi->i_gc_failures[GC_FAILURE_PIN] + 1); if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) { - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: Enable GC = ino %lx after %x GC trials", - __func__, inode->i_ino, - fi->i_gc_failures[GC_FAILURE_PIN]); + f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials", + __func__, inode->i_ino, + fi->i_gc_failures[GC_FAILURE_PIN]); clear_inode_flag(inode, FI_PIN_FILE); return -EAGAIN; } @@ -2885,9 +2964,6 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg) __u32 pin; int ret = 0; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (get_user(pin, (__u32 __user *)arg)) return -EFAULT; @@ -2980,6 +3056,27 @@ static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg) return f2fs_precache_extents(file_inode(filp)); } +static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); + __u64 block_count; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (f2fs_readonly(sbi->sb)) + return -EROFS; + + if (copy_from_user(&block_count, (void __user *)arg, + sizeof(block_count))) + return -EFAULT; + + ret = f2fs_resize_fs(sbi, block_count); + + return ret; +} + long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) @@ -3036,6 +3133,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_set_pin_file(filp, arg); case F2FS_IOC_PRECACHE_EXTENTS: return f2fs_ioc_precache_extents(filp, arg); + case F2FS_IOC_RESIZE_FS: + return f2fs_ioc_resize_fs(filp, arg); default: return -ENOTTY; } @@ -3149,6 +3248,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case F2FS_IOC_GET_PIN_FILE: case F2FS_IOC_SET_PIN_FILE: case F2FS_IOC_PRECACHE_EXTENTS: + case F2FS_IOC_RESIZE_FS: break; default: return -ENOIOCTLCMD; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 963fb4571fd9..6691f526fa40 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -311,10 +311,11 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, struct sit_info *sm = SIT_I(sbi); struct victim_sel_policy p; unsigned int secno, last_victim; - unsigned int last_segment = MAIN_SEGS(sbi); + unsigned int last_segment; unsigned int nsearched = 0; mutex_lock(&dirty_i->seglist_lock); + last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec; p.alloc_mode = alloc_mode; select_policy(sbi, gc_type, type, &p); @@ -387,7 +388,8 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, goto next; /* Don't touch checkpointed data */ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) && - get_ckpt_valid_blocks(sbi, segno))) + get_ckpt_valid_blocks(sbi, segno) && + p.alloc_mode != SSR)) goto next; if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) goto next; @@ -404,7 +406,8 @@ next: sm->last_victim[p.gc_mode] = last_victim + 1; else sm->last_victim[p.gc_mode] = segno + 1; - sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi); + sm->last_victim[p.gc_mode] %= + (MAIN_SECS(sbi) * sbi->segs_per_sec); break; } } @@ -615,9 +618,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, } if (sum->version != dni->version) { - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: valid data with mismatched node version.", - __func__); + f2fs_warn(sbi, "%s: valid data with mismatched node version.", + __func__); set_sbi_flag(sbi, SBI_NEED_FSCK); } @@ -658,7 +660,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index) dn.data_blkaddr = ei.blk + index - 
ei.fofs; if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, DATA_GENERIC_ENHANCE_READ))) { - err = -EFAULT; + err = -EFSCORRUPTED; goto put_page; } goto got_it; @@ -676,7 +678,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index) } if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, DATA_GENERIC_ENHANCE))) { - err = -EFAULT; + err = -EFSCORRUPTED; goto put_page; } got_it: @@ -1180,9 +1182,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, sum = page_address(sum_page); if (type != GET_SUM_TYPE((&sum->footer))) { - f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) " - "type [%d, %d] in SSA and SIT", - segno, type, GET_SUM_TYPE((&sum->footer))); + f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT", + segno, type, GET_SUM_TYPE((&sum->footer))); set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_stop_checkpoint(sbi, false); goto skip; @@ -1360,3 +1361,176 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) SIT_I(sbi)->last_victim[ALLOC_NEXT] = GET_SEGNO(sbi, FDEV(0).end_blk) + 1; } + +static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, + unsigned int end) +{ + int type; + unsigned int segno, next_inuse; + int err = 0; + + /* Move out cursegs from the target range */ + for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++) + allocate_segment_for_resize(sbi, type, start, end); + + /* do GC to move out valid blocks in the range */ + for (segno = start; segno <= end; segno += sbi->segs_per_sec) { + struct gc_inode_list gc_list = { + .ilist = LIST_HEAD_INIT(gc_list.ilist), + .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), + }; + + mutex_lock(&sbi->gc_mutex); + do_garbage_collect(sbi, segno, &gc_list, FG_GC); + mutex_unlock(&sbi->gc_mutex); + put_gc_inode(&gc_list); + + if (get_valid_blocks(sbi, segno, true)) + return -EAGAIN; + } + + err = f2fs_sync_fs(sbi->sb, 1); + if (err) + return err; + + next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); + if (next_inuse <= end) { + f2fs_err(sbi, "segno %u should be free but still inuse!", + next_inuse); + f2fs_bug_on(sbi, 1); + } + return err; +} + +static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) +{ + struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); + int section_count = le32_to_cpu(raw_sb->section_count); + int segment_count = le32_to_cpu(raw_sb->segment_count); + int segment_count_main = le32_to_cpu(raw_sb->segment_count_main); + long long block_count = le64_to_cpu(raw_sb->block_count); + int segs = secs * sbi->segs_per_sec; + + raw_sb->section_count = cpu_to_le32(section_count + secs); + raw_sb->segment_count = cpu_to_le32(segment_count + segs); + raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs); + raw_sb->block_count = cpu_to_le64(block_count + + (long long)segs * sbi->blocks_per_seg); +} + +static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) +{ + int segs = secs * sbi->segs_per_sec; + long long user_block_count = + le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); + + SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; + MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; + FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; + FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; + F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + + (long long)segs * sbi->blocks_per_seg); +} + +int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) +{ + __u64 old_block_count, shrunk_blocks; + unsigned int secs; + int gc_mode, gc_type; + int err = 0; + __u32 rem; + 
+ old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count); + if (block_count > old_block_count) + return -EINVAL; + + /* new fs size should align to section size */ + div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem); + if (rem) + return -EINVAL; + + if (block_count == old_block_count) + return 0; + + if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { + f2fs_err(sbi, "Should run fsck to repair first."); + return -EFSCORRUPTED; + } + + if (test_opt(sbi, DISABLE_CHECKPOINT)) { + f2fs_err(sbi, "Checkpoint should be enabled."); + return -EINVAL; + } + + freeze_bdev(sbi->sb->s_bdev); + + shrunk_blocks = old_block_count - block_count; + secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); + spin_lock(&sbi->stat_lock); + if (shrunk_blocks + valid_user_blocks(sbi) + + sbi->current_reserved_blocks + sbi->unusable_block_count + + F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) + err = -ENOSPC; + else + sbi->user_block_count -= shrunk_blocks; + spin_unlock(&sbi->stat_lock); + if (err) { + thaw_bdev(sbi->sb->s_bdev, sbi->sb); + return err; + } + + mutex_lock(&sbi->resize_mutex); + set_sbi_flag(sbi, SBI_IS_RESIZEFS); + + mutex_lock(&DIRTY_I(sbi)->seglist_lock); + + MAIN_SECS(sbi) -= secs; + + for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++) + if (SIT_I(sbi)->last_victim[gc_mode] >= + MAIN_SECS(sbi) * sbi->segs_per_sec) + SIT_I(sbi)->last_victim[gc_mode] = 0; + + for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++) + if (sbi->next_victim_seg[gc_type] >= + MAIN_SECS(sbi) * sbi->segs_per_sec) + sbi->next_victim_seg[gc_type] = NULL_SEGNO; + + mutex_unlock(&DIRTY_I(sbi)->seglist_lock); + + err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec, + MAIN_SEGS(sbi) - 1); + if (err) + goto out; + + update_sb_metadata(sbi, -secs); + + err = f2fs_commit_super(sbi, false); + if (err) { + update_sb_metadata(sbi, secs); + goto out; + } + + update_fs_metadata(sbi, -secs); + clear_sbi_flag(sbi, SBI_IS_RESIZEFS); + err = f2fs_sync_fs(sbi->sb, 1); + if (err) { + update_fs_metadata(sbi, secs); + update_sb_metadata(sbi, secs); + f2fs_commit_super(sbi, false); + } +out: + if (err) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); + + MAIN_SECS(sbi) += secs; + spin_lock(&sbi->stat_lock); + sbi->user_block_count += shrunk_blocks; + spin_unlock(&sbi->stat_lock); + } + clear_sbi_flag(sbi, SBI_IS_RESIZEFS); + mutex_unlock(&sbi->resize_mutex); + thaw_bdev(sbi->sb->s_bdev, sbi->sb); + return err; +} diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 404d2462a0fe..3613efca8c00 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -140,11 +140,9 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) if (unlikely(dn->data_blkaddr != NEW_ADDR)) { f2fs_put_dnode(dn); set_sbi_flag(fio.sbi, SBI_NEED_FSCK); - f2fs_msg(fio.sbi->sb, KERN_WARNING, - "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, " - "run fsck to fix.", - __func__, dn->inode->i_ino, dn->data_blkaddr); - return -EINVAL; + f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.", + __func__, dn->inode->i_ino, dn->data_blkaddr); + return -EFSCORRUPTED; } f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page)); @@ -383,11 +381,9 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, if (unlikely(dn.data_blkaddr != NEW_ADDR)) { f2fs_put_dnode(&dn); set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK); - f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING, - "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, " - "run fsck to fix.", - 
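
The f2fs_resize_fs() path above only shrinks: the requested block count must be section-aligned and no larger than the current size, and the difference is then handled in whole sections. A small userspace sketch of that arithmetic (the geometry constants and helper names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: stand-ins for the sbi geometry, not kernel structures. */
#define BLKS_PER_SEG   512U            /* 2 MiB segments of 4 KiB blocks */
#define SEGS_PER_SEC   1U
#define BLKS_PER_SEC   (BLKS_PER_SEG * SEGS_PER_SEC)

/* Mirror the checks at the top of f2fs_resize_fs(): reject growth,
 * require section alignment, and compute how many sections to free. */
static int check_resize(uint64_t old_blocks, uint64_t new_blocks,
                        uint64_t *secs_to_free)
{
    if (new_blocks > old_blocks)
        return -1;                      /* only shrinking is supported */
    if (new_blocks % BLKS_PER_SEC)
        return -1;                      /* must align to section size */
    *secs_to_free = (old_blocks - new_blocks) / BLKS_PER_SEC;
    return 0;
}

int main(void)
{
    uint64_t secs;

    if (check_resize(4096 * BLKS_PER_SEC, 4000 * BLKS_PER_SEC, &secs) == 0)
        printf("shrink by %llu sections\n", (unsigned long long)secs);
    return 0;
}
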
__func__, dir->i_ino, dn.data_blkaddr); - err = -EINVAL; + f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.", + __func__, dir->i_ino, dn.data_blkaddr); + err = -EFSCORRUPTED; goto out; } diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index ccb02226dd2c..a33d7a849b2d 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -74,7 +74,7 @@ static int __written_first_block(struct f2fs_sb_info *sbi, if (!__is_valid_data_blkaddr(addr)) return 1; if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) - return -EFAULT; + return -EFSCORRUPTED; return 0; } @@ -176,9 +176,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page) calculated = f2fs_inode_chksum(sbi, page); if (provided != calculated) - f2fs_msg(sbi->sb, KERN_WARNING, - "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x", - page->index, ino_of_node(page), provided, calculated); + f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x", + page->index, ino_of_node(page), provided, calculated); return provided == calculated; } @@ -202,50 +201,41 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks); if (!iblocks) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, " - "run fsck to fix.", - __func__, inode->i_ino, iblocks); + f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.", + __func__, inode->i_ino, iblocks); return false; } if (ino_of_node(node_page) != nid_of_node(node_page)) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: corrupted inode footer i_ino=%lx, ino,nid: " - "[%u, %u] run fsck to fix.", - __func__, inode->i_ino, - ino_of_node(node_page), nid_of_node(node_page)); + f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.", + __func__, inode->i_ino, + ino_of_node(node_page), nid_of_node(node_page)); return false; } if (f2fs_sb_has_flexible_inline_xattr(sbi) && !f2fs_has_extra_attr(inode)) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: corrupted inode ino=%lx, run fsck to fix.", - __func__, inode->i_ino); + f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.", + __func__, inode->i_ino); return false; } if (f2fs_has_extra_attr(inode) && !f2fs_sb_has_extra_attr(sbi)) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: inode (ino=%lx) is with extra_attr, " - "but extra_attr feature is off", - __func__, inode->i_ino); + f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off", + __func__, inode->i_ino); return false; } if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE || fi->i_extra_isize % sizeof(__le32)) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, " - "max: %zu", - __func__, inode->i_ino, fi->i_extra_isize, - F2FS_TOTAL_EXTRA_ATTR_SIZE); + f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu", + __func__, inode->i_ino, fi->i_extra_isize, + F2FS_TOTAL_EXTRA_ATTR_SIZE); return false; } @@ -255,11 +245,9 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) (!fi->i_inline_xattr_size || fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: inode (ino=%lx) has corrupted " - "i_inline_xattr_size: 
%d, max: %zu", - __func__, inode->i_ino, fi->i_inline_xattr_size, - MAX_INLINE_XATTR_SIZE); + f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu", + __func__, inode->i_ino, fi->i_inline_xattr_size, + MAX_INLINE_XATTR_SIZE); return false; } @@ -272,11 +260,9 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1, DATA_GENERIC_ENHANCE))) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: inode (ino=%lx) extent info [%u, %u, %u] " - "is incorrect, run fsck to fix", - __func__, inode->i_ino, - ei->blk, ei->fofs, ei->len); + f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix", + __func__, inode->i_ino, + ei->blk, ei->fofs, ei->len); return false; } } @@ -284,19 +270,15 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) if (f2fs_has_inline_data(inode) && (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: inode (ino=%lx, mode=%u) should not have " - "inline_data, run fsck to fix", - __func__, inode->i_ino, inode->i_mode); + f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix", + __func__, inode->i_ino, inode->i_mode); return false; } if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: inode (ino=%lx, mode=%u) should not have " - "inline_dentry, run fsck to fix", - __func__, inode->i_ino, inode->i_mode); + f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix", + __func__, inode->i_ino, inode->i_mode); return false; } @@ -343,6 +325,8 @@ static int do_read_inode(struct inode *inode) le16_to_cpu(ri->i_gc_failures); fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid); fi->i_flags = le32_to_cpu(ri->i_flags); + if (S_ISREG(inode->i_mode)) + fi->i_flags &= ~F2FS_PROJINHERIT_FL; fi->flags = 0; fi->i_advise = ri->i_advise; fi->i_pino = le32_to_cpu(ri->i_pino); @@ -374,7 +358,7 @@ static int do_read_inode(struct inode *inode) if (!sanity_check_inode(inode, node_page)) { f2fs_put_page(node_page, 1); - return -EINVAL; + return -EFSCORRUPTED; } /* check data exist */ @@ -783,8 +767,7 @@ void f2fs_handle_failed_inode(struct inode *inode) err = f2fs_get_node_info(sbi, inode->i_ino, &ni); if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "May loss orphan inode, run fsck to fix."); + f2fs_warn(sbi, "May loss orphan inode, run fsck to fix."); goto out; } @@ -792,8 +775,7 @@ void f2fs_handle_failed_inode(struct inode *inode) err = f2fs_acquire_orphan_inode(sbi); if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "Too many orphan inodes, run fsck to fix."); + f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix."); } else { f2fs_add_orphan_inode(inode); } diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 0f77f9242751..c5b99042e6f2 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -385,9 +385,8 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino) int err = 0; if (f2fs_readonly(sbi->sb)) { - f2fs_msg(sbi->sb, KERN_INFO, - "skip recovering inline_dots inode (ino:%lu, pino:%u) " - "in readonly mountpoint", dir->i_ino, pino); + f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint", + dir->i_ino, pino); return 0; } @@ -484,9 +483,8 @@ static struct dentry 
*f2fs_lookup(struct inode *dir, struct dentry *dentry, if (IS_ENCRYPTED(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && !fscrypt_has_permitted_context(dir, inode)) { - f2fs_msg(inode->i_sb, KERN_WARNING, - "Inconsistent encryption contexts: %lu/%lu", - dir->i_ino, inode->i_ino); + f2fs_warn(F2FS_I_SB(inode), "Inconsistent encryption contexts: %lu/%lu", + dir->i_ino, inode->i_ino); err = -EPERM; goto out_iput; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 18a038a2a9fa..a18b2a895771 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -34,10 +34,9 @@ int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) { if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) { set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: out-of-range nid=%x, run fsck to fix.", - __func__, nid); - return -EINVAL; + f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.", + __func__, nid); + return -EFSCORRUPTED; } return 0; } @@ -1189,10 +1188,8 @@ int f2fs_remove_inode_page(struct inode *inode) } if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) { - f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, - "Inconsistent i_blocks, ino:%lu, iblocks:%llu", - inode->i_ino, - (unsigned long long)inode->i_blocks); + f2fs_warn(F2FS_I_SB(inode), "Inconsistent i_blocks, ino:%lu, iblocks:%llu", + inode->i_ino, (unsigned long long)inode->i_blocks); set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); } @@ -1291,7 +1288,7 @@ static int read_node_page(struct page *page, int op_flags) if (PageUptodate(page)) { if (!f2fs_inode_chksum_verify(sbi, page)) { ClearPageUptodate(page); - return -EBADMSG; + return -EFSBADCRC; } return LOCKED_PAGE; } @@ -1375,16 +1372,15 @@ repeat: } if (!f2fs_inode_chksum_verify(sbi, page)) { - err = -EBADMSG; + err = -EFSBADCRC; goto out_err; } page_hit: if(unlikely(nid != nid_of_node(page))) { - f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, " - "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", - nid, nid_of_node(page), ino_of_node(page), - ofs_of_node(page), cpver_of_node(page), - next_blkaddr_of_node(page)); + f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", + nid, nid_of_node(page), ino_of_node(page), + ofs_of_node(page), cpver_of_node(page), + next_blkaddr_of_node(page)); err = -EINVAL; out_err: ClearPageUptodate(page); @@ -1752,9 +1748,8 @@ continue_unlock: break; } if (!ret && atomic && !marked) { - f2fs_msg(sbi->sb, KERN_DEBUG, - "Retry to write fsync mark: ino=%u, idx=%lx", - ino, last_page->index); + f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx", + ino, last_page->index); lock_page(last_page); f2fs_wait_on_page_writeback(last_page, NODE, true, true); set_page_dirty(last_page); @@ -2304,8 +2299,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, if (ret) { up_read(&nm_i->nat_tree_lock); f2fs_bug_on(sbi, !mount); - f2fs_msg(sbi->sb, KERN_ERR, - "NAT is corrupt, run fsck to fix it"); + f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); return ret; } } @@ -2725,7 +2719,7 @@ static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, i = 1; } for (; i < NAT_ENTRY_PER_BLOCK; i++) { - if (nat_blk->entries[i].block_addr != NULL_ADDR) + if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR) valid++; } if (valid == 0) { @@ -2915,7 +2909,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) nm_i->full_nat_bits = nm_i->nat_bits + 8; nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; - 
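
The __update_nat_bits() change above converts the on-disk __le32 block_addr with le32_to_cpu() before comparing it with NULL_ADDR. Since NULL_ADDR is zero the boolean result happens to be the same either way; the point is to keep the comparison between host-order values, which is the safe idiom for any on-disk little-endian field. A hedged userspace illustration (le32_to_host() is a stand-in, not a kernel API):

#include <stdint.h>
#include <stdio.h>

#define NULL_ADDR 0U

/* Stand-in for le32_to_cpu(): build a host-order value from the
 * little-endian byte layout used on disk. */
static uint32_t le32_to_host(const uint8_t b[4])
{
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
           ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
    /* A NAT entry block_addr of 0x00000200 as stored on disk (little-endian). */
    const uint8_t nat_entry_blkaddr[4] = { 0x00, 0x02, 0x00, 0x00 };

    /* Compare in host order, as the fixed hunk does.  For NULL_ADDR (zero)
     * the answer is the same either way; the conversion keeps the types
     * straight (__le32 on disk vs. CPU-order value in memory). */
    if (le32_to_host(nat_entry_blkaddr) != NULL_ADDR)
        printf("entry is in use\n");
    return 0;
}
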
f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint"); + f2fs_notice(sbi, "Found nat_bits in checkpoint"); return 0; } diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index e04f82b3f4fc..783773e4560d 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -188,10 +188,9 @@ out: name = "<encrypted>"; else name = raw_inode->i_name; - f2fs_msg(inode->i_sb, KERN_NOTICE, - "%s: ino = %x, name = %s, dir = %lx, err = %d", - __func__, ino_of_node(ipage), name, - IS_ERR(dir) ? 0 : dir->i_ino, err); + f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d", + __func__, ino_of_node(ipage), name, + IS_ERR(dir) ? 0 : dir->i_ino, err); return err; } @@ -292,9 +291,8 @@ static int recover_inode(struct inode *inode, struct page *page) else name = F2FS_INODE(page)->i_name; - f2fs_msg(inode->i_sb, KERN_NOTICE, - "recover_inode: ino = %x, name = %s, inline = %x", - ino_of_node(page), name, raw->i_inline); + f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x", + ino_of_node(page), name, raw->i_inline); return 0; } @@ -371,10 +369,9 @@ next: /* sanity check in order to detect looped node chain */ if (++loop_cnt >= free_blocks || blkaddr == next_blkaddr_of_node(page)) { - f2fs_msg(sbi->sb, KERN_NOTICE, - "%s: detect looped node chain, " - "blkaddr:%u, next:%u", - __func__, blkaddr, next_blkaddr_of_node(page)); + f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u", + __func__, blkaddr, + next_blkaddr_of_node(page)); f2fs_put_page(page, 1); err = -EINVAL; break; @@ -553,11 +550,10 @@ retry_dn: f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); if (ofs_of_node(dn.node_page) != ofs_of_node(page)) { - f2fs_msg(sbi->sb, KERN_WARNING, - "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", - inode->i_ino, ofs_of_node(dn.node_page), - ofs_of_node(page)); - err = -EFAULT; + f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", + inode->i_ino, ofs_of_node(dn.node_page), + ofs_of_node(page)); + err = -EFSCORRUPTED; goto err; } @@ -569,13 +565,13 @@ retry_dn: if (__is_valid_data_blkaddr(src) && !f2fs_is_valid_blkaddr(sbi, src, META_POR)) { - err = -EFAULT; + err = -EFSCORRUPTED; goto err; } if (__is_valid_data_blkaddr(dest) && !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) { - err = -EFAULT; + err = -EFSCORRUPTED; goto err; } @@ -642,11 +638,9 @@ retry_prev: err: f2fs_put_dnode(&dn); out: - f2fs_msg(sbi->sb, KERN_NOTICE, - "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d", - inode->i_ino, - file_keep_isize(inode) ? "keep" : "recover", - recovered, err); + f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d", + inode->i_ino, file_keep_isize(inode) ? 
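
The looped-node-chain guard above bounds the recovery walk: no legitimate chain can be longer than the number of free blocks, so exceeding that count (or a self-pointing next pointer) means corruption. A toy version of the same bound (array indices stand in for block addresses):

#include <stdio.h>

/* A toy node chain: next[i] is the index of the next node, -1 ends the chain. */
static int walk_chain(const int *next, int nr_nodes, int start)
{
    int steps = 0, cur = start;

    while (cur != -1) {
        /* Mirror the recovery guard: a walk longer than the number of
         * existing blocks, or a self-loop, means the chain is corrupted. */
        if (++steps > nr_nodes || next[cur] == cur) {
            fprintf(stderr, "looped node chain detected at %d\n", cur);
            return -1;
        }
        cur = next[cur];
    }
    return steps;
}

int main(void)
{
    int looped[4]   = { 1, 2, 3, 1 };   /* node 3 points back to node 1 */
    int straight[4] = { 1, 2, 3, -1 };

    printf("straight: %d steps\n", walk_chain(straight, 4, 0));
    printf("looped:   %d steps\n", walk_chain(looped, 4, 0));
    return 0;
}
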
"keep" : "recover", + recovered, err); return err; } @@ -734,8 +728,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) #endif if (s_flags & SB_RDONLY) { - f2fs_msg(sbi->sb, KERN_INFO, - "recover fsync data on readonly fs"); + f2fs_info(sbi, "recover fsync data on readonly fs"); sbi->sb->s_flags &= ~SB_RDONLY; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 8dee063c833f..a661ac32e829 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -546,9 +546,13 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) if (test_opt(sbi, DATA_FLUSH)) { struct blk_plug plug; + mutex_lock(&sbi->flush_lock); + blk_start_plug(&plug); f2fs_sync_dirty_inodes(sbi, FILE_INODE); blk_finish_plug(&plug); + + mutex_unlock(&sbi->flush_lock); } f2fs_sync_fs(sbi->sb, true); stat_inc_bg_cp_count(sbi->stat_info); @@ -869,11 +873,14 @@ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi) mutex_unlock(&dirty_i->seglist_lock); } -int f2fs_disable_cp_again(struct f2fs_sb_info *sbi) +block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi) { + int ovp_hole_segs = + (overprovision_segments(sbi) - reserved_segments(sbi)); + block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg; struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - block_t ovp = overprovision_segments(sbi) << sbi->log_blocks_per_seg; block_t holes[2] = {0, 0}; /* DATA and NODE */ + block_t unusable; struct seg_entry *se; unsigned int segno; @@ -887,10 +894,20 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi) } mutex_unlock(&dirty_i->seglist_lock); - if (holes[DATA] > ovp || holes[NODE] > ovp) + unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE]; + if (unusable > ovp_holes) + return unusable - ovp_holes; + return 0; +} + +int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable) +{ + int ovp_hole_segs = + (overprovision_segments(sbi) - reserved_segments(sbi)); + if (unusable > F2FS_OPTION(sbi).unusable_cap) return -EAGAIN; if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) && - dirty_segments(sbi) > overprovision_segments(sbi)) + dirty_segments(sbi) > ovp_hole_segs) return -EAGAIN; return 0; } @@ -1480,6 +1497,10 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi, list_for_each_entry_safe(dc, tmp, pend_list, list) { f2fs_bug_on(sbi, dc->state != D_PREP); + if (dpolicy->timeout != 0 && + f2fs_time_over(sbi, dpolicy->timeout)) + break; + if (dpolicy->io_aware && i < dpolicy->io_aware_gran && !is_idle(sbi, DISCARD_TIME)) { io_interrupted = true; @@ -1740,8 +1761,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, devi = f2fs_target_device_index(sbi, blkstart); if (blkstart < FDEV(devi).start_blk || blkstart > FDEV(devi).end_blk) { - f2fs_msg(sbi->sb, KERN_ERR, "Invalid block %x", - blkstart); + f2fs_err(sbi, "Invalid block %x", blkstart); return -EIO; } blkstart -= FDEV(devi).start_blk; @@ -1754,10 +1774,9 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, if (sector & (bdev_zone_sectors(bdev) - 1) || nr_sects != bdev_zone_sectors(bdev)) { - f2fs_msg(sbi->sb, KERN_ERR, - "(%d) %s: Unaligned zone reset attempted (block %x + %x)", - devi, sbi->s_ndevs ? FDEV(devi).path: "", - blkstart, blklen); + f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)", + devi, sbi->s_ndevs ? 
FDEV(devi).path : "", + blkstart, blklen); return -EIO; } trace_f2fs_issue_reset_zone(bdev, blkstart); @@ -2121,15 +2140,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) mir_exist = f2fs_test_and_set_bit(offset, se->cur_valid_map_mir); if (unlikely(exist != mir_exist)) { - f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " - "when setting bitmap, blk:%u, old bit:%d", - blkaddr, exist); + f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", + blkaddr, exist); f2fs_bug_on(sbi, 1); } #endif if (unlikely(exist)) { - f2fs_msg(sbi->sb, KERN_ERR, - "Bitmap was wrongly set, blk:%u", blkaddr); + f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", + blkaddr); f2fs_bug_on(sbi, 1); se->valid_blocks--; del = 0; @@ -2150,15 +2168,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) mir_exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map_mir); if (unlikely(exist != mir_exist)) { - f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " - "when clearing bitmap, blk:%u, old bit:%d", - blkaddr, exist); + f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d", + blkaddr, exist); f2fs_bug_on(sbi, 1); } #endif if (unlikely(!exist)) { - f2fs_msg(sbi->sb, KERN_ERR, - "Bitmap was wrongly cleared, blk:%u", blkaddr); + f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", + blkaddr); f2fs_bug_on(sbi, 1); se->valid_blocks++; del = 0; @@ -2640,6 +2657,39 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, stat_inc_seg_type(sbi, curseg); } +void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, + unsigned int start, unsigned int end) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + unsigned int segno; + + down_read(&SM_I(sbi)->curseg_lock); + mutex_lock(&curseg->curseg_mutex); + down_write(&SIT_I(sbi)->sentry_lock); + + segno = CURSEG_I(sbi, type)->segno; + if (segno < start || segno > end) + goto unlock; + + if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type)) + change_curseg(sbi, type); + else + new_curseg(sbi, type, true); + + stat_inc_seg_type(sbi, curseg); + + locate_dirty_segment(sbi, segno); +unlock: + up_write(&SIT_I(sbi)->sentry_lock); + + if (segno != curseg->segno) + f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u", + type, segno, curseg->segno); + + mutex_unlock(&curseg->curseg_mutex); + up_read(&SM_I(sbi)->curseg_lock); +} + void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) { struct curseg_info *curseg; @@ -2772,9 +2822,8 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) goto out; if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { - f2fs_msg(sbi->sb, KERN_WARNING, - "Found FS corruption, run fsck to fix."); - return -EIO; + f2fs_warn(sbi, "Found FS corruption, run fsck to fix."); + return -EFSCORRUPTED; } /* start/end segment number in main_area */ @@ -3197,12 +3246,17 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { set_sbi_flag(sbi, SBI_NEED_FSCK); - return -EFAULT; + f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", + __func__, segno); + return -EFSCORRUPTED; } stat_inc_inplace_blocks(fio->sbi); - err = f2fs_submit_page_bio(fio); + if (fio->bio) + err = f2fs_merge_page_bio(fio); + else + err = f2fs_submit_page_bio(fio); if (!err) { update_device_state(fio); f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE); @@ -3393,6 +3447,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi) seg_i = CURSEG_I(sbi, i); segno = 
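
f2fs_inplace_write_data() above now calls f2fs_merge_page_bio() when a bio is already open, instead of submitting one bio per page. The sketch below shows the general coalescing idea only — append to the open batch while blocks stay contiguous, flush otherwise — and is not the kernel bio code:

#include <stdio.h>
#include <stdint.h>

/* A toy "pending write" batch standing in for an open bio. */
struct batch {
    uint64_t start;
    uint32_t len;               /* 0 means no batch is open */
};

static void flush(struct batch *b)
{
    if (b->len)
        printf("submit: blocks %llu..%llu\n",
               (unsigned long long)b->start,
               (unsigned long long)(b->start + b->len - 1));
    b->len = 0;
}

static void write_block(struct batch *b, uint64_t blk)
{
    if (b->len && blk == b->start + b->len) {
        b->len++;               /* contiguous: merge into the open batch */
        return;
    }
    flush(b);                   /* discontiguous: submit what we have */
    b->start = blk;
    b->len = 1;
}

int main(void)
{
    struct batch b = { 0, 0 };
    uint64_t blks[] = { 100, 101, 102, 200, 201 };

    for (unsigned i = 0; i < sizeof(blks) / sizeof(blks[0]); i++)
        write_block(&b, blks[i]);
    flush(&b);
    return 0;
}
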
le32_to_cpu(ckpt->cur_data_segno[i]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); + if (blk_off > ENTRIES_IN_SUM) { + f2fs_bug_on(sbi, 1); + f2fs_put_page(page, 1); + return -EFAULT; + } seg_i->next_segno = segno; reset_curseg(sbi, i, 0); seg_i->alloc_type = ckpt->alloc_type[i]; @@ -3530,8 +3589,11 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi) /* sanity check for summary blocks */ if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES || - sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) + sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) { + f2fs_err(sbi, "invalid journal entries nats %u sits %u\n", + nats_in_cursum(nat_j), sits_in_cursum(sit_j)); return -EINVAL; + } return 0; } @@ -3762,7 +3824,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) struct f2fs_journal *journal = curseg->journal; struct sit_entry_set *ses, *tmp; struct list_head *head = &SM_I(sbi)->sit_entry_set; - bool to_journal = true; + bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS); struct seg_entry *se; down_write(&sit_i->sentry_lock); @@ -3781,7 +3843,8 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) * entries, remove all entries from journal and add and account * them in sit entry set. */ - if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL)) + if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) || + !to_journal) remove_sits_in_journal(sbi); /* @@ -4096,11 +4159,10 @@ static int build_sit_entries(struct f2fs_sb_info *sbi) start = le32_to_cpu(segno_in_journal(journal, i)); if (start >= MAIN_SEGS(sbi)) { - f2fs_msg(sbi->sb, KERN_ERR, - "Wrong journal entry on segno %u", - start); + f2fs_err(sbi, "Wrong journal entry on segno %u", + start); set_sbi_flag(sbi, SBI_NEED_FSCK); - err = -EINVAL; + err = -EFSCORRUPTED; break; } @@ -4137,11 +4199,10 @@ static int build_sit_entries(struct f2fs_sb_info *sbi) up_read(&curseg->journal_rwsem); if (!err && total_node_blocks != valid_node_count(sbi)) { - f2fs_msg(sbi->sb, KERN_ERR, - "SIT is corrupted node# %u vs %u", - total_node_blocks, valid_node_count(sbi)); + f2fs_err(sbi, "SIT is corrupted node# %u vs %u", + total_node_blocks, valid_node_count(sbi)); set_sbi_flag(sbi, SBI_NEED_FSCK); - err = -EINVAL; + err = -EFSCORRUPTED; } return err; @@ -4232,6 +4293,39 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi) return init_victim_secmap(sbi); } +static int sanity_check_curseg(struct f2fs_sb_info *sbi) +{ + int i; + + /* + * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr; + * In LFS curseg, all blkaddr after .next_blkoff should be unused. 
+ */ + for (i = 0; i < NO_CHECK_TYPE; i++) { + struct curseg_info *curseg = CURSEG_I(sbi, i); + struct seg_entry *se = get_seg_entry(sbi, curseg->segno); + unsigned int blkofs = curseg->next_blkoff; + + if (f2fs_test_bit(blkofs, se->cur_valid_map)) + goto out; + + if (curseg->alloc_type == SSR) + continue; + + for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) { + if (!f2fs_test_bit(blkofs, se->cur_valid_map)) + continue; +out: + f2fs_err(sbi, + "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u", + i, curseg->segno, curseg->alloc_type, + curseg->next_blkoff, blkofs); + return -EFSCORRUPTED; + } + } + return 0; +} + /* * Update min, max modified time for cost-benefit GC algorithm */ @@ -4327,6 +4421,10 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi) if (err) return err; + err = sanity_check_curseg(sbi); + if (err) + return err; + init_min_max_mtime(sbi); return 0; } diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 429007b8036e..b74602813a05 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -109,7 +109,7 @@ #define START_SEGNO(segno) \ (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK) #define SIT_BLK_CNT(sbi) \ - ((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK) + DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK) #define f2fs_bitmap_size(nr) \ (BITS_TO_LONGS(nr) * sizeof(unsigned long)) @@ -693,21 +693,19 @@ static inline int check_block_count(struct f2fs_sb_info *sbi, } while (cur_pos < sbi->blocks_per_seg); if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) { - f2fs_msg(sbi->sb, KERN_ERR, - "Mismatch valid blocks %d vs. %d", - GET_SIT_VBLOCKS(raw_sit), valid_blocks); + f2fs_err(sbi, "Mismatch valid blocks %d vs. %d", + GET_SIT_VBLOCKS(raw_sit), valid_blocks); set_sbi_flag(sbi, SBI_NEED_FSCK); - return -EINVAL; + return -EFSCORRUPTED; } /* check segment usage, and check boundary of a given segment number */ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg || segno > TOTAL_SEGS(sbi) - 1)) { - f2fs_msg(sbi->sb, KERN_ERR, - "Wrong valid blocks %d or segno %u", - GET_SIT_VBLOCKS(raw_sit), segno); + f2fs_err(sbi, "Wrong valid blocks %d or segno %u", + GET_SIT_VBLOCKS(raw_sit), segno); set_sbi_flag(sbi, SBI_NEED_FSCK); - return -EINVAL; + return -EFSCORRUPTED; } return 0; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 6b959bbb336a..d95a681ef7c9 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -136,7 +136,10 @@ enum { Opt_alloc, Opt_fsync, Opt_test_dummy_encryption, - Opt_checkpoint, + Opt_checkpoint_disable, + Opt_checkpoint_disable_cap, + Opt_checkpoint_disable_cap_perc, + Opt_checkpoint_enable, Opt_err, }; @@ -195,45 +198,52 @@ static match_table_t f2fs_tokens = { {Opt_alloc, "alloc_mode=%s"}, {Opt_fsync, "fsync_mode=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, - {Opt_checkpoint, "checkpoint=%s"}, + {Opt_checkpoint_disable, "checkpoint=disable"}, + {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"}, + {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"}, + {Opt_checkpoint_enable, "checkpoint=enable"}, {Opt_err, NULL}, }; -void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) +void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...) 
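
sanity_check_curseg() above cross-checks each current segment's next_blkoff against the SIT validity bitmap: the next block to allocate must be free, and for an LFS-type log everything after it must be free as well (SSR is allowed to reuse holes). A compact model of that check, using a plain array in place of cur_valid_map:

#include <stdio.h>

#define BLKS_PER_SEG 8          /* tiny segment for illustration */

/* Mirror the idea of sanity_check_curseg(): the next block to allocate must
 * not already be marked valid, and for an LFS-style log every block after it
 * must be free as well. */
static int curseg_is_consistent(const char *valid, int next_blkoff, int lfs)
{
    if (valid[next_blkoff])
        return 0;
    if (!lfs)                   /* SSR may reuse holes beyond next_blkoff */
        return 1;
    for (int b = next_blkoff + 1; b < BLKS_PER_SEG; b++)
        if (valid[b])
            return 0;
    return 1;
}

int main(void)
{
    char ok[BLKS_PER_SEG]  = { 1, 1, 1, 0, 0, 0, 0, 0 };
    char bad[BLKS_PER_SEG] = { 1, 1, 1, 0, 0, 1, 0, 0 };

    printf("ok  (LFS, next=3): %d\n", curseg_is_consistent(ok, 3, 1));
    printf("bad (LFS, next=3): %d\n", curseg_is_consistent(bad, 3, 1));
    return 0;
}
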
{ struct va_format vaf; va_list args; + int level; va_start(args, fmt); - vaf.fmt = fmt; + + level = printk_get_level(fmt); + vaf.fmt = printk_skip_level(fmt); vaf.va = &args; - printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); + printk("%c%cF2FS-fs (%s): %pV\n", + KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); + va_end(args); } static inline void limit_reserve_root(struct f2fs_sb_info *sbi) { - block_t limit = (sbi->user_block_count << 1) / 1000; + block_t limit = min((sbi->user_block_count << 1) / 1000, + sbi->user_block_count - sbi->reserved_blocks); /* limit is 0.2% */ if (test_opt(sbi, RESERVE_ROOT) && F2FS_OPTION(sbi).root_reserved_blocks > limit) { F2FS_OPTION(sbi).root_reserved_blocks = limit; - f2fs_msg(sbi->sb, KERN_INFO, - "Reduce reserved blocks for root = %u", - F2FS_OPTION(sbi).root_reserved_blocks); + f2fs_info(sbi, "Reduce reserved blocks for root = %u", + F2FS_OPTION(sbi).root_reserved_blocks); } if (!test_opt(sbi, RESERVE_ROOT) && (!uid_eq(F2FS_OPTION(sbi).s_resuid, make_kuid(&init_user_ns, F2FS_DEF_RESUID)) || !gid_eq(F2FS_OPTION(sbi).s_resgid, make_kgid(&init_user_ns, F2FS_DEF_RESGID)))) - f2fs_msg(sbi->sb, KERN_INFO, - "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", - from_kuid_munged(&init_user_ns, - F2FS_OPTION(sbi).s_resuid), - from_kgid_munged(&init_user_ns, - F2FS_OPTION(sbi).s_resgid)); + f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", + from_kuid_munged(&init_user_ns, + F2FS_OPTION(sbi).s_resuid), + from_kgid_munged(&init_user_ns, + F2FS_OPTION(sbi).s_resgid)); } static void init_once(void *foo) @@ -254,35 +264,29 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype, int ret = -EINVAL; if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { - f2fs_msg(sb, KERN_ERR, - "Cannot change journaled " - "quota options when quota turned on"); + f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); return -EINVAL; } if (f2fs_sb_has_quota_ino(sbi)) { - f2fs_msg(sb, KERN_INFO, - "QUOTA feature is enabled, so ignore qf_name"); + f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name"); return 0; } qname = match_strdup(args); if (!qname) { - f2fs_msg(sb, KERN_ERR, - "Not enough memory for storing quotafile name"); + f2fs_err(sbi, "Not enough memory for storing quotafile name"); return -ENOMEM; } if (F2FS_OPTION(sbi).s_qf_names[qtype]) { if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) ret = 0; else - f2fs_msg(sb, KERN_ERR, - "%s quota file already specified", + f2fs_err(sbi, "%s quota file already specified", QTYPE2NAME(qtype)); goto errout; } if (strchr(qname, '/')) { - f2fs_msg(sb, KERN_ERR, - "quotafile must be on filesystem root"); + f2fs_err(sbi, "quotafile must be on filesystem root"); goto errout; } F2FS_OPTION(sbi).s_qf_names[qtype] = qname; @@ -298,8 +302,7 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype) struct f2fs_sb_info *sbi = F2FS_SB(sb); if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { - f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options" - " when quota turned on"); + f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); return -EINVAL; } kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]); @@ -315,8 +318,7 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) * to support legacy quotas in quota files. */ if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { - f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. 
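
The new f2fs_printk() above carries the severity inside the format string, the way printk() does: a KERN_<LEVEL> prefix (SOH plus one character) is read with printk_get_level() and stripped with printk_skip_level(), so a single varargs helper can back all the f2fs_<level> macros. A userspace approximation of that mechanism (the demo_* names are made up for the example):

#include <stdio.h>
#include <stdarg.h>

#define SOH "\001"              /* same ASCII SOH the KERN_<LEVEL> strings use */

/* Userspace stand-ins for printk_get_level()/printk_skip_level(). */
static char get_level(const char *fmt)
{
    return (fmt[0] == '\001' && fmt[1]) ? fmt[1] : '6';     /* default: info */
}

static const char *skip_level(const char *fmt)
{
    return (fmt[0] == '\001' && fmt[1]) ? fmt + 2 : fmt;
}

/* Shape of the new helper: one varargs function, severity travels in the
 * format string, and per-level macros just prepend the prefix. */
static void demo_printk(const char *dev, const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    printf("<%c> F2FS-fs (%s): ", get_level(fmt), dev);
    vprintf(skip_level(fmt), args);
    printf("\n");
    va_end(args);
}

#define demo_err(dev, fmt, ...)  demo_printk(dev, SOH "3" fmt, ##__VA_ARGS__)
#define demo_info(dev, fmt, ...) demo_printk(dev, SOH "6" fmt, ##__VA_ARGS__)

int main(void)
{
    demo_err("sda1", "invalid crc_offset: %zu", (size_t)42);
    demo_info("sda1", "orphan cleanup on readonly fs");
    return 0;
}
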
" - "Cannot enable project quota enforcement."); + f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement."); return -1; } if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || @@ -336,21 +338,18 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || test_opt(sbi, PRJQUOTA)) { - f2fs_msg(sbi->sb, KERN_ERR, "old and new quota " - "format mixing"); + f2fs_err(sbi, "old and new quota format mixing"); return -1; } if (!F2FS_OPTION(sbi).s_jquota_fmt) { - f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format " - "not specified"); + f2fs_err(sbi, "journaled quota format not specified"); return -1; } } if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { - f2fs_msg(sbi->sb, KERN_INFO, - "QUOTA feature is enabled, so ignore jquota_fmt"); + f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt"); F2FS_OPTION(sbi).s_jquota_fmt = 0; } return 0; @@ -418,8 +417,7 @@ static int parse_options(struct super_block *sb, char *options) break; case Opt_nodiscard: if (f2fs_sb_has_blkzoned(sbi)) { - f2fs_msg(sb, KERN_WARNING, - "discard is required for zoned block devices"); + f2fs_warn(sbi, "discard is required for zoned block devices"); return -EINVAL; } clear_opt(sbi, DISCARD); @@ -451,20 +449,16 @@ static int parse_options(struct super_block *sb, char *options) break; #else case Opt_user_xattr: - f2fs_msg(sb, KERN_INFO, - "user_xattr options not supported"); + f2fs_info(sbi, "user_xattr options not supported"); break; case Opt_nouser_xattr: - f2fs_msg(sb, KERN_INFO, - "nouser_xattr options not supported"); + f2fs_info(sbi, "nouser_xattr options not supported"); break; case Opt_inline_xattr: - f2fs_msg(sb, KERN_INFO, - "inline_xattr options not supported"); + f2fs_info(sbi, "inline_xattr options not supported"); break; case Opt_noinline_xattr: - f2fs_msg(sb, KERN_INFO, - "noinline_xattr options not supported"); + f2fs_info(sbi, "noinline_xattr options not supported"); break; #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL @@ -476,10 +470,10 @@ static int parse_options(struct super_block *sb, char *options) break; #else case Opt_acl: - f2fs_msg(sb, KERN_INFO, "acl options not supported"); + f2fs_info(sbi, "acl options not supported"); break; case Opt_noacl: - f2fs_msg(sb, KERN_INFO, "noacl options not supported"); + f2fs_info(sbi, "noacl options not supported"); break; #endif case Opt_active_logs: @@ -529,9 +523,8 @@ static int parse_options(struct super_block *sb, char *options) if (args->from && match_int(args, &arg)) return -EINVAL; if (test_opt(sbi, RESERVE_ROOT)) { - f2fs_msg(sb, KERN_INFO, - "Preserve previous reserve_root=%u", - F2FS_OPTION(sbi).root_reserved_blocks); + f2fs_info(sbi, "Preserve previous reserve_root=%u", + F2FS_OPTION(sbi).root_reserved_blocks); } else { F2FS_OPTION(sbi).root_reserved_blocks = arg; set_opt(sbi, RESERVE_ROOT); @@ -542,8 +535,7 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; uid = make_kuid(current_user_ns(), arg); if (!uid_valid(uid)) { - f2fs_msg(sb, KERN_ERR, - "Invalid uid value %d", arg); + f2fs_err(sbi, "Invalid uid value %d", arg); return -EINVAL; } F2FS_OPTION(sbi).s_resuid = uid; @@ -553,8 +545,7 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; gid = make_kgid(current_user_ns(), arg); if (!gid_valid(gid)) { - f2fs_msg(sb, KERN_ERR, - "Invalid gid value %d", arg); + f2fs_err(sbi, "Invalid gid value %d", arg); return -EINVAL; } F2FS_OPTION(sbi).s_resgid = gid; @@ -567,9 +558,7 @@ static int 
parse_options(struct super_block *sb, char *options) if (strlen(name) == 8 && !strncmp(name, "adaptive", 8)) { if (f2fs_sb_has_blkzoned(sbi)) { - f2fs_msg(sb, KERN_WARNING, - "adaptive mode is not allowed with " - "zoned block device feature"); + f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature"); kvfree(name); return -EINVAL; } @@ -587,9 +576,8 @@ static int parse_options(struct super_block *sb, char *options) if (args->from && match_int(args, &arg)) return -EINVAL; if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) { - f2fs_msg(sb, KERN_WARNING, - "Not support %d, larger than %d", - 1 << arg, BIO_MAX_PAGES); + f2fs_warn(sbi, "Not support %d, larger than %d", + 1 << arg, BIO_MAX_PAGES); return -EINVAL; } F2FS_OPTION(sbi).write_io_size_bits = arg; @@ -610,13 +598,11 @@ static int parse_options(struct super_block *sb, char *options) break; #else case Opt_fault_injection: - f2fs_msg(sb, KERN_INFO, - "fault_injection options not supported"); + f2fs_info(sbi, "fault_injection options not supported"); break; case Opt_fault_type: - f2fs_msg(sb, KERN_INFO, - "fault_type options not supported"); + f2fs_info(sbi, "fault_type options not supported"); break; #endif case Opt_lazytime: @@ -696,8 +682,7 @@ static int parse_options(struct super_block *sb, char *options) case Opt_jqfmt_vfsv0: case Opt_jqfmt_vfsv1: case Opt_noquota: - f2fs_msg(sb, KERN_INFO, - "quota operations not supported"); + f2fs_info(sbi, "quota operations not supported"); break; #endif case Opt_whint: @@ -759,39 +744,44 @@ static int parse_options(struct super_block *sb, char *options) case Opt_test_dummy_encryption: #ifdef CONFIG_FS_ENCRYPTION if (!f2fs_sb_has_encrypt(sbi)) { - f2fs_msg(sb, KERN_ERR, "Encrypt feature is off"); + f2fs_err(sbi, "Encrypt feature is off"); return -EINVAL; } F2FS_OPTION(sbi).test_dummy_encryption = true; - f2fs_msg(sb, KERN_INFO, - "Test dummy encryption mode enabled"); + f2fs_info(sbi, "Test dummy encryption mode enabled"); #else - f2fs_msg(sb, KERN_INFO, - "Test dummy encryption mount option ignored"); + f2fs_info(sbi, "Test dummy encryption mount option ignored"); #endif break; - case Opt_checkpoint: - name = match_strdup(&args[0]); - if (!name) - return -ENOMEM; - - if (strlen(name) == 6 && - !strncmp(name, "enable", 6)) { - clear_opt(sbi, DISABLE_CHECKPOINT); - } else if (strlen(name) == 7 && - !strncmp(name, "disable", 7)) { - set_opt(sbi, DISABLE_CHECKPOINT); - } else { - kvfree(name); + case Opt_checkpoint_disable_cap_perc: + if (args->from && match_int(args, &arg)) return -EINVAL; - } - kvfree(name); + if (arg < 0 || arg > 100) + return -EINVAL; + if (arg == 100) + F2FS_OPTION(sbi).unusable_cap = + sbi->user_block_count; + else + F2FS_OPTION(sbi).unusable_cap = + (sbi->user_block_count / 100) * arg; + set_opt(sbi, DISABLE_CHECKPOINT); + break; + case Opt_checkpoint_disable_cap: + if (args->from && match_int(args, &arg)) + return -EINVAL; + F2FS_OPTION(sbi).unusable_cap = arg; + set_opt(sbi, DISABLE_CHECKPOINT); + break; + case Opt_checkpoint_disable: + set_opt(sbi, DISABLE_CHECKPOINT); + break; + case Opt_checkpoint_enable: + clear_opt(sbi, DISABLE_CHECKPOINT); break; default: - f2fs_msg(sb, KERN_ERR, - "Unrecognized mount option \"%s\" or missing value", - p); + f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", + p); return -EINVAL; } } @@ -800,23 +790,18 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; #else if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { - f2fs_msg(sbi->sb, KERN_INFO, - "Filesystem 
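
The checkpoint= option above now accepts enable, disable, disable:<blocks> and disable:<percent>%, with the percentage translated into an absolute block cap against user_block_count. A hedged userspace sketch of that parsing (parse_checkpoint_opt() is illustrative, not the kernel parser):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Rough model of the new checkpoint= parsing; user_blocks stands in for
 * sbi->user_block_count. */
static int parse_checkpoint_opt(const char *val, uint64_t user_blocks,
                                uint64_t *cap, int *disabled)
{
    if (!strcmp(val, "enable")) {
        *disabled = 0;
        return 0;
    }
    if (!strcmp(val, "disable")) {
        *disabled = 1;
        *cap = 0;
        return 0;
    }
    if (!strncmp(val, "disable:", 8)) {
        char *end;
        unsigned long arg = strtoul(val + 8, &end, 10);

        if (end == val + 8)
            return -1;
        if (!strcmp(end, "%")) {                /* percentage of user blocks */
            if (arg > 100)
                return -1;
            *cap = arg == 100 ? user_blocks
                              : (user_blocks / 100) * arg;
        } else if (*end == '\0') {              /* absolute block count */
            *cap = arg;
        } else {
            return -1;
        }
        *disabled = 1;
        return 0;
    }
    return -1;
}

int main(void)
{
    uint64_t cap = 0;
    int disabled = 0;

    if (parse_checkpoint_opt("disable:10%", 1000000, &cap, &disabled) == 0)
        printf("disabled=%d unusable_cap=%llu\n",
               disabled, (unsigned long long)cap);
    return 0;
}
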
with quota feature cannot be mounted RDWR " - "without CONFIG_QUOTA"); + f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); return -EINVAL; } if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { - f2fs_msg(sb, KERN_ERR, - "Filesystem with project quota feature cannot be " - "mounted RDWR without CONFIG_QUOTA"); + f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); return -EINVAL; } #endif if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) { - f2fs_msg(sb, KERN_ERR, - "Should set mode=lfs with %uKB-sized IO", - F2FS_IO_SIZE_KB(sbi)); + f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO", + F2FS_IO_SIZE_KB(sbi)); return -EINVAL; } @@ -825,15 +810,11 @@ static int parse_options(struct super_block *sb, char *options) if (!f2fs_sb_has_extra_attr(sbi) || !f2fs_sb_has_flexible_inline_xattr(sbi)) { - f2fs_msg(sb, KERN_ERR, - "extra_attr or flexible_inline_xattr " - "feature is off"); + f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off"); return -EINVAL; } if (!test_opt(sbi, INLINE_XATTR)) { - f2fs_msg(sb, KERN_ERR, - "inline_xattr_size option should be " - "set with inline_xattr option"); + f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option"); return -EINVAL; } @@ -842,16 +823,14 @@ static int parse_options(struct super_block *sb, char *options) if (F2FS_OPTION(sbi).inline_xattr_size < min_size || F2FS_OPTION(sbi).inline_xattr_size > max_size) { - f2fs_msg(sb, KERN_ERR, - "inline xattr size is out of range: %d ~ %d", - min_size, max_size); + f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d", + min_size, max_size); return -EINVAL; } } if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) { - f2fs_msg(sb, KERN_ERR, - "LFS not compatible with checkpoint=disable\n"); + f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n"); return -EINVAL; } @@ -1313,6 +1292,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",disable_roll_forward"); if (test_opt(sbi, DISCARD)) seq_puts(seq, ",discard"); + else + seq_puts(seq, ",nodiscard"); if (test_opt(sbi, NOHEAP)) seq_puts(seq, ",no_heap"); else @@ -1409,8 +1390,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_printf(seq, ",alloc_mode=%s", "reuse"); if (test_opt(sbi, DISABLE_CHECKPOINT)) - seq_puts(seq, ",checkpoint=disable"); - + seq_printf(seq, ",checkpoint=disable:%u", + F2FS_OPTION(sbi).unusable_cap); if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX) seq_printf(seq, ",fsync_mode=%s", "posix"); else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) @@ -1439,6 +1420,7 @@ static void default_options(struct f2fs_sb_info *sbi) set_opt(sbi, EXTENT_CACHE); set_opt(sbi, NOHEAP); clear_opt(sbi, DISABLE_CHECKPOINT); + F2FS_OPTION(sbi).unusable_cap = 0; sbi->sb->s_flags |= SB_LAZYTIME; set_opt(sbi, FLUSH_MERGE); set_opt(sbi, DISCARD); @@ -1467,10 +1449,10 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) struct cp_control cpc; int err = 0; int ret; + block_t unusable; if (s_flags & SB_RDONLY) { - f2fs_msg(sbi->sb, KERN_ERR, - "checkpoint=disable on readonly fs"); + f2fs_err(sbi, "checkpoint=disable on readonly fs"); return -EINVAL; } sbi->sb->s_flags |= SB_ACTIVE; @@ -1494,7 +1476,8 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) goto restore_flag; } - if (f2fs_disable_cp_again(sbi)) { + unusable = f2fs_get_unusable_blocks(sbi); + if (f2fs_disable_cp_again(sbi, unusable)) { err = -EAGAIN; goto 
restore_flag; } @@ -1507,7 +1490,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) goto out_unlock; spin_lock(&sbi->stat_lock); - sbi->unusable_block_count = 0; + sbi->unusable_block_count = unusable; spin_unlock(&sbi->stat_lock); out_unlock: @@ -1572,8 +1555,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) /* recover superblocks we couldn't write due to previous RO mount */ if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { err = f2fs_commit_super(sbi, false); - f2fs_msg(sb, KERN_INFO, - "Try to recover all the superblocks, ret: %d", err); + f2fs_info(sbi, "Try to recover all the superblocks, ret: %d", + err); if (!err) clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); } @@ -1614,15 +1597,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) /* disallow enable/disable extent_cache dynamically */ if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { err = -EINVAL; - f2fs_msg(sbi->sb, KERN_WARNING, - "switch extent_cache option is not allowed"); + f2fs_warn(sbi, "switch extent_cache option is not allowed"); goto restore_opts; } if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { err = -EINVAL; - f2fs_msg(sbi->sb, KERN_WARNING, - "disabling checkpoint not compatible with read-only"); + f2fs_warn(sbi, "disabling checkpoint not compatible with read-only"); goto restore_opts; } @@ -1692,8 +1673,7 @@ skip: restore_gc: if (need_restart_gc) { if (f2fs_start_gc_thread(sbi)) - f2fs_msg(sbi->sb, KERN_WARNING, - "background gc thread has stopped"); + f2fs_warn(sbi, "background gc thread has stopped"); } else if (need_stop_gc) { f2fs_stop_gc_thread(sbi); } @@ -1832,8 +1812,7 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode) static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) { if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { - f2fs_msg(sbi->sb, KERN_ERR, - "quota sysfile may be corrupted, skip loading it"); + f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it"); return 0; } @@ -1849,8 +1828,7 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) if (f2fs_sb_has_quota_ino(sbi) && rdonly) { err = f2fs_enable_quotas(sbi->sb); if (err) { - f2fs_msg(sbi->sb, KERN_ERR, - "Cannot turn on quota_ino: %d", err); + f2fs_err(sbi, "Cannot turn on quota_ino: %d", err); return 0; } return 1; @@ -1863,8 +1841,8 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) enabled = 1; continue; } - f2fs_msg(sbi->sb, KERN_ERR, - "Cannot turn on quotas: %d on %d", err, i); + f2fs_err(sbi, "Cannot turn on quotas: %d on %d", + err, i); } } return enabled; @@ -1885,8 +1863,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id, qf_inode = f2fs_iget(sb, qf_inum); if (IS_ERR(qf_inode)) { - f2fs_msg(sb, KERN_ERR, - "Bad quota inode %u:%lu", type, qf_inum); + f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum); return PTR_ERR(qf_inode); } @@ -1899,17 +1876,17 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id, static int f2fs_enable_quotas(struct super_block *sb) { + struct f2fs_sb_info *sbi = F2FS_SB(sb); int type, err = 0; unsigned long qf_inum; bool quota_mopt[MAXQUOTAS] = { - test_opt(F2FS_SB(sb), USRQUOTA), - test_opt(F2FS_SB(sb), GRPQUOTA), - test_opt(F2FS_SB(sb), PRJQUOTA), + test_opt(sbi, USRQUOTA), + test_opt(sbi, GRPQUOTA), + test_opt(sbi, PRJQUOTA), }; if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) { - f2fs_msg(sb, KERN_ERR, - "quota file may be corrupted, skip loading it"); 
+ f2fs_err(sbi, "quota file may be corrupted, skip loading it"); return 0; } @@ -1922,10 +1899,8 @@ static int f2fs_enable_quotas(struct super_block *sb) DQUOT_USAGE_ENABLED | (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); if (err) { - f2fs_msg(sb, KERN_ERR, - "Failed to enable quota tracking " - "(type=%d, err=%d). Please run " - "fsck to fix.", type, err); + f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.", + type, err); for (type--; type >= 0; type--) dquot_quota_off(sb, type); set_sbi_flag(F2FS_SB(sb), @@ -1944,6 +1919,18 @@ int f2fs_quota_sync(struct super_block *sb, int type) int cnt; int ret; + /* + * do_quotactl + * f2fs_quota_sync + * down_read(quota_sem) + * dquot_writeback_dquots() + * f2fs_dquot_commit + * block_operation + * down_read(quota_sem) + */ + f2fs_lock_op(sbi); + + down_read(&sbi->quota_sem); ret = dquot_writeback_dquots(sb, type); if (ret) goto out; @@ -1981,6 +1968,8 @@ int f2fs_quota_sync(struct super_block *sb, int type) out: if (ret) set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); + up_read(&sbi->quota_sem); + f2fs_unlock_op(sbi); return ret; } @@ -2045,10 +2034,8 @@ void f2fs_quota_off_umount(struct super_block *sb) if (err) { int ret = dquot_quota_off(sb, type); - f2fs_msg(sb, KERN_ERR, - "Fail to turn off disk quota " - "(type: %d, err: %d, ret:%d), Please " - "run fsck to fix it.", type, err, ret); + f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.", + type, err, ret); set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); } } @@ -2074,32 +2061,40 @@ static void f2fs_truncate_quota_inode_pages(struct super_block *sb) static int f2fs_dquot_commit(struct dquot *dquot) { + struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); int ret; + down_read(&sbi->quota_sem); ret = dquot_commit(dquot); if (ret < 0) - set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR); + set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); + up_read(&sbi->quota_sem); return ret; } static int f2fs_dquot_acquire(struct dquot *dquot) { + struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); int ret; + down_read(&sbi->quota_sem); ret = dquot_acquire(dquot); if (ret < 0) - set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR); - + set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); + up_read(&sbi->quota_sem); return ret; } static int f2fs_dquot_release(struct dquot *dquot) { + struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); int ret; + down_read(&sbi->quota_sem); ret = dquot_release(dquot); if (ret < 0) - set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR); + set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); + up_read(&sbi->quota_sem); return ret; } @@ -2109,22 +2104,27 @@ static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot) struct f2fs_sb_info *sbi = F2FS_SB(sb); int ret; + down_read(&sbi->quota_sem); ret = dquot_mark_dquot_dirty(dquot); /* if we are using journalled quota */ if (is_journalled_quota(sbi)) set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); + up_read(&sbi->quota_sem); return ret; } static int f2fs_dquot_commit_info(struct super_block *sb, int type) { + struct f2fs_sb_info *sbi = F2FS_SB(sb); int ret; + down_read(&sbi->quota_sem); ret = dquot_commit_info(sb, type); if (ret < 0) - set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); + set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); + up_read(&sbi->quota_sem); return ret; } @@ -2341,55 +2341,49 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, (segment_count << log_blocks_per_seg); if (segment0_blkaddr != cp_blkaddr) { - f2fs_msg(sb, 
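
The comment added to f2fs_quota_sync() above documents the ordering that avoids a deadlock between quota writeback and checkpoint: take the operation lock (f2fs_lock_op()) before quota_sem, on every path. A minimal pthread illustration of that discipline, deadlock-free because both paths honour the same order (build with -pthread; these locks are analogues, not the kernel's):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t op_lock   = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t quota_sem = PTHREAD_RWLOCK_INITIALIZER;

static void *quota_sync_path(void *arg)
{
    (void)arg;
    pthread_rwlock_rdlock(&op_lock);        /* f2fs_lock_op() analogue */
    pthread_rwlock_rdlock(&quota_sem);
    printf("quota sync: writing dquots under both locks\n");
    pthread_rwlock_unlock(&quota_sem);
    pthread_rwlock_unlock(&op_lock);
    return NULL;
}

static void *checkpoint_path(void *arg)
{
    (void)arg;
    pthread_rwlock_wrlock(&op_lock);        /* block_operations() analogue */
    printf("checkpoint: operations blocked, flushing quota\n");
    pthread_rwlock_rdlock(&quota_sem);
    pthread_rwlock_unlock(&quota_sem);
    pthread_rwlock_unlock(&op_lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, quota_sync_path, NULL);
    pthread_create(&b, NULL, checkpoint_path, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}
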
KERN_INFO, - "Mismatch start address, segment0(%u) cp_blkaddr(%u)", - segment0_blkaddr, cp_blkaddr); + f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", + segment0_blkaddr, cp_blkaddr); return true; } if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) != sit_blkaddr) { - f2fs_msg(sb, KERN_INFO, - "Wrong CP boundary, start(%u) end(%u) blocks(%u)", - cp_blkaddr, sit_blkaddr, - segment_count_ckpt << log_blocks_per_seg); + f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)", + cp_blkaddr, sit_blkaddr, + segment_count_ckpt << log_blocks_per_seg); return true; } if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) != nat_blkaddr) { - f2fs_msg(sb, KERN_INFO, - "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", - sit_blkaddr, nat_blkaddr, - segment_count_sit << log_blocks_per_seg); + f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", + sit_blkaddr, nat_blkaddr, + segment_count_sit << log_blocks_per_seg); return true; } if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) != ssa_blkaddr) { - f2fs_msg(sb, KERN_INFO, - "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", - nat_blkaddr, ssa_blkaddr, - segment_count_nat << log_blocks_per_seg); + f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", + nat_blkaddr, ssa_blkaddr, + segment_count_nat << log_blocks_per_seg); return true; } if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) != main_blkaddr) { - f2fs_msg(sb, KERN_INFO, - "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", - ssa_blkaddr, main_blkaddr, - segment_count_ssa << log_blocks_per_seg); + f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", + ssa_blkaddr, main_blkaddr, + segment_count_ssa << log_blocks_per_seg); return true; } if (main_end_blkaddr > seg_end_blkaddr) { - f2fs_msg(sb, KERN_INFO, - "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", - main_blkaddr, - segment0_blkaddr + - (segment_count << log_blocks_per_seg), - segment_count_main << log_blocks_per_seg); + f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", + main_blkaddr, + segment0_blkaddr + + (segment_count << log_blocks_per_seg), + segment_count_main << log_blocks_per_seg); return true; } else if (main_end_blkaddr < seg_end_blkaddr) { int err = 0; @@ -2406,12 +2400,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, err = __f2fs_commit_super(bh, NULL); res = err ? 
"failed" : "done"; } - f2fs_msg(sb, KERN_INFO, - "Fix alignment : %s, start(%u) end(%u) block(%u)", - res, main_blkaddr, - segment0_blkaddr + - (segment_count << log_blocks_per_seg), - segment_count_main << log_blocks_per_seg); + f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%u) block(%u)", + res, main_blkaddr, + segment0_blkaddr + + (segment_count << log_blocks_per_seg), + segment_count_main << log_blocks_per_seg); if (err) return true; } @@ -2425,7 +2418,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, block_t total_sections, blocks_per_seg; struct f2fs_super_block *raw_super = (struct f2fs_super_block *) (bh->b_data + F2FS_SUPER_OFFSET); - struct super_block *sb = sbi->sb; unsigned int blocksize; size_t crc_offset = 0; __u32 crc = 0; @@ -2435,48 +2427,42 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, crc_offset = le32_to_cpu(raw_super->checksum_offset); if (crc_offset != offsetof(struct f2fs_super_block, crc)) { - f2fs_msg(sb, KERN_INFO, - "Invalid SB checksum offset: %zu", - crc_offset); + f2fs_info(sbi, "Invalid SB checksum offset: %zu", + crc_offset); return 1; } crc = le32_to_cpu(raw_super->crc); if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { - f2fs_msg(sb, KERN_INFO, - "Invalid SB checksum value: %u", crc); + f2fs_info(sbi, "Invalid SB checksum value: %u", crc); return 1; } } if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { - f2fs_msg(sb, KERN_INFO, - "Magic Mismatch, valid(0x%x) - read(0x%x)", - F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); + f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", + F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); return 1; } /* Currently, support only 4KB page cache size */ if (F2FS_BLKSIZE != PAGE_SIZE) { - f2fs_msg(sb, KERN_INFO, - "Invalid page_cache_size (%lu), supports only 4KB", - PAGE_SIZE); + f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB", + PAGE_SIZE); return 1; } /* Currently, support only 4KB block size */ blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); if (blocksize != F2FS_BLKSIZE) { - f2fs_msg(sb, KERN_INFO, - "Invalid blocksize (%u), supports only 4KB", - blocksize); + f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB", + blocksize); return 1; } /* check log blocks per segment */ if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { - f2fs_msg(sb, KERN_INFO, - "Invalid log blocks per segment (%u)", - le32_to_cpu(raw_super->log_blocks_per_seg)); + f2fs_info(sbi, "Invalid log blocks per segment (%u)", + le32_to_cpu(raw_super->log_blocks_per_seg)); return 1; } @@ -2485,17 +2471,16 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, F2FS_MAX_LOG_SECTOR_SIZE || le32_to_cpu(raw_super->log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE) { - f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)", - le32_to_cpu(raw_super->log_sectorsize)); + f2fs_info(sbi, "Invalid log sectorsize (%u)", + le32_to_cpu(raw_super->log_sectorsize)); return 1; } if (le32_to_cpu(raw_super->log_sectors_per_block) + le32_to_cpu(raw_super->log_sectorsize) != F2FS_MAX_LOG_SECTOR_SIZE) { - f2fs_msg(sb, KERN_INFO, - "Invalid log sectors per block(%u) log sectorsize(%u)", - le32_to_cpu(raw_super->log_sectors_per_block), - le32_to_cpu(raw_super->log_sectorsize)); + f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", + le32_to_cpu(raw_super->log_sectors_per_block), + le32_to_cpu(raw_super->log_sectorsize)); return 1; } @@ -2509,59 +2494,51 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, if (segment_count > F2FS_MAX_SEGMENT || segment_count < 
 segment_count < F2FS_MIN_SEGMENTS) {
- f2fs_msg(sb, KERN_INFO,
- "Invalid segment count (%u)",
- segment_count);
+ f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
 return 1;
 }
 if (total_sections > segment_count || total_sections < F2FS_MIN_SEGMENTS || segs_per_sec > segment_count || !segs_per_sec) {
- f2fs_msg(sb, KERN_INFO,
- "Invalid segment/section count (%u, %u x %u)",
- segment_count, total_sections, segs_per_sec);
+ f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
+ segment_count, total_sections, segs_per_sec);
 return 1;
 }
 if ((segment_count / segs_per_sec) < total_sections) {
- f2fs_msg(sb, KERN_INFO,
- "Small segment_count (%u < %u * %u)",
- segment_count, segs_per_sec, total_sections);
+ f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
+ segment_count, segs_per_sec, total_sections);
 return 1;
 }
 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
- f2fs_msg(sb, KERN_INFO,
- "Wrong segment_count / block_count (%u > %llu)",
- segment_count, le64_to_cpu(raw_super->block_count));
+ f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
+ segment_count, le64_to_cpu(raw_super->block_count));
 return 1;
 }
 if (secs_per_zone > total_sections || !secs_per_zone) {
- f2fs_msg(sb, KERN_INFO,
- "Wrong secs_per_zone / total_sections (%u, %u)",
- secs_per_zone, total_sections);
+ f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
+ secs_per_zone, total_sections);
 return 1;
 }
 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || raw_super->hot_ext_count > F2FS_MAX_EXTENSION || (le32_to_cpu(raw_super->extension_count) + raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
- f2fs_msg(sb, KERN_INFO,
- "Corrupted extension count (%u + %u > %u)",
- le32_to_cpu(raw_super->extension_count),
- raw_super->hot_ext_count,
- F2FS_MAX_EXTENSION);
+ f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
+ le32_to_cpu(raw_super->extension_count),
+ raw_super->hot_ext_count,
+ F2FS_MAX_EXTENSION);
 return 1;
 }
 if (le32_to_cpu(raw_super->cp_payload) > (blocks_per_seg - F2FS_CP_PACKS)) {
- f2fs_msg(sb, KERN_INFO,
- "Insane cp_payload (%u > %u)",
- le32_to_cpu(raw_super->cp_payload),
- blocks_per_seg - F2FS_CP_PACKS);
+ f2fs_info(sbi, "Insane cp_payload (%u > %u)",
+ le32_to_cpu(raw_super->cp_payload),
+ blocks_per_seg - F2FS_CP_PACKS);
 return 1;
 }
@@ -2569,11 +2546,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 if (le32_to_cpu(raw_super->node_ino) != 1 || le32_to_cpu(raw_super->meta_ino) != 2 || le32_to_cpu(raw_super->root_ino) != 3) {
- f2fs_msg(sb, KERN_INFO,
- "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
- le32_to_cpu(raw_super->node_ino),
- le32_to_cpu(raw_super->meta_ino),
- le32_to_cpu(raw_super->root_ino));
+ f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
+ le32_to_cpu(raw_super->node_ino),
+ le32_to_cpu(raw_super->meta_ino),
+ le32_to_cpu(raw_super->root_ino));
 return 1;
 }
@@ -2617,8 +2593,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS || ovp_segments == 0 || reserved_segments == 0)) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Wrong layout: check mkfs.f2fs version");
+ f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
 return 1;
 }
@@ -2627,16 +2602,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
 if (!user_block_count || user_block_count >= segment_count_main << log_blocks_per_seg) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Wrong user_block_count: %u", user_block_count);
+ f2fs_err(sbi, "Wrong user_block_count: %u",
+ user_block_count);
 return 1;
 }
 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
 if (valid_user_blocks > user_block_count) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Wrong valid_user_blocks: %u, user_block_count: %u",
- valid_user_blocks, user_block_count);
+ f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
+ valid_user_blocks, user_block_count);
 return 1;
 }
@@ -2644,9 +2618,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 avail_node_count = sbi->total_node_count - sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
 if (valid_node_count > avail_node_count) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Wrong valid_node_count: %u, avail_node_count: %u",
- valid_node_count, avail_node_count);
+ f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
+ valid_node_count, avail_node_count);
 return 1;
 }
@@ -2660,10 +2633,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
 if (le32_to_cpu(ckpt->cur_node_segno[i]) == le32_to_cpu(ckpt->cur_node_segno[j])) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Node segment (%u, %u) has the same "
- "segno: %u", i, j,
- le32_to_cpu(ckpt->cur_node_segno[i]));
+ f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
+ i, j,
+ le32_to_cpu(ckpt->cur_node_segno[i]));
 return 1;
 }
 }
@@ -2675,10 +2647,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
 if (le32_to_cpu(ckpt->cur_data_segno[i]) == le32_to_cpu(ckpt->cur_data_segno[j])) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Data segment (%u, %u) has the same "
- "segno: %u", i, j,
- le32_to_cpu(ckpt->cur_data_segno[i]));
+ f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
+ i, j,
+ le32_to_cpu(ckpt->cur_data_segno[i]));
 return 1;
 }
 }
@@ -2687,10 +2658,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
 if (le32_to_cpu(ckpt->cur_node_segno[i]) == le32_to_cpu(ckpt->cur_data_segno[j])) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Data segment (%u) and Data segment (%u)"
- " has the same segno: %u", i, j,
- le32_to_cpu(ckpt->cur_node_segno[i]));
+ f2fs_err(sbi, "Data segment (%u) and Data segment (%u) has the same segno: %u",
+ i, j,
+ le32_to_cpu(ckpt->cur_node_segno[i]));
 return 1;
 }
 }
@@ -2701,9 +2671,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 || nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Wrong bitmap size: sit: %u, nat:%u",
- sit_bitmap_size, nat_bitmap_size);
+ f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
+ sit_bitmap_size, nat_bitmap_size);
 return 1;
 }
@@ -2712,14 +2681,22 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 if (cp_pack_start_sum < cp_payload + 1 || cp_pack_start_sum > blocks_per_seg - 1 - NR_CURSEG_TYPE) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Wrong cp_pack_start_sum: %u",
- cp_pack_start_sum);
+ f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
+ cp_pack_start_sum);
+ return 1;
+ }
+
+ if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
+ le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
+ f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
+ "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
+ "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
+ le32_to_cpu(ckpt->checksum_offset));
 return 1;
 }
 if (unlikely(f2fs_cp_error(sbi))) {
- f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+ f2fs_err(sbi, "A bug case: need to run fsck");
 return 1;
 }
 return 0;
@@ -2888,18 +2865,17 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
 for (block = 0; block < 2; block++) {
 bh = sb_bread(sb, block);
 if (!bh) {
- f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
- block + 1);
+ f2fs_err(sbi, "Unable to read %dth superblock",
+ block + 1);
 err = -EIO;
 continue;
 }
 /* sanity checking of raw super */
 if (sanity_check_raw_super(sbi, bh)) {
- f2fs_msg(sb, KERN_ERR,
- "Can't find valid F2FS filesystem in %dth superblock",
- block + 1);
- err = -EINVAL;
+ f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
+ block + 1);
+ err = -EFSCORRUPTED;
 brelse(bh);
 continue;
 }
@@ -3028,36 +3004,32 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
 #ifdef CONFIG_BLK_DEV_ZONED
 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM && !f2fs_sb_has_blkzoned(sbi)) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Zoned block device feature not enabled\n");
+ f2fs_err(sbi, "Zoned block device feature not enabled\n");
 return -EINVAL;
 }
 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
 if (init_blkz_info(sbi, i)) {
- f2fs_msg(sbi->sb, KERN_ERR,
- "Failed to initialize F2FS blkzone information");
+ f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
 return -EINVAL;
 }
 if (max_devices == 1)
 break;
- f2fs_msg(sbi->sb, KERN_INFO,
- "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
- i, FDEV(i).path,
- FDEV(i).total_segments,
- FDEV(i).start_blk, FDEV(i).end_blk,
- bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
- "Host-aware" : "Host-managed");
+ f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk,
+ bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+ "Host-aware" : "Host-managed");
 continue;
 }
 #endif
- f2fs_msg(sbi->sb, KERN_INFO,
- "Mount Device [%2d]: %20s, %8u, %8x - %8x",
- i, FDEV(i).path,
- FDEV(i).total_segments,
- FDEV(i).start_blk, FDEV(i).end_blk);
- }
- f2fs_msg(sbi->sb, KERN_INFO,
- "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
+ f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk);
+ }
+ f2fs_info(sbi,
+ "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
 return 0;
 }
@@ -3103,7 +3075,7 @@ try_onemore:
 /* Load the checksum driver */
 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
 if (IS_ERR(sbi->s_chksum_driver)) {
- f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
+ f2fs_err(sbi, "Cannot load crc32 driver.");
 err = PTR_ERR(sbi->s_chksum_driver);
 sbi->s_chksum_driver = NULL;
 goto free_sbi;
 }
@@ -3111,7 +3083,7 @@ try_onemore:
 /* set a block size */
 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
- f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
+ f2fs_err(sbi, "unable to set blocksize");
 goto free_sbi;
 }
@@ -3135,8 +3107,7 @@ try_onemore:
 */
 #ifndef CONFIG_BLK_DEV_ZONED
 if (f2fs_sb_has_blkzoned(sbi)) {
- f2fs_msg(sb, KERN_ERR,
- "Zoned block device support is not enabled");
+ f2fs_err(sbi, "Zoned block device support is not enabled");
 err = -EOPNOTSUPP;
 goto free_sb_buf;
 }
@@ -3160,10 +3131,7 @@ try_onemore:
 #ifdef CONFIG_QUOTA
 sb->dq_op = &f2fs_quota_operations;
- if (f2fs_sb_has_quota_ino(sbi))
- sb->s_qcop = &dquot_quotactl_sysfile_ops;
- else
- sb->s_qcop = &f2fs_quotactl_ops;
+ sb->s_qcop = &f2fs_quotactl_ops;
 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
 if (f2fs_sb_has_quota_ino(sbi)) {
@@ -3192,6 +3160,7 @@ try_onemore:
 mutex_init(&sbi->gc_mutex);
 mutex_init(&sbi->writepages);
 mutex_init(&sbi->cp_mutex);
+ mutex_init(&sbi->resize_mutex);
 init_rwsem(&sbi->node_write);
 init_rwsem(&sbi->node_change);
@@ -3227,6 +3196,7 @@ try_onemore:
 }
 init_rwsem(&sbi->cp_rwsem);
+ init_rwsem(&sbi->quota_sem);
 init_waitqueue_head(&sbi->cp_wait);
 init_sb_info(sbi);
@@ -3246,14 +3216,14 @@ try_onemore:
 /* get an inode for meta space */
 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
 if (IS_ERR(sbi->meta_inode)) {
- f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
+ f2fs_err(sbi, "Failed to read F2FS meta data inode");
 err = PTR_ERR(sbi->meta_inode);
 goto free_io_dummy;
 }
 err = f2fs_get_valid_checkpoint(sbi);
 if (err) {
- f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
+ f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
 goto free_meta_inode;
 }
@@ -3264,10 +3234,13 @@ try_onemore:
 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
 }
+ if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+
 /* Initialize device list */
 err = f2fs_scan_devices(sbi);
 if (err) {
- f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+ f2fs_err(sbi, "Failed to find devices");
 goto free_devices;
 }
@@ -3287,6 +3260,7 @@ try_onemore:
 INIT_LIST_HEAD(&sbi->inode_list[i]);
 spin_lock_init(&sbi->inode_lock[i]);
 }
+ mutex_init(&sbi->flush_lock);
 f2fs_init_extent_cache_info(sbi);
@@ -3297,14 +3271,14 @@ try_onemore:
 /* setup f2fs internal modules */
 err = f2fs_build_segment_manager(sbi);
 if (err) {
- f2fs_msg(sb, KERN_ERR,
- "Failed to initialize F2FS segment manager");
+ f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
+ err);
 goto free_sm;
 }
 err = f2fs_build_node_manager(sbi);
 if (err) {
- f2fs_msg(sb, KERN_ERR,
- "Failed to initialize F2FS node manager");
+ f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
+ err);
 goto free_nm;
 }
@@ -3329,7 +3303,7 @@ try_onemore:
 /* get an inode for node space */
 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
 if (IS_ERR(sbi->node_inode)) {
- f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
+ f2fs_err(sbi, "Failed to read node inode");
 err = PTR_ERR(sbi->node_inode);
 goto free_stats;
 }
@@ -3337,7 +3311,7 @@ try_onemore:
 /* read root inode and dentry */
 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
 if (IS_ERR(root)) {
- f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
+ f2fs_err(sbi, "Failed to read root inode");
 err = PTR_ERR(root);
 goto free_node_inode;
 }
@@ -3363,8 +3337,7 @@ try_onemore:
 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
 err = f2fs_enable_quotas(sb);
 if (err)
- f2fs_msg(sb, KERN_ERR,
- "Cannot turn on quotas: error %d", err);
+ f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
 }
 #endif
 /* if there are nt orphan nodes free them */
@@ -3384,13 +3357,10 @@ try_onemore:
 if (f2fs_hw_is_readonly(sbi)) {
 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
 err = -EROFS;
- f2fs_msg(sb, KERN_ERR,
- "Need to recover fsync data, but "
- "write access unavailable");
+ f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
 goto free_meta;
 }
- f2fs_msg(sbi->sb, KERN_INFO, "write access "
- "unavailable, skipping recovery");
+ f2fs_info(sbi, "write access unavailable, skipping recovery");
 goto reset_checkpoint;
 }
@@ -3405,8 +3375,8 @@ try_onemore:
 if (err != -ENOMEM)
 skip_recovery = true;
 need_fsck = true;
- f2fs_msg(sb, KERN_ERR,
- "Cannot recover all fsync data errno=%d", err);
+ f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
+ err);
 goto free_meta;
 }
 } else {
@@ -3414,8 +3384,7 @@ try_onemore:
 if (!f2fs_readonly(sb) && err > 0) {
 err = -EINVAL;
- f2fs_msg(sb, KERN_ERR,
- "Need to recover fsync data");
+ f2fs_err(sbi, "Need to recover fsync data");
 goto free_meta;
 }
 }
@@ -3446,17 +3415,16 @@ reset_checkpoint:
 /* recover broken superblock */
 if (recovery) {
 err = f2fs_commit_super(sbi, true);
- f2fs_msg(sb, KERN_INFO,
- "Try to recover %dth superblock, ret: %d",
- sbi->valid_super_block ? 1 : 2, err);
+ f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
+ sbi->valid_super_block ? 1 : 2, err);
 }
 f2fs_join_shrinker(sbi);
 f2fs_tuning_parameters(sbi);
- f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
- cur_cp_version(F2FS_CKPT(sbi)));
+ f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
+ cur_cp_version(F2FS_CKPT(sbi)));
 f2fs_update_time(sbi, CP_TIME);
 f2fs_update_time(sbi, REQ_TIME);
 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 5c85166677d4..3aeacd0aacfd 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -68,6 +68,20 @@ static ssize_t dirty_segments_show(struct f2fs_attr *a,
 (unsigned long long)(dirty_segments(sbi)));
 }
+static ssize_t unusable_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ block_t unusable;
+
+ if (test_opt(sbi, DISABLE_CHECKPOINT))
+ unusable = sbi->unusable_block_count;
+ else
+ unusable = f2fs_get_unusable_blocks(sbi);
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)unusable);
+}
+
+
 static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
 struct f2fs_sb_info *sbi, char *buf)
 {
@@ -440,6 +454,7 @@ F2FS_GENERAL_RO_ATTR(dirty_segments);
 F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
 F2FS_GENERAL_RO_ATTR(features);
 F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
+F2FS_GENERAL_RO_ATTR(unusable);
 #ifdef CONFIG_FS_ENCRYPTION
 F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
@@ -495,6 +510,7 @@ static struct attribute *f2fs_attrs[] = {
 ATTR_LIST(inject_type),
 #endif
 ATTR_LIST(dirty_segments),
+ ATTR_LIST(unusable),
 ATTR_LIST(lifetime_write_kbytes),
 ATTR_LIST(features),
 ATTR_LIST(reserved_blocks),
@@ -568,8 +584,7 @@ static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
 if ((i % 10) == 0)
 seq_printf(seq, "%-10d", i);
- seq_printf(seq, "%d|%-3u", se->type,
- get_valid_blocks(sbi, i, false));
+ seq_printf(seq, "%d|%-3u", se->type, se->valid_blocks);
 if ((i % 10) == 9 || i == (total_segs - 1))
 seq_putc(seq, '\n');
 else
@@ -595,8 +610,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
 struct seg_entry *se = get_seg_entry(sbi, i);
 seq_printf(seq, "%-10d", i);
- seq_printf(seq, "%d|%-3u|", se->type,
- get_valid_blocks(sbi, i, false));
+ seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
 for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
 seq_printf(seq, " %.2x", se->cur_valid_map[j]);
 seq_putc(seq, '\n');
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index e791741d193b..b32c45621679 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -346,7 +346,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
 if (!*xe) {
- err = -EFAULT;
+ f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ err = -EFSCORRUPTED;
 goto out;
 }
 check:
@@ -622,7 +625,10 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 /* find entry with wanted name. */
 here = __find_xattr(base_addr, last_base_addr, index, len, name);
 if (!here) {
- error = -EFAULT;
+ f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
 goto exit;
 }