From b93b41d4c7338dda9304eaac9d3b40da43198806 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 20 Nov 2014 12:19:11 -0500 Subject: ext4: kill ext4_kvfree() Signed-off-by: Al Viro Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) (limited to 'fs/ext4/super.c') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2c9e6864abd9..4b79f39ebf66 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -176,15 +176,6 @@ void *ext4_kvzalloc(size_t size, gfp_t flags) return ret; } -void ext4_kvfree(void *ptr) -{ - if (is_vmalloc_addr(ptr)) - vfree(ptr); - else - kfree(ptr); - -} - ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { @@ -811,8 +802,8 @@ static void ext4_put_super(struct super_block *sb) for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); - ext4_kvfree(sbi->s_group_desc); - ext4_kvfree(sbi->s_flex_groups); + kvfree(sbi->s_group_desc); + kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); @@ -1939,7 +1930,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) memcpy(new_groups, sbi->s_flex_groups, (sbi->s_flex_groups_allocated * sizeof(struct flex_groups))); - ext4_kvfree(sbi->s_flex_groups); + kvfree(sbi->s_flex_groups); } sbi->s_flex_groups = new_groups; sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups); @@ -4224,7 +4215,7 @@ failed_mount7: failed_mount6: ext4_mb_release(sb); if (sbi->s_flex_groups) - ext4_kvfree(sbi->s_flex_groups); + kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); @@ -4253,7 +4244,7 @@ failed_mount3: failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); - ext4_kvfree(sbi->s_group_desc); + kvfree(sbi->s_group_desc); failed_mount: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); -- cgit v1.2.3 From edaa53cac8fd4b96ed4b8f96c4933158ff2dd337 Mon Sep 17 00:00:00 2001 From: Zheng Liu Date: Tue, 25 Nov 2014 11:45:37 -0500 Subject: ext4: change LRU to round-robin in extent status tree shrinker In this commit we discard the LRU algorithm for inodes with an extent status tree because it takes significant effort to maintain an LRU list in the extent status tree shrinker, and the shrinker can take a long time to scan this LRU list in order to reclaim some objects. We replace the LRU ordering with a simple round-robin. After that we never need to keep an LRU list. That means that the list needn't be sorted if the shrinker cannot reclaim any objects in the first round.
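The round-robin idea is easy to see in isolation: the shrinker always takes the first inode on the list, works on it, and rotates it to the tail, so successive shrink calls visit inodes in turn with no ordering metadata at all. Below is a stand-alone sketch of that rotation; the names are hypothetical illustrations, not the ext4 structures used in the patch that follows.

#include <stdio.h>

/* Hypothetical stand-in for an inode with reclaimable extents. */
struct node {
	int id;
	struct node *next;	/* circular list, playing the role of sbi->s_es_list */
};

/* Visit up to nr_to_scan nodes, advancing the head as we go. */
static void round_robin_scan(struct node **head, int nr_to_scan)
{
	while (nr_to_scan-- > 0) {
		printf("reclaiming from node %d\n", (*head)->id);
		*head = (*head)->next;	/* rotation replaces LRU ordering */
	}
}

int main(void)
{
	struct node c = { .id = 3 }, b = { .id = 2, .next = &c };
	struct node a = { .id = 1, .next = &b };
	struct node *head = &a;

	c.next = &a;			/* close the ring */
	round_robin_scan(&head, 4);	/* visits 1, 2, 3, 1: no sorting needed */
	return 0;
}

The patch itself performs the same rotation with list_first_entry() followed by a move to the tail of sbi->s_es_list, under s_es_lock.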
Cc: Andreas Dilger Signed-off-by: Zheng Liu Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 10 +- fs/ext4/extents.c | 4 +- fs/ext4/extents_status.c | 224 +++++++++++++++++++------------------------- fs/ext4/extents_status.h | 7 +- fs/ext4/inode.c | 4 +- fs/ext4/ioctl.c | 4 +- fs/ext4/super.c | 7 +- include/trace/events/ext4.h | 11 +-- 8 files changed, 118 insertions(+), 153 deletions(-) (limited to 'fs/ext4/super.c') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 98da4cda9d18..ab6caf55f5bf 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -878,10 +878,9 @@ struct ext4_inode_info { /* extents status tree */ struct ext4_es_tree i_es_tree; rwlock_t i_es_lock; - struct list_head i_es_lru; + struct list_head i_es_list; unsigned int i_es_all_nr; /* protected by i_es_lock */ - unsigned int i_es_lru_nr; /* protected by i_es_lock */ - unsigned long i_touch_when; /* jiffies of last accessing */ + unsigned int i_es_shk_nr; /* protected by i_es_lock */ /* ialloc */ ext4_group_t i_last_alloc_group; @@ -1322,10 +1321,11 @@ struct ext4_sb_info { /* Reclaim extents from extent status tree */ struct shrinker s_es_shrinker; - struct list_head s_es_lru; + struct list_head s_es_list; + long s_es_nr_inode; struct ext4_es_stats s_es_stats; struct mb_cache *s_mb_cache; - spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp; + spinlock_t s_es_lock ____cacheline_aligned_in_smp; /* Ratelimit ext4 messages. */ struct ratelimit_state s_err_ratelimit_state; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 1ee24d74270f..e406f66a903f 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4632,7 +4632,7 @@ out2: trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated); - ext4_es_lru_add(inode); + ext4_es_list_add(inode); return err ? 
err : allocated; } @@ -5191,7 +5191,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, error = ext4_fill_fiemap_extents(inode, start_blk, len_blks, fieinfo); } - ext4_es_lru_add(inode); + ext4_es_list_add(inode); return error; } diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 94e7855ae71b..0193ca107396 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -149,8 +149,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t end); static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, int nr_to_scan); -static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, - struct ext4_inode_info *locked_ei); +static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, + struct ext4_inode_info *locked_ei); int __init ext4_init_es(void) { @@ -298,6 +298,36 @@ out: trace_ext4_es_find_delayed_extent_range_exit(inode, es); } +void ext4_es_list_add(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + + if (!list_empty(&ei->i_es_list)) + return; + + spin_lock(&sbi->s_es_lock); + if (list_empty(&ei->i_es_list)) { + list_add_tail(&ei->i_es_list, &sbi->s_es_list); + sbi->s_es_nr_inode++; + } + spin_unlock(&sbi->s_es_lock); +} + +void ext4_es_list_del(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + + spin_lock(&sbi->s_es_lock); + if (!list_empty(&ei->i_es_list)) { + list_del_init(&ei->i_es_list); + sbi->s_es_nr_inode--; + WARN_ON_ONCE(sbi->s_es_nr_inode < 0); + } + spin_unlock(&sbi->s_es_lock); +} + static struct extent_status * ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk) @@ -314,9 +344,9 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, * We don't count delayed extent because we never try to reclaim them */ if (!ext4_es_is_delayed(es)) { - EXT4_I(inode)->i_es_lru_nr++; + EXT4_I(inode)->i_es_shk_nr++; percpu_counter_inc(&EXT4_SB(inode->i_sb)-> - s_es_stats.es_stats_lru_cnt); + s_es_stats.es_stats_shk_cnt); } EXT4_I(inode)->i_es_all_nr++; @@ -330,12 +360,12 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) EXT4_I(inode)->i_es_all_nr--; percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); - /* Decrease the lru counter when this es is not delayed */ + /* Decrease the shrink counter when this es is not delayed */ if (!ext4_es_is_delayed(es)) { - BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0); - EXT4_I(inode)->i_es_lru_nr--; + BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0); + EXT4_I(inode)->i_es_shk_nr--; percpu_counter_dec(&EXT4_SB(inode->i_sb)-> - s_es_stats.es_stats_lru_cnt); + s_es_stats.es_stats_shk_cnt); } kmem_cache_free(ext4_es_cachep, es); @@ -683,8 +713,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, goto error; retry: err = __es_insert_extent(inode, &newes); - if (err == -ENOMEM && __ext4_es_shrink(EXT4_SB(inode->i_sb), 1, - EXT4_I(inode))) + if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb), + 1, EXT4_I(inode))) goto retry; if (err == -ENOMEM && !ext4_es_is_delayed(&newes)) err = 0; @@ -841,8 +871,8 @@ retry: es->es_lblk = orig_es.es_lblk; es->es_len = orig_es.es_len; if ((err == -ENOMEM) && - __ext4_es_shrink(EXT4_SB(inode->i_sb), 1, - EXT4_I(inode))) + __es_shrink(EXT4_SB(inode->i_sb), + 1, EXT4_I(inode))) goto retry; goto out; } @@ -914,6 +944,11 @@ int ext4_es_remove_extent(struct inode *inode, 
ext4_lblk_t lblk, end = lblk + len - 1; BUG_ON(end < lblk); + /* + * ext4_clear_inode() depends on us taking i_es_lock unconditionally + * so that we are sure __es_shrink() is done with the inode before it + * is reclaimed. + */ write_lock(&EXT4_I(inode)->i_es_lock); err = __es_remove_extent(inode, lblk, end); write_unlock(&EXT4_I(inode)->i_es_lock); @@ -921,114 +956,80 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, return err; } -static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a, - struct list_head *b) -{ - struct ext4_inode_info *eia, *eib; - eia = list_entry(a, struct ext4_inode_info, i_es_lru); - eib = list_entry(b, struct ext4_inode_info, i_es_lru); - - if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) && - !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED)) - return 1; - if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) && - ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED)) - return -1; - if (eia->i_touch_when == eib->i_touch_when) - return 0; - if (time_after(eia->i_touch_when, eib->i_touch_when)) - return 1; - else - return -1; -} - -static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, - struct ext4_inode_info *locked_ei) +static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, + struct ext4_inode_info *locked_ei) { struct ext4_inode_info *ei; struct ext4_es_stats *es_stats; - struct list_head *cur, *tmp; - LIST_HEAD(skipped); ktime_t start_time; u64 scan_time; + int nr_to_walk; int nr_shrunk = 0; - int retried = 0, skip_precached = 1, nr_skipped = 0; + int retried = 0, nr_skipped = 0; es_stats = &sbi->s_es_stats; start_time = ktime_get(); - spin_lock(&sbi->s_es_lru_lock); retry: - list_for_each_safe(cur, tmp, &sbi->s_es_lru) { + spin_lock(&sbi->s_es_lock); + nr_to_walk = sbi->s_es_nr_inode; + while (nr_to_walk-- > 0) { int shrunk; - /* - * If we have already reclaimed all extents from extent - * status tree, just stop the loop immediately. - */ - if (percpu_counter_read_positive( - &es_stats->es_stats_lru_cnt) == 0) - break; - - ei = list_entry(cur, struct ext4_inode_info, i_es_lru); + if (list_empty(&sbi->s_es_list)) { + spin_unlock(&sbi->s_es_lock); + goto out; + } + ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info, + i_es_list); + /* Move the inode to the tail */ + list_move(&ei->i_es_list, sbi->s_es_list.prev); /* - * Skip the inode that is newer than the last_sorted - * time. Normally we try hard to avoid shrinking - * precached inodes, but we will as a last resort. + * Normally we try hard to avoid shrinking precached inodes, + * but we will as a last resort. 
*/ - if ((es_stats->es_stats_last_sorted < ei->i_touch_when) || - (skip_precached && ext4_test_inode_state(&ei->vfs_inode, - EXT4_STATE_EXT_PRECACHED))) { + if (!retried && ext4_test_inode_state(&ei->vfs_inode, + EXT4_STATE_EXT_PRECACHED)) { nr_skipped++; - list_move_tail(cur, &skipped); continue; } - if (ei->i_es_lru_nr == 0 || ei == locked_ei || - !write_trylock(&ei->i_es_lock)) + if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) { + nr_skipped++; continue; + } + /* + * Now we hold i_es_lock which protects us from inode reclaim + * freeing inode under us + */ + spin_unlock(&sbi->s_es_lock); shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); - if (ei->i_es_lru_nr == 0) - list_del_init(&ei->i_es_lru); write_unlock(&ei->i_es_lock); nr_shrunk += shrunk; nr_to_scan -= shrunk; + if (nr_to_scan == 0) - break; + goto out; + spin_lock(&sbi->s_es_lock); } - - /* Move the newer inodes into the tail of the LRU list. */ - list_splice_tail(&skipped, &sbi->s_es_lru); - INIT_LIST_HEAD(&skipped); + spin_unlock(&sbi->s_es_lock); /* * If we skipped any inodes, and we weren't able to make any - * forward progress, sort the list and try again. + * forward progress, try again to scan precached inodes. */ if ((nr_shrunk == 0) && nr_skipped && !retried) { retried++; - list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp); - es_stats->es_stats_last_sorted = jiffies; - ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, - i_es_lru); - /* - * If there are no non-precached inodes left on the - * list, start releasing precached extents. - */ - if (ext4_test_inode_state(&ei->vfs_inode, - EXT4_STATE_EXT_PRECACHED)) - skip_precached = 0; goto retry; } - spin_unlock(&sbi->s_es_lru_lock); - if (locked_ei && nr_shrunk == 0) nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan); +out: scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); if (likely(es_stats->es_stats_scan_time)) es_stats->es_stats_scan_time = (scan_time + @@ -1043,7 +1044,7 @@ retry: else es_stats->es_stats_shrunk = nr_shrunk; - trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, skip_precached, + trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, nr_skipped, retried); return nr_shrunk; } @@ -1055,7 +1056,7 @@ static unsigned long ext4_es_count(struct shrinker *shrink, struct ext4_sb_info *sbi; sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker); - nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt); + nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr); return nr; } @@ -1068,13 +1069,13 @@ static unsigned long ext4_es_scan(struct shrinker *shrink, int nr_to_scan = sc->nr_to_scan; int ret, nr_shrunk; - ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt); + ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret); if (!nr_to_scan) return ret; - nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL); + nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL); trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret); return nr_shrunk; @@ -1102,28 +1103,24 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v) return 0; /* here we just find an inode that has the max nr. 
of objects */ - spin_lock(&sbi->s_es_lru_lock); - list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) { + spin_lock(&sbi->s_es_lock); + list_for_each_entry(ei, &sbi->s_es_list, i_es_list) { inode_cnt++; if (max && max->i_es_all_nr < ei->i_es_all_nr) max = ei; else if (!max) max = ei; } - spin_unlock(&sbi->s_es_lru_lock); + spin_unlock(&sbi->s_es_lock); seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n", percpu_counter_sum_positive(&es_stats->es_stats_all_cnt), - percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt)); + percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt)); seq_printf(seq, " %lu/%lu cache hits/misses\n", es_stats->es_stats_cache_hits, es_stats->es_stats_cache_misses); - if (es_stats->es_stats_last_sorted != 0) - seq_printf(seq, " %u ms last sorted interval\n", - jiffies_to_msecs(jiffies - - es_stats->es_stats_last_sorted)); if (inode_cnt) - seq_printf(seq, " %d inodes on lru list\n", inode_cnt); + seq_printf(seq, " %d inodes on list\n", inode_cnt); seq_printf(seq, "average:\n %llu us scan time\n", div_u64(es_stats->es_stats_scan_time, 1000)); @@ -1132,7 +1129,7 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v) seq_printf(seq, "maximum:\n %lu inode (%u objects, %u reclaimable)\n" " %llu us max scan time\n", - max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr, + max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr, div_u64(es_stats->es_stats_max_scan_time, 1000)); return 0; @@ -1181,9 +1178,9 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi) { int err; - INIT_LIST_HEAD(&sbi->s_es_lru); - spin_lock_init(&sbi->s_es_lru_lock); - sbi->s_es_stats.es_stats_last_sorted = 0; + INIT_LIST_HEAD(&sbi->s_es_list); + sbi->s_es_nr_inode = 0; + spin_lock_init(&sbi->s_es_lock); sbi->s_es_stats.es_stats_shrunk = 0; sbi->s_es_stats.es_stats_cache_hits = 0; sbi->s_es_stats.es_stats_cache_misses = 0; @@ -1192,7 +1189,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi) err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL); if (err) return err; - err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt, 0, GFP_KERNEL); + err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL); if (err) goto err1; @@ -1210,7 +1207,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi) return 0; err2: - percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt); + percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt); err1: percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); return err; @@ -1221,37 +1218,10 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) if (sbi->s_proc) remove_proc_entry("es_shrinker_info", sbi->s_proc); percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); - percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt); + percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt); unregister_shrinker(&sbi->s_es_shrinker); } -void ext4_es_lru_add(struct inode *inode) -{ - struct ext4_inode_info *ei = EXT4_I(inode); - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - - ei->i_touch_when = jiffies; - - if (!list_empty(&ei->i_es_lru)) - return; - - spin_lock(&sbi->s_es_lru_lock); - if (list_empty(&ei->i_es_lru)) - list_add_tail(&ei->i_es_lru, &sbi->s_es_lru); - spin_unlock(&sbi->s_es_lru_lock); -} - -void ext4_es_lru_del(struct inode *inode) -{ - struct ext4_inode_info *ei = EXT4_I(inode); - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); - - spin_lock(&sbi->s_es_lru_lock); - if (!list_empty(&ei->i_es_lru)) - list_del_init(&ei->i_es_lru); - 
spin_unlock(&sbi->s_es_lru_lock); -} - static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, int nr_to_scan) { @@ -1263,7 +1233,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); - if (ei->i_es_lru_nr == 0) + if (ei->i_es_shk_nr == 0) return 0; if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) && diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index efd5f970b501..0e6a33e81e5f 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h @@ -65,14 +65,13 @@ struct ext4_es_tree { }; struct ext4_es_stats { - unsigned long es_stats_last_sorted; unsigned long es_stats_shrunk; unsigned long es_stats_cache_hits; unsigned long es_stats_cache_misses; u64 es_stats_scan_time; u64 es_stats_max_scan_time; struct percpu_counter es_stats_all_cnt; - struct percpu_counter es_stats_lru_cnt; + struct percpu_counter es_stats_shk_cnt; }; extern int __init ext4_init_es(void); @@ -151,7 +150,7 @@ static inline void ext4_es_store_pblock_status(struct extent_status *es, extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi); extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi); -extern void ext4_es_lru_add(struct inode *inode); -extern void ext4_es_lru_del(struct inode *inode); +extern void ext4_es_list_add(struct inode *inode); +extern void ext4_es_list_del(struct inode *inode); #endif /* _EXT4_EXTENTS_STATUS_H */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index d5a46a8df70b..540b0b0481a5 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -486,7 +486,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) { - ext4_es_lru_add(inode); + ext4_es_list_add(inode); if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk; @@ -1388,7 +1388,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, iblock, &es)) { - ext4_es_lru_add(inode); + ext4_es_list_add(inode); if (ext4_es_is_hole(&es)) { retval = 0; down_read(&EXT4_I(inode)->i_data_sem); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index bfda18a15592..7b377c41dd81 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -78,8 +78,8 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2) memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS); ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS); - ext4_es_lru_del(inode1); - ext4_es_lru_del(inode2); + ext4_es_list_del(inode1); + ext4_es_list_del(inode2); isize = i_size_read(inode1); i_size_write(inode1, i_size_read(inode2)); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4b79f39ebf66..32df08e99ca9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -871,10 +871,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) spin_lock_init(&ei->i_prealloc_lock); ext4_es_init_tree(&ei->i_es_tree); rwlock_init(&ei->i_es_lock); - INIT_LIST_HEAD(&ei->i_es_lru); + INIT_LIST_HEAD(&ei->i_es_list); ei->i_es_all_nr = 0; - ei->i_es_lru_nr = 0; - ei->i_touch_when = 0; + ei->i_es_shk_nr = 0; ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; @@ -963,7 +962,7 @@ void ext4_clear_inode(struct inode *inode) dquot_drop(inode); ext4_discard_preallocations(inode); 
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); - ext4_es_lru_del(inode); + ext4_es_list_del(inode); if (EXT4_I(inode)->jinode) { jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode); diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index cd37a584ee88..6cfb841fea7c 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -2450,15 +2450,14 @@ TRACE_EVENT(ext4_collapse_range, TRACE_EVENT(ext4_es_shrink, TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time, - int skip_precached, int nr_skipped, int retried), + int nr_skipped, int retried), - TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried), + TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried), TP_STRUCT__entry( __field( dev_t, dev ) __field( int, nr_shrunk ) __field( unsigned long long, scan_time ) - __field( int, skip_precached ) __field( int, nr_skipped ) __field( int, retried ) ), @@ -2467,16 +2466,14 @@ TRACE_EVENT(ext4_es_shrink, __entry->dev = sb->s_dev; __entry->nr_shrunk = nr_shrunk; __entry->scan_time = div_u64(scan_time, 1000); - __entry->skip_precached = skip_precached; __entry->nr_skipped = nr_skipped; __entry->retried = retried; ), - TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d " + TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu " "nr_skipped %d retried %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk, - __entry->scan_time, __entry->skip_precached, - __entry->nr_skipped, __entry->retried) + __entry->scan_time, __entry->nr_skipped, __entry->retried) ); #endif /* _TRACE_EXT4_H */ -- cgit v1.2.3 From b0dea4c1651f3cdb6d17604fa473e72cb74cdc6b Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2014 11:49:25 -0500 Subject: ext4: move handling of list of shrinkable inodes into extent status code Currently callers adding extents to extent status tree were responsible for adding the inode to the list of inodes with freeable extents. This is error prone and puts list handling in unnecessarily many places. Just add inode to the list automatically when the first non-delay extent is added to the tree and remove inode from the list when the last non-delay extent is removed. Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o --- fs/ext4/extents.c | 2 -- fs/ext4/extents_status.c | 10 ++++++---- fs/ext4/extents_status.h | 2 -- fs/ext4/inode.c | 2 -- fs/ext4/ioctl.c | 2 -- fs/ext4/super.c | 1 - 6 files changed, 6 insertions(+), 13 deletions(-) (limited to 'fs/ext4/super.c') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e406f66a903f..e2424bafd6fe 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4632,7 +4632,6 @@ out2: trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated); - ext4_es_list_add(inode); return err ? 
err : allocated; } @@ -5191,7 +5190,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, error = ext4_fill_fiemap_extents(inode, start_blk, len_blks, fieinfo); } - ext4_es_list_add(inode); return error; } diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 0193ca107396..de2d9d8bf22f 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -298,7 +298,7 @@ out: trace_ext4_es_find_delayed_extent_range_exit(inode, es); } -void ext4_es_list_add(struct inode *inode) +static void ext4_es_list_add(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); @@ -314,7 +314,7 @@ void ext4_es_list_add(struct inode *inode) spin_unlock(&sbi->s_es_lock); } -void ext4_es_list_del(struct inode *inode) +static void ext4_es_list_del(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); @@ -344,7 +344,8 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, * We don't count delayed extent because we never try to reclaim them */ if (!ext4_es_is_delayed(es)) { - EXT4_I(inode)->i_es_shk_nr++; + if (!EXT4_I(inode)->i_es_shk_nr++) + ext4_es_list_add(inode); percpu_counter_inc(&EXT4_SB(inode->i_sb)-> s_es_stats.es_stats_shk_cnt); } @@ -363,7 +364,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) /* Decrease the shrink counter when this es is not delayed */ if (!ext4_es_is_delayed(es)) { BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0); - EXT4_I(inode)->i_es_shk_nr--; + if (!--EXT4_I(inode)->i_es_shk_nr) + ext4_es_list_del(inode); percpu_counter_dec(&EXT4_SB(inode->i_sb)-> s_es_stats.es_stats_shk_cnt); } diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index 0e6a33e81e5f..b0b78b95f481 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h @@ -150,7 +150,5 @@ static inline void ext4_es_store_pblock_status(struct extent_status *es, extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi); extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi); -extern void ext4_es_list_add(struct inode *inode); -extern void ext4_es_list_del(struct inode *inode); #endif /* _EXT4_EXTENTS_STATUS_H */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 540b0b0481a5..b416b461fa50 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -486,7 +486,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) { - ext4_es_list_add(inode); if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk; @@ -1388,7 +1387,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, iblock, &es)) { - ext4_es_list_add(inode); if (ext4_es_is_hole(&es)) { retval = 0; down_read(&EXT4_I(inode)->i_data_sem); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 7b377c41dd81..f58a0d106726 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -78,8 +78,6 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2) memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS); ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS); - ext4_es_list_del(inode1); - ext4_es_list_del(inode2); isize = i_size_read(inode1); i_size_write(inode1, i_size_read(inode2)); diff --git a/fs/ext4/super.c 
b/fs/ext4/super.c index 32df08e99ca9..e2a17f8b7adc 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -962,7 +962,6 @@ void ext4_clear_inode(struct inode *inode) dquot_drop(inode); ext4_discard_preallocations(inode); ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); - ext4_es_list_del(inode); if (EXT4_I(inode)->jinode) { jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode); -- cgit v1.2.3 From dd4759255188771e60cf3455982959a1ba04f4eb Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2014 11:51:23 -0500 Subject: ext4: limit number of scanned extents in status tree shrinker Currently we scan extent status trees of inodes until we reclaim nr_to_scan extents. This can however require a lot of scanning when there are lots of delayed extents (as those cannot be reclaimed). Change the shrinker to work as shrinkers are supposed to and *scan* only nr_to_scan extents regardless of how many extents we actually reclaim. We however need to be careful and avoid scanning each status tree from the beginning - that could lead to a situation where we would not be able to reclaim anything at all when the first nr_to_scan extents in the tree are always unreclaimable. We remember, with each inode, the offset where we stopped scanning and continue from there when we next come across the inode. Note that we also need to update places calling __es_shrink() manually to pass a reasonable nr_to_scan to have a chance of reclaiming anything and not just 1. Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 5 ++- fs/ext4/extents_status.c | 91 +++++++++++++++++++++++++++++++----------------- fs/ext4/super.c | 1 + 3 files changed, 64 insertions(+), 33 deletions(-) (limited to 'fs/ext4/super.c') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ab6caf55f5bf..4186ec84f835 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -881,6 +881,9 @@ struct ext4_inode_info { struct list_head i_es_list; unsigned int i_es_all_nr; /* protected by i_es_lock */ unsigned int i_es_shk_nr; /* protected by i_es_lock */ + ext4_lblk_t i_es_shrink_lblk; /* Offset where we start searching for + extents to shrink.
Protected by + i_es_lock */ /* ialloc */ ext4_group_t i_last_alloc_group; @@ -1321,7 +1324,7 @@ struct ext4_sb_info { /* Reclaim extents from extent status tree */ struct shrinker s_es_shrinker; - struct list_head s_es_list; + struct list_head s_es_list; /* List of inodes with reclaimable extents */ long s_es_nr_inode; struct ext4_es_stats s_es_stats; struct mb_cache *s_mb_cache; diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index de2d9d8bf22f..8f2aac4006d2 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -147,8 +147,7 @@ static struct kmem_cache *ext4_es_cachep; static int __es_insert_extent(struct inode *inode, struct extent_status *newes); static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t end); -static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, - int nr_to_scan); +static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan); static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, struct ext4_inode_info *locked_ei); @@ -716,7 +715,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, retry: err = __es_insert_extent(inode, &newes); if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb), - 1, EXT4_I(inode))) + 128, EXT4_I(inode))) goto retry; if (err == -ENOMEM && !ext4_es_is_delayed(&newes)) err = 0; @@ -874,7 +873,7 @@ retry: es->es_len = orig_es.es_len; if ((err == -ENOMEM) && __es_shrink(EXT4_SB(inode->i_sb), - 1, EXT4_I(inode))) + 128, EXT4_I(inode))) goto retry; goto out; } @@ -976,8 +975,6 @@ retry: spin_lock(&sbi->s_es_lock); nr_to_walk = sbi->s_es_nr_inode; while (nr_to_walk-- > 0) { - int shrunk; - if (list_empty(&sbi->s_es_list)) { spin_unlock(&sbi->s_es_lock); goto out; @@ -985,7 +982,7 @@ retry: ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info, i_es_list); /* Move the inode to the tail */ - list_move(&ei->i_es_list, sbi->s_es_list.prev); + list_move_tail(&ei->i_es_list, &sbi->s_es_list); /* * Normally we try hard to avoid shrinking precached inodes, @@ -1007,13 +1004,10 @@ retry: */ spin_unlock(&sbi->s_es_lock); - shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); + nr_shrunk += es_reclaim_extents(ei, &nr_to_scan); write_unlock(&ei->i_es_lock); - nr_shrunk += shrunk; - nr_to_scan -= shrunk; - - if (nr_to_scan == 0) + if (nr_to_scan <= 0) goto out; spin_lock(&sbi->s_es_lock); } @@ -1029,7 +1023,7 @@ retry: } if (locked_ei && nr_shrunk == 0) - nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan); + nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan); out: scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); @@ -1224,27 +1218,33 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) unregister_shrinker(&sbi->s_es_shrinker); } -static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, - int nr_to_scan) +/* + * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at + * most *nr_to_scan extents, update *nr_to_scan accordingly. + * + * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan. + * Increment *nr_shrunk by the number of reclaimed extents. Also update + * ei->i_es_shrink_lblk to where we should continue scanning. 
+ */ +static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end, + int *nr_to_scan, int *nr_shrunk) { struct inode *inode = &ei->vfs_inode; struct ext4_es_tree *tree = &ei->i_es_tree; - struct rb_node *node; struct extent_status *es; - unsigned long nr_shrunk = 0; - static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, - DEFAULT_RATELIMIT_BURST); - - if (ei->i_es_shk_nr == 0) - return 0; + struct rb_node *node; - if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) && - __ratelimit(&_rs)) - ext4_warning(inode->i_sb, "forced shrink of precached extents"); + es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk); + if (!es) + goto out_wrap; + node = &es->rb_node; + while (*nr_to_scan > 0) { + if (es->es_lblk > end) { + ei->i_es_shrink_lblk = end + 1; + return 0; + } - node = rb_first(&tree->root); - while (node != NULL) { - es = rb_entry(node, struct extent_status, rb_node); + (*nr_to_scan)--; node = rb_next(&es->rb_node); /* * We can't reclaim delayed extent from status tree because @@ -1253,11 +1253,38 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, if (!ext4_es_is_delayed(es)) { rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); - nr_shrunk++; - if (--nr_to_scan == 0) - break; + (*nr_shrunk)++; } + if (!node) + goto out_wrap; + es = rb_entry(node, struct extent_status, rb_node); } - tree->cache_es = NULL; + ei->i_es_shrink_lblk = es->es_lblk; + return 1; +out_wrap: + ei->i_es_shrink_lblk = 0; + return 0; +} + +static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan) +{ + struct inode *inode = &ei->vfs_inode; + int nr_shrunk = 0; + ext4_lblk_t start = ei->i_es_shrink_lblk; + static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + + if (ei->i_es_shk_nr == 0) + return 0; + + if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) && + __ratelimit(&_rs)) + ext4_warning(inode->i_sb, "forced shrink of precached extents"); + + if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) && + start != 0) + es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk); + + ei->i_es_tree.cache_es = NULL; return nr_shrunk; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e2a17f8b7adc..48318497e8e9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -874,6 +874,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) INIT_LIST_HEAD(&ei->i_es_list); ei->i_es_all_nr = 0; ei->i_es_shk_nr = 0; + ei->i_es_shrink_lblk = 0; ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; -- cgit v1.2.3 From 4fdb5543183d027a19805b72025b859af73d0863 Mon Sep 17 00:00:00 2001 From: Dmitry Monakhov Date: Tue, 25 Nov 2014 13:08:04 -0500 Subject: ext4: cleanup GFP flags inside resize path We must use GFP_NOFS instead of GFP_KERNEL inside ext4_mb_add_groupinfo and ext4_calculate_overhead() because they are called from inside a journal transaction.
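To make the deadlock concrete, here is a hedged sketch of an allocation made while a transaction handle is open; the helper name is hypothetical and this is not code from the patch, though the journalling calls are the ext4 API of this era. The call trace that motivated the change follows the sketch.

/* Hypothetical illustration: allocating while a transaction handle is held. */
static int example_alloc_under_handle(struct inode *inode, size_t len)
{
	handle_t *handle;
	void *buf;

	handle = ext4_journal_start(inode, EXT4_HT_RESIZE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/*
	 * GFP_KERNEL may enter direct reclaim, which can recurse into the
	 * filesystem (for example to write back dirty pages) and end up
	 * waiting on the very transaction we hold open.  GFP_NOFS forbids
	 * that recursion, so the allocation cannot deadlock against us.
	 */
	buf = kmalloc(len, GFP_NOFS);
	if (!buf) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}

	/* ... populate buf and journal the result ... */

	kfree(buf);
	return ext4_journal_stop(handle);
}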
Call trace: ioctl ->ext4_group_add ->journal_start ->ext4_setup_new_descs ->ext4_mb_add_groupinfo -> GFP_KERNEL ->ext4_flex_group_add ->ext4_update_super ->ext4_calculate_overhead -> GFP_KERNEL ->journal_stop Signed-off-by: Dmitry Monakhov Signed-off-by: Theodore Ts'o --- fs/ext4/mballoc.c | 6 +++--- fs/ext4/super.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs/ext4/super.c') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 004d0ff8325f..f1c25f83fc7d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2385,7 +2385,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb); - meta_group_info = kmalloc(metalen, GFP_KERNEL); + meta_group_info = kmalloc(metalen, GFP_NOFS); if (meta_group_info == NULL) { ext4_msg(sb, KERN_ERR, "can't allocate mem " "for a buddy group"); @@ -2399,7 +2399,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); - meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL); + meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); if (meta_group_info[i] == NULL) { ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); goto exit_group_info; @@ -2428,7 +2428,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, { struct buffer_head *bh; meta_group_info[i]->bb_bitmap = - kmalloc(sb->s_blocksize, GFP_KERNEL); + kmalloc(sb->s_blocksize, GFP_NOFS); BUG_ON(meta_group_info[i]->bb_bitmap == NULL); bh = ext4_read_block_bitmap(sb, group); BUG_ON(bh == NULL); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 48318497e8e9..b643009fddfe 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3300,7 +3300,7 @@ int ext4_calculate_overhead(struct super_block *sb) struct ext4_super_block *es = sbi->s_es; ext4_group_t i, ngroups = ext4_get_groups_count(sb); ext4_fsblk_t overhead = 0; - char *buf = (char *) get_zeroed_page(GFP_KERNEL); + char *buf = (char *) get_zeroed_page(GFP_NOFS); if (!buf) return -ENOMEM; -- cgit v1.2.3 From c6d3d56dd0ef6c15fc007413c9d024021c178cf9 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 25 Nov 2014 16:20:50 -0500 Subject: ext4: create nojournal_checksum mount option Create a mount option to disable journal checksumming (because the metadata_csum feature turns it on by default now), and fix remount not to allow changing the journal checksumming option, since changing the mount options has no effect on the journal. Signed-off-by: Darrick J. 
Wong Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'fs/ext4/super.c') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b643009fddfe..3bd2982da36d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1136,7 +1136,7 @@ enum { Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, - Opt_max_dir_size_kb, + Opt_max_dir_size_kb, Opt_nojournal_checksum, }; static const match_table_t tokens = { @@ -1170,6 +1170,7 @@ static const match_table_t tokens = { {Opt_journal_dev, "journal_dev=%u"}, {Opt_journal_path, "journal_path=%s"}, {Opt_journal_checksum, "journal_checksum"}, + {Opt_nojournal_checksum, "nojournal_checksum"}, {Opt_journal_async_commit, "journal_async_commit"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, @@ -1351,6 +1352,8 @@ static const struct mount_opts { MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_CLEAR}, + {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, + MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_SET}, {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | @@ -4844,6 +4847,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } + if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ + test_opt(sb, JOURNAL_CHECKSUM)) { + ext4_msg(sb, KERN_ERR, "changing journal_checksum " + "during remount not supported"); + err = -EINVAL; + goto restore_opts; + } + if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " -- cgit v1.2.3 From b003b52496b9bea9b186b127aea730b79d2a83bd Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Tue, 25 Nov 2014 16:27:44 -0500 Subject: ext4: don't count external journal blocks as overhead This was fixed for ext3 with: e6d8fb3 ext3: Count internal journal as bsddf overhead in ext3_statfs but was never fixed for ext4. With a large external journal and no used disk blocks, df comes out negative without this, as journal blocks are added to the overhead & subtracted from used blocks unconditionally. Signed-off-by: Eric Sandeen Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/ext4/super.c') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 3bd2982da36d..f8ad756bb852 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3331,8 +3331,8 @@ int ext4_calculate_overhead(struct super_block *sb) memset(buf, 0, PAGE_SIZE); cond_resched(); } - /* Add the journal blocks as well */ - if (sbi->s_journal) + /* Add the internal journal blocks as well */ + if (sbi->s_journal && !sbi->journal_bdev) overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); sbi->s_overhead = overhead; -- cgit v1.2.3 From d4f761074353b9aa42a3bdd039d78e1af5f5f29f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Nov 2014 20:19:17 -0500 Subject: ext4: forbid journal_async_commit in data=ordered mode Option journal_async_commit breaks guarantees of data=ordered mode as it sends only a single cache flush after writing a transaction commit block. Thus even though the transaction including the commit block is fully stored on persistent storage, file data may still linger in the drive's caches and will be lost on power failure.
Since all checksums match on journal recovery, we replay the transaction, thus possibly exposing stale user data. To fix this data exposure issue, remove the possibility of using journal_async_commit in data=ordered mode. Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/ext4/super.c') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f8ad756bb852..4fca81cc8fce 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1695,6 +1695,12 @@ static int parse_options(char *options, struct super_block *sb, return 0; } } + if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA && + test_opt(sb, JOURNAL_ASYNC_COMMIT)) { + ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit " + "in data=ordered mode"); + return 0; + } return 1; } -- cgit v1.2.3
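A closing note on the first patch in this series: the generic kvfree() it switches to performs the same is_vmalloc_addr() dispatch as the removed ext4_kvfree(). A sketch of the helper's behavior, believed to match the mm/util.c implementation of this era:

/*
 * Sketch of the generic helper that the ext4 call sites above now use;
 * behaviorally equivalent to the removed ext4_kvfree().
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

Because the dispatch is identical, every ext4_kvfree() call site in the first patch could be converted mechanically with no change in behavior.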