author | Josef Bacik <josef@toxicpanda.com> | 2019-06-20 22:38:07 +0300
---|---|---
committer | David Sterba <dsterba@suse.com> | 2019-09-09 15:59:10 +0300
commit | e11c0406ad1bb602e1e880fa4ff37dadb716639d (patch) |
tree | 1797055060d9a2c050e6587674f72e6f3b02d41d /fs/btrfs |
parent | 3e43c279e824889dacd5321505a88506e8c772e3 (diff) |
download | linux-e11c0406ad1bb602e1e880fa4ff37dadb716639d.tar.xz |
btrfs: unexport the temporary exported functions
These were renamed and exported to facilitate logical migration of
different code chunks into block-group.c. Now that all the users are in
one file, go ahead and rename them back, move the code around, and make
them static.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
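What the patch unwinds is the usual two-step migration: while code moves between files, a helper keeps a btrfs_-prefixed name and a declaration in block-group.h so callers left in the old file still build; once every caller lives in block-group.c, the declaration is dropped and the function becomes static again. Below is a minimal, self-contained sketch of that end state; the function name echoes the btrfs helper, but the body and values are toy placeholders, not kernel code.

```c
/*
 * Hypothetical sketch, not kernel code: the end state this commit restores.
 * During the migration a declaration such as
 *
 *     u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags);
 *
 * lived in block-group.h so out-of-file callers could reach the helper.
 * Once every caller is in the same file, the declaration is deleted and
 * the helper becomes static again.
 */
#include <stdio.h>

typedef unsigned long long u64;	/* stand-in for the kernel's u64 */

/* File-local again: no header declaration, no subsystem prefix needed. */
static u64 get_restripe_target(u64 flags)
{
	/* Toy body; the real function consults fs_info->balance_ctl. */
	return flags & 0xffULL;
}

int main(void)
{
	/* The only callers are in this translation unit. */
	printf("target: %llu\n", get_restripe_target(0x1c0ULL));
	return 0;
}
```

Keeping such helpers static shrinks the header surface and lets the compiler inline or discard them freely.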
Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/block-group.c | 28
-rw-r--r-- | fs/btrfs/block-group.h | 4
2 files changed, 14 insertions, 18 deletions
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 9c9f5a1bcdcb..55d6d1c36b62 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -21,7 +21,7 @@
  *
  * Should be called with balance_lock held
  */
-u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
 {
 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
 	u64 target = 0;
@@ -62,7 +62,7 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
 	 * reduce to the target profile
 	 */
 	spin_lock(&fs_info->balance_lock);
-	target = btrfs_get_restripe_target(fs_info, flags);
+	target = get_restripe_target(fs_info, flags);
 	if (target) {
 		/* Pick target profile only if it's already available */
 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
@@ -424,7 +424,7 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 }
 
 #ifdef CONFIG_BTRFS_DEBUG
-void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group)
+static void fragment_free_space(struct btrfs_block_group_cache *block_group)
 {
 	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	u64 start = block_group->key.objectid;
@@ -661,7 +661,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 		block_group->space_info->bytes_used += bytes_used >> 1;
 		spin_unlock(&block_group->lock);
 		spin_unlock(&block_group->space_info->lock);
-		btrfs_fragment_free_space(block_group);
+		fragment_free_space(block_group);
 	}
 #endif
 
@@ -768,7 +768,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
 			cache->space_info->bytes_used += bytes_used >> 1;
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
-			btrfs_fragment_free_space(cache);
+			fragment_free_space(cache);
 		}
 #endif
 		mutex_unlock(&caching_ctl->mutex);
@@ -1180,7 +1180,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
  * data in this block group. That check should be done by relocation routine,
  * not this function.
  */
-int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
@@ -1296,7 +1296,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		spin_unlock(&block_group->lock);
 
 		/* We don't want to force the issue, only flip if it's ok. */
-		ret = __btrfs_inc_block_group_ro(block_group, 0);
+		ret = inc_block_group_ro(block_group, 0);
 		up_write(&space_info->groups_sem);
 		if (ret < 0) {
 			ret = 0;
@@ -1822,7 +1822,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 
 		set_avail_alloc_bits(info, cache->flags);
 		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
-			__btrfs_inc_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			ASSERT(list_empty(&cache->bg_list));
 			btrfs_mark_bg_unused(cache);
@@ -1843,11 +1843,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
-			__btrfs_inc_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
-			__btrfs_inc_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 	}
 
 	btrfs_init_global_block_rsv(info);
@@ -1936,7 +1936,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 		u64 new_bytes_used = size - bytes_used;
 
 		bytes_used += new_bytes_used >> 1;
-		btrfs_fragment_free_space(cache);
+		fragment_free_space(cache);
 	}
 #endif
 	/*
@@ -1982,7 +1982,7 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
 	 * if restripe for this chunk_type is on pick target profile and
 	 * return, otherwise do the usual balance
 	 */
-	stripped = btrfs_get_restripe_target(fs_info, flags);
+	stripped = get_restripe_target(fs_info, flags);
 	if (stripped)
 		return extended_to_chunk(stripped);
 
@@ -2070,14 +2070,14 @@ again:
 		goto out;
 	}
 
-	ret = __btrfs_inc_block_group_ro(cache, 0);
+	ret = inc_block_group_ro(cache, 0);
 	if (!ret)
 		goto out;
 	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
-	ret = __btrfs_inc_block_group_ro(cache, 0);
+	ret = inc_block_group_ro(cache, 0);
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = update_block_group_flags(fs_info, cache->flags);
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 55e68a8d2c44..5c6e2fb23e35 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -166,7 +166,6 @@ static inline int btrfs_should_fragment_free_space(
 	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
 		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
 }
-void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group);
 #endif
 
 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
@@ -246,7 +245,4 @@ static inline int btrfs_block_group_cache_done(
 		cache->cached == BTRFS_CACHE_ERROR;
 }
 
-int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force);
-u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags);
-
 #endif /* BTRFS_BLOCK_GROUP_H */