Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	45
1 file changed, 19 insertions(+), 26 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 74aa552f4793..f74dc259307b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -88,10 +88,11 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 	return inode;
 }
 
-struct inode *lookup_free_space_inode(struct btrfs_fs_info *fs_info,
-				      struct btrfs_block_group_cache
-				      *block_group, struct btrfs_path *path)
+struct inode *lookup_free_space_inode(
+		struct btrfs_block_group_cache *block_group,
+		struct btrfs_path *path)
 {
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	struct inode *inode = NULL;
 	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
 
@@ -185,20 +186,19 @@ static int __create_free_space_inode(struct btrfs_root *root,
 	return 0;
 }
 
-int create_free_space_inode(struct btrfs_fs_info *fs_info,
-			    struct btrfs_trans_handle *trans,
+int create_free_space_inode(struct btrfs_trans_handle *trans,
 			    struct btrfs_block_group_cache *block_group,
 			    struct btrfs_path *path)
 {
 	int ret;
 	u64 ino;
 
-	ret = btrfs_find_free_objectid(fs_info->tree_root, &ino);
+	ret = btrfs_find_free_objectid(trans->fs_info->tree_root, &ino);
 	if (ret < 0)
 		return ret;
 
-	return __create_free_space_inode(fs_info->tree_root, trans, path, ino,
-					 block_group->key.objectid);
+	return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
+					 ino, block_group->key.objectid);
 }
 
 int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
@@ -812,9 +812,9 @@ free_cache:
 	goto out;
 }
 
-int load_free_space_cache(struct btrfs_fs_info *fs_info,
-			  struct btrfs_block_group_cache *block_group)
+int load_free_space_cache(struct btrfs_block_group_cache *block_group)
 {
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct inode *inode;
 	struct btrfs_path *path;
@@ -858,7 +858,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 	 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
 	 * we will never try to read their inode item while the fs is mounted.
 	 */
-	inode = lookup_free_space_inode(fs_info, block_group, path);
+	inode = lookup_free_space_inode(block_group, path);
 	if (IS_ERR(inode)) {
 		btrfs_free_path(path);
 		return 0;
 	}
@@ -1039,8 +1039,7 @@ fail:
 	return -1;
 }
 
-static noinline_for_stack int
-write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
+static noinline_for_stack int write_pinned_extent_entries(
 			    struct btrfs_block_group_cache *block_group,
 			    struct btrfs_io_ctl *io_ctl,
 			    int *entries)
@@ -1059,7 +1058,7 @@ write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
 	 * We shouldn't have switched the pinned extents yet so this is the
 	 * right one
 	 */
-	unpin = fs_info->pinned_extents;
+	unpin = block_group->fs_info->pinned_extents;
 
 	start = block_group->key.objectid;
 
@@ -1235,7 +1234,6 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 				   struct btrfs_io_ctl *io_ctl,
 				   struct btrfs_trans_handle *trans)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_state *cached_state = NULL;
 	LIST_HEAD(bitmap_list);
 	int entries = 0;
@@ -1293,8 +1291,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * If this changes while we are working we'll get added back to
 	 * the dirty list and redo it.  No locking needed
 	 */
-	ret = write_pinned_extent_entries(fs_info, block_group,
-					  io_ctl, &entries);
+	ret = write_pinned_extent_entries(block_group, io_ctl, &entries);
 	if (ret)
 		goto out_nospc_locked;
 
@@ -1370,11 +1367,11 @@ out_unlock:
 	goto out;
 }
 
-int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
-			  struct btrfs_trans_handle *trans,
+int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
 			  struct btrfs_block_group_cache *block_group,
 			  struct btrfs_path *path)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct inode *inode;
 	int ret = 0;
@@ -1386,7 +1383,7 @@ int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
 	}
 	spin_unlock(&block_group->lock);
 
-	inode = lookup_free_space_inode(fs_info, block_group, path);
+	inode = lookup_free_space_inode(block_group, path);
 	if (IS_ERR(inode))
 		return 0;
 
@@ -3040,11 +3037,11 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
  * returns zero and sets up cluster if things worked out, otherwise
  * it returns -enospc
  */
-int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
-			     struct btrfs_block_group_cache *block_group,
+int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
 			     struct btrfs_free_cluster *cluster,
 			     u64 offset, u64 bytes, u64 empty_size)
 {
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry, *tmp;
 	LIST_HEAD(bitmaps);
@@ -3366,10 +3363,6 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
 		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
 					   1);
 		BUG_ON(!em); /* logic error, can't happen */
-		/*
-		 * remove_extent_mapping() will delete us from the pinned_chunks
-		 * list, which is protected by the chunk mutex.
-		 */
 		remove_extent_mapping(em_tree, em);
 		write_unlock(&em_tree->lock);
 		mutex_unlock(&fs_info->chunk_mutex);
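
The pattern in every hunk above is the same: the explicit struct btrfs_fs_info *fs_info parameter is dropped and the callee derives it from a structure it already receives (block_group->fs_info or trans->fs_info). Below is a minimal sketch of how a call site adapts; the caller function shown is hypothetical, since the real callers live elsewhere in the btrfs tree and are not part of this diff.

/*
 * Illustrative only: the fs_info argument disappears from the call,
 * because lookup_free_space_inode() now reads block_group->fs_info itself.
 */
static struct inode *example_get_cache_inode(
		struct btrfs_block_group_cache *block_group,
		struct btrfs_path *path)
{
	/* old: inode = lookup_free_space_inode(fs_info, block_group, path); */
	return lookup_free_space_inode(block_group, path);
}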