From 62142be363ae902c948729eeb35851ddf34b317d Mon Sep 17 00:00:00 2001 From: Gabriel Niebler Date: Wed, 9 Mar 2022 14:50:38 +0100 Subject: btrfs: introduce btrfs_for_each_slot iterator macro There is a common pattern when searching for a key in btrfs: * Call btrfs_search_slot to find the slot for the key * Enter an endless loop: * If the found slot is beyond the number of items in the current leaf, check the next leaf * If the key is still not found in the next leaf, terminate the loop * Otherwise do something with the found key * Increment the current slot and continue To reduce code duplication, we can replace this code pattern with an iterator macro, similar to the existing for_each_X macros found elsewhere in the kernel. This also makes the code easier to understand for newcomers by putting a name to the encapsulated functionality. Signed-off-by: Marcos Paulo de Souza Signed-off-by: Gabriel Niebler Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 077c95e9baa5..1c50e051641d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3039,6 +3039,35 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, struct btrfs_path *path); +int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, + struct btrfs_path *path); + +/* + * Search in @root for a given @key and iterate over all valid items found, + * storing the key of each item in @found_key. + * + * @root: The root of the tree to search. + * @key: The key we are looking for. + * @found_key: Will hold the key of the found item. + * @path: Holds the current slot/leaf. + * @iter_ret: Contains the value returned from btrfs_search_slot or + * btrfs_get_next_valid_item, whichever was executed last. + * + * @iter_ret is an output variable that will contain the return value of + * btrfs_search_slot, if it encountered an error, or the value returned from + * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid + * slot was found, 1 if there were no more leaves, and <0 if there was an error. + * + * It's recommended to use a separate variable for iter_ret and then use it to + * set the function return value so there's no confusion of the 0/1/errno + * values stemming from btrfs_search_slot. + */ +#define btrfs_for_each_slot(root, key, found_key, path, iter_ret) \ + for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0); \ + (iter_ret) >= 0 && \ + (iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \ + (path)->slots[0]++ \ + ) + static inline int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *p, u64 time_seq) { -- cgit v1.2.3 From a1fd0c35ffe349a7bbca27dae362932895ee8c4d Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 14 Mar 2022 18:12:32 -0700 Subject: btrfs: allocate inode outside of btrfs_new_inode() Instead of calling new_inode() and inode_init_owner() inside of btrfs_new_inode(), do it in the callers. This allows us to pass in just the inode instead of the mnt_userns and mode and removes the need for memalloc_nofs_{save,restore}() since we do it before starting a transaction. In create_subvol(), it also means we no longer have to look up the inode again to instantiate it. This also paves the way for some more cleanups in later patches.
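A usage sketch for the btrfs_for_each_slot macro introduced in the first patch above (a hypothetical caller, not part of either patch: the function name, the objectid cursor, and the processing step are invented for illustration; the separate iter_ret variable follows the convention recommended in the macro's comment):

static int example_walk_items(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int iter_ret;
	int ret = 0;

	/* Position the search at the smallest possible key of @objectid. */
	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		if (found_key.objectid != objectid)
			break;
		/* ... process the item at path->nodes[0], path->slots[0] ... */
	}
	/* iter_ret is 1 when the leaves ran out, <0 on error, 0 after a break. */
	if (iter_ret < 0)
		ret = iter_ret;

	btrfs_free_path(path);
	return ret;
}

Keeping iter_ret separate from the function's own return value avoids mixing the 0/1/errno convention of the search helpers into the caller's error reporting.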
This also removes the comments about Smack checking i_op, which are no longer true since commit 5d6c31910bc0 ("xattr: Add __vfs_{get,set,remove}xattr helpers"). Now it checks inode->i_opflags & IOP_XATTR, which is set based on sb->s_xattr. Signed-off-by: Omar Sandoval Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 5 +- fs/btrfs/inode.c | 287 +++++++++++++++++++++++++++++-------------------------- fs/btrfs/ioctl.c | 22 +++-- 3 files changed, 169 insertions(+), 145 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 1c50e051641d..d8f7479c9d17 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3284,10 +3284,11 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, unsigned int extra_bits, struct extent_state **cached_state); +struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns, + struct inode *dir); int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *new_root, struct btrfs_root *parent_root, - struct user_namespace *mnt_userns); + struct inode *inode); void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state, unsigned *bits); void btrfs_clear_delalloc_extent(struct inode *inode, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 752ff02ab1a0..db79315efe8f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6069,15 +6069,12 @@ static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir) btrfs_sync_inode_flags_to_i_flags(inode); } -static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct user_namespace *mnt_userns, - struct inode *dir, - const char *name, int name_len, - umode_t mode, u64 *index) +static int btrfs_new_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode, + struct inode *dir, const char *name, int name_len, + u64 *index) { struct btrfs_fs_info *fs_info = root->fs_info; - struct inode *inode; struct btrfs_inode_item *inode_item; struct btrfs_key *location; struct btrfs_path *path; @@ -6087,20 +6084,11 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, u32 sizes[2]; struct btrfs_item_batch batch; unsigned long ptr; - unsigned int nofs_flag; int ret; path = btrfs_alloc_path(); if (!path) - return ERR_PTR(-ENOMEM); - - nofs_flag = memalloc_nofs_save(); - inode = new_inode(fs_info->sb); - memalloc_nofs_restore(nofs_flag); - if (!inode) { - btrfs_free_path(path); - return ERR_PTR(-ENOMEM); - } + return -ENOMEM; /* * O_TMPFILE, set link count to 0, so that after this point, @@ -6112,8 +6100,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, ret = btrfs_get_free_objectid(root, &objectid); if (ret) { btrfs_free_path(path); - iput(inode); - return ERR_PTR(ret); + return ret; } inode->i_ino = objectid; @@ -6123,8 +6110,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, ret = btrfs_set_inode_index(BTRFS_I(dir), index); if (ret) { btrfs_free_path(path); - iput(inode); - return ERR_PTR(ret); + return ret; } } else if (dir) { *index = 0; @@ -6136,13 +6122,14 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, */ BTRFS_I(inode)->index_cnt = 2; BTRFS_I(inode)->dir_index = *index; - BTRFS_I(inode)->root = btrfs_grab_root(root); + if (!BTRFS_I(inode)->root) + BTRFS_I(inode)->root = btrfs_grab_root(root); BTRFS_I(inode)->generation = trans->transid; inode->i_generation = 
BTRFS_I(inode)->generation; btrfs_inherit_iflags(inode, dir); - if (S_ISREG(mode)) { + if (S_ISREG(inode->i_mode)) { if (btrfs_test_opt(fs_info, NODATASUM)) BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; if (btrfs_test_opt(fs_info, NODATACOW)) @@ -6187,10 +6174,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, location->type = BTRFS_INODE_ITEM_KEY; ret = btrfs_insert_inode_locked(inode); - if (ret < 0) { - iput(inode); + if (ret < 0) goto fail; - } batch.keys = &key[0]; batch.data_sizes = &sizes[0]; @@ -6200,8 +6185,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, if (ret != 0) goto fail_unlock; - inode_init_owner(mnt_userns, inode, dir, mode); - inode->i_mtime = current_time(inode); inode->i_atime = inode->i_mtime; inode->i_ctime = inode->i_mtime; @@ -6238,15 +6221,20 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, "error inheriting props for ino %llu (root %llu): %d", btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); - return inode; + return 0; fail_unlock: + /* + * discard_new_inode() calls iput(), but the caller owns the reference + * to the inode. + */ + ihold(inode); discard_new_inode(inode); fail: if (dir && name) BTRFS_I(dir)->index_cnt--; btrfs_free_path(path); - return ERR_PTR(ret); + return ret; } /* @@ -6344,37 +6332,36 @@ static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir, struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; - struct inode *inode = NULL; + struct inode *inode; int err; u64 index = 0; + inode = new_inode(dir->i_sb); + if (!inode) + return -ENOMEM; + inode_init_owner(mnt_userns, inode, dir, mode); + inode->i_op = &btrfs_special_inode_operations; + init_special_inode(inode, inode->i_mode, rdev); + /* * 2 for inode item and ref * 2 for dir items * 1 for xattr if selinux is on */ trans = btrfs_start_transaction(root, 5); - if (IS_ERR(trans)) + if (IS_ERR(trans)) { + iput(inode); return PTR_ERR(trans); + } - inode = btrfs_new_inode(trans, root, mnt_userns, dir, - dentry->d_name.name, dentry->d_name.len, - mode, &index); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); + err = btrfs_new_inode(trans, root, inode, dir, dentry->d_name.name, + dentry->d_name.len, &index); + if (err) { + iput(inode); inode = NULL; goto out_unlock; } - /* - * If the active LSM wants to access the inode during - * d_instantiate it needs these. Smack checks to see - * if the filesystem supports xattrs by looking at the - * ops vector. 
- */ - inode->i_op = &btrfs_special_inode_operations; - init_special_inode(inode, inode->i_mode, rdev); - err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) goto out_unlock; @@ -6403,36 +6390,36 @@ static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir, struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; - struct inode *inode = NULL; + struct inode *inode; int err; u64 index = 0; + inode = new_inode(dir->i_sb); + if (!inode) + return -ENOMEM; + inode_init_owner(mnt_userns, inode, dir, mode); + inode->i_fop = &btrfs_file_operations; + inode->i_op = &btrfs_file_inode_operations; + inode->i_mapping->a_ops = &btrfs_aops; + /* * 2 for inode item and ref * 2 for dir items * 1 for xattr if selinux is on */ trans = btrfs_start_transaction(root, 5); - if (IS_ERR(trans)) + if (IS_ERR(trans)) { + iput(inode); return PTR_ERR(trans); + } - inode = btrfs_new_inode(trans, root, mnt_userns, dir, - dentry->d_name.name, dentry->d_name.len, - mode, &index); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); + err = btrfs_new_inode(trans, root, inode, dir, dentry->d_name.name, + dentry->d_name.len, &index); + if (err) { + iput(inode); inode = NULL; goto out_unlock; } - /* - * If the active LSM wants to access the inode during - * d_instantiate it needs these. Smack checks to see - * if the filesystem supports xattrs by looking at the - * ops vector. - */ - inode->i_fop = &btrfs_file_operations; - inode->i_op = &btrfs_file_inode_operations; - inode->i_mapping->a_ops = &btrfs_aops; err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) @@ -6541,34 +6528,38 @@ static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir, struct dentry *dentry, umode_t mode) { struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); - struct inode *inode = NULL; + struct inode *inode; struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; - int err = 0; + int err; u64 index = 0; + inode = new_inode(dir->i_sb); + if (!inode) + return -ENOMEM; + inode_init_owner(mnt_userns, inode, dir, S_IFDIR | mode); + inode->i_op = &btrfs_dir_inode_operations; + inode->i_fop = &btrfs_dir_file_operations; + /* * 2 items for inode and ref * 2 items for dir items * 1 for xattr if selinux is on */ trans = btrfs_start_transaction(root, 5); - if (IS_ERR(trans)) + if (IS_ERR(trans)) { + iput(inode); return PTR_ERR(trans); + } - inode = btrfs_new_inode(trans, root, mnt_userns, dir, - dentry->d_name.name, dentry->d_name.len, - S_IFDIR | mode, &index); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); + err = btrfs_new_inode(trans, root, inode, dir, dentry->d_name.name, + dentry->d_name.len, &index); + if (err) { + iput(inode); inode = NULL; goto out_fail; } - /* these must be set before we unlock the inode */ - inode->i_op = &btrfs_dir_inode_operations; - inode->i_fop = &btrfs_dir_file_operations; - err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) goto out_fail; @@ -8724,25 +8715,39 @@ out: return ret; } +struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns, + struct inode *dir) +{ + struct inode *inode; + + inode = new_inode(dir->i_sb); + if (inode) { + /* + * Subvolumes don't inherit the sgid bit or the parent's gid if + * the parent's sgid bit is set. This is probably a bug. 
+ */ + inode_init_owner(mnt_userns, inode, NULL, + S_IFDIR | (~current_umask() & S_IRWXUGO)); + inode->i_op = &btrfs_dir_inode_operations; + inode->i_fop = &btrfs_dir_file_operations; + } + return inode; +} + /* * create a new subvolume directory/inode (helper for the ioctl). */ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *new_root, struct btrfs_root *parent_root, - struct user_namespace *mnt_userns) + struct inode *inode) { - struct inode *inode; + struct btrfs_root *new_root = BTRFS_I(inode)->root; int err; u64 index = 0; - inode = btrfs_new_inode(trans, new_root, mnt_userns, NULL, "..", 2, - S_IFDIR | (~current_umask() & S_IRWXUGO), - &index); - if (IS_ERR(inode)) - return PTR_ERR(inode); - inode->i_op = &btrfs_dir_inode_operations; - inode->i_fop = &btrfs_dir_file_operations; + err = btrfs_new_inode(trans, new_root, inode, NULL, "..", 2, &index); + if (err) + return err; unlock_new_inode(inode); @@ -8753,8 +8758,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, new_root->root_key.objectid, err); err = btrfs_update_inode(trans, new_root, BTRFS_I(inode)); - - iput(inode); return err; } @@ -9231,31 +9234,36 @@ out_notrans: return ret; } +static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns, + struct inode *dir) +{ + struct inode *inode; + + inode = new_inode(dir->i_sb); + if (inode) { + inode_init_owner(mnt_userns, inode, dir, + S_IFCHR | WHITEOUT_MODE); + inode->i_op = &btrfs_special_inode_operations; + init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); + } + return inode; +} + static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct user_namespace *mnt_userns, - struct inode *dir, + struct inode *inode, struct inode *dir, struct dentry *dentry) { int ret; - struct inode *inode; u64 index; - inode = btrfs_new_inode(trans, root, mnt_userns, dir, - dentry->d_name.name, - dentry->d_name.len, - S_IFCHR | WHITEOUT_MODE, - &index); - - if (IS_ERR(inode)) { - ret = PTR_ERR(inode); + ret = btrfs_new_inode(trans, root, inode, dir, dentry->d_name.name, + dentry->d_name.len, &index); + if (ret) { + iput(inode); return ret; } - inode->i_op = &btrfs_special_inode_operations; - init_special_inode(inode, inode->i_mode, - WHITEOUT_DEV); - ret = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (ret) @@ -9282,6 +9290,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns, unsigned int flags) { struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); + struct inode *whiteout_inode; struct btrfs_trans_handle *trans; unsigned int trans_num_items; struct btrfs_root *root = BTRFS_I(old_dir)->root; @@ -9336,6 +9345,12 @@ static int btrfs_rename(struct user_namespace *mnt_userns, if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) filemap_flush(old_inode->i_mapping); + if (flags & RENAME_WHITEOUT) { + whiteout_inode = new_whiteout_inode(mnt_userns, old_dir); + if (!whiteout_inode) + return -ENOMEM; + } + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { /* Close the race window with snapshot create/destroy ioctl */ down_read(&fs_info->subvol_sem); @@ -9472,9 +9487,9 @@ static int btrfs_rename(struct user_namespace *mnt_userns, rename_ctx.index, new_dentry->d_parent); if (flags & RENAME_WHITEOUT) { - ret = btrfs_whiteout_for_rename(trans, root, mnt_userns, + ret = btrfs_whiteout_for_rename(trans, root, whiteout_inode, old_dir, old_dentry); - + whiteout_inode = NULL; if (ret) { btrfs_abort_transaction(trans, ret); goto out_fail; @@ -9486,7 +9501,8 @@ 
out_fail: out_notrans: if (old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); - + if (flags & RENAME_WHITEOUT) + iput(whiteout_inode); return ret; } @@ -9705,7 +9721,7 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_path *path; struct btrfs_key key; - struct inode *inode = NULL; + struct inode *inode; int err; u64 index = 0; int name_len; @@ -9718,6 +9734,14 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) return -ENAMETOOLONG; + inode = new_inode(dir->i_sb); + if (!inode) + return -ENOMEM; + inode_init_owner(mnt_userns, inode, dir, S_IFLNK | S_IRWXUGO); + inode->i_op = &btrfs_symlink_inode_operations; + inode_nohighmem(inode); + inode->i_mapping->a_ops = &btrfs_aops; + /* * 2 items for inode item and ref * 2 items for dir items @@ -9726,28 +9750,19 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, * 1 item for xattr if selinux is on */ trans = btrfs_start_transaction(root, 7); - if (IS_ERR(trans)) + if (IS_ERR(trans)) { + iput(inode); return PTR_ERR(trans); + } - inode = btrfs_new_inode(trans, root, mnt_userns, dir, - dentry->d_name.name, dentry->d_name.len, - S_IFLNK | S_IRWXUGO, &index); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); + err = btrfs_new_inode(trans, root, inode, dir, dentry->d_name.name, + dentry->d_name.len, &index); + if (err) { + iput(inode); inode = NULL; goto out_unlock; } - /* - * If the active LSM wants to access the inode during - * d_instantiate it needs these. Smack checks to see - * if the filesystem supports xattrs by looking at the - * ops vector. - */ - inode->i_fop = &btrfs_file_operations; - inode->i_op = &btrfs_file_inode_operations; - inode->i_mapping->a_ops = &btrfs_aops; - err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) goto out_unlock; @@ -9783,8 +9798,6 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, btrfs_mark_buffer_dirty(leaf); btrfs_free_path(path); - inode->i_op = &btrfs_symlink_inode_operations; - inode_nohighmem(inode); inode_set_bytes(inode, name_len); btrfs_i_size_write(BTRFS_I(inode), name_len); err = btrfs_update_inode(trans, root, BTRFS_I(inode)); @@ -10059,30 +10072,34 @@ static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; - struct inode *inode = NULL; + struct inode *inode; u64 index; - int ret = 0; + int ret; + + inode = new_inode(dir->i_sb); + if (!inode) + return -ENOMEM; + inode_init_owner(mnt_userns, inode, dir, mode); + inode->i_fop = &btrfs_file_operations; + inode->i_op = &btrfs_file_inode_operations; + inode->i_mapping->a_ops = &btrfs_aops; /* * 5 units required for adding orphan entry */ trans = btrfs_start_transaction(root, 5); - if (IS_ERR(trans)) + if (IS_ERR(trans)) { + iput(inode); return PTR_ERR(trans); + } - inode = btrfs_new_inode(trans, root, mnt_userns, dir, NULL, 0, - mode, &index); - if (IS_ERR(inode)) { - ret = PTR_ERR(inode); + ret = btrfs_new_inode(trans, root, inode, dir, NULL, 0, &index); + if (ret) { + iput(inode); inode = NULL; goto out; } - inode->i_fop = &btrfs_file_operations; - inode->i_op = &btrfs_file_inode_operations; - - inode->i_mapping->a_ops = &btrfs_aops; - ret = btrfs_init_inode_security(trans, inode, dir, NULL); if (ret) goto out; diff --git 
a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4a015579a46e..4b903517fdc3 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -587,6 +587,12 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, if (ret < 0) goto out_root_item; + inode = btrfs_new_subvol_inode(mnt_userns, dir); + if (!inode) { + ret = -ENOMEM; + goto out_anon_dev; + } + btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); /* * The same as the snapshot creation, please see the comment @@ -594,13 +600,13 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, */ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false); if (ret) - goto out_anon_dev; + goto out_inode; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_subvolume_release_metadata(root, &block_rsv); - goto out_anon_dev; + goto out_inode; } trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; @@ -683,16 +689,16 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, } /* anon_dev is owned by new_root now. */ anon_dev = 0; + BTRFS_I(inode)->root = new_root; + /* ... and new_root is owned by inode now. */ ret = btrfs_record_root_in_trans(trans, new_root); if (ret) { - btrfs_put_root(new_root); btrfs_abort_transaction(trans, ret); goto out; } - ret = btrfs_create_subvol_root(trans, new_root, root, mnt_userns); - btrfs_put_root(new_root); + ret = btrfs_create_subvol_root(trans, root, inode); if (ret) { /* We potentially lose an unused inode item here */ btrfs_abort_transaction(trans, ret); @@ -745,11 +751,11 @@ out: ret = btrfs_commit_transaction(trans); if (!ret) { - inode = btrfs_lookup_dentry(dir, dentry); - if (IS_ERR(inode)) - return PTR_ERR(inode); d_instantiate(dentry, inode); + inode = NULL; } +out_inode: + iput(inode); out_anon_dev: if (anon_dev) free_anon_bdev(anon_dev); -- cgit v1.2.3 From 3538d68dbd97a2f5599bf39aeee47f027417fc39 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 14 Mar 2022 18:12:34 -0700 Subject: btrfs: reserve correct number of items for inode creation The various inode creation code paths do not account for the compression property, POSIX ACLs, or the parent inode item when starting a transaction. Fix it by refactoring all of these code paths to use a new function, btrfs_new_inode_prepare(), which computes the correct number of items. To do so, it needs to know whether POSIX ACLs will be created, so move the ACL creation into that function. To reduce the number of arguments that need to be passed around for inode creation, define struct btrfs_new_inode_args containing all of the relevant information. btrfs_new_inode_prepare() will also be a good place to set up the fscrypt context and encrypted filename in the future. 
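As a concrete illustration of the new accounting (a hypothetical case, not taken from the patch: a directory created under a parent directory that carries a default POSIX ACL, with SELinux enabled and no compression property set), btrfs_new_inode_prepare() would compute:

  1 inode item
+ 1 default ACL xattr (args->default_acl)
+ 1 access ACL xattr (args->acl)
+ 1 LSM security xattr (dir->i_security)
+ 4 inode ref, dir item, dir index, parent inode item update
= 8 transaction items

The old fixed reservation of 5 ("2 for inode item and ref, 2 for dir items, 1 for xattr if selinux is on") would have undercounted this case by 3.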
Reviewed-by: Sweet Tea Dorminy Signed-off-by: Omar Sandoval Signed-off-by: David Sterba --- fs/btrfs/acl.c | 36 +------- fs/btrfs/ctree.h | 34 ++++++-- fs/btrfs/inode.c | 249 ++++++++++++++++++++++++++++++++++++++++--------------- fs/btrfs/ioctl.c | 82 ++++++++++++------ 4 files changed, 270 insertions(+), 131 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index a6909ec9bc38..548d6a5477b4 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -55,8 +55,8 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu) return acl; } -static int __btrfs_set_acl(struct btrfs_trans_handle *trans, - struct inode *inode, struct posix_acl *acl, int type) +int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode, + struct posix_acl *acl, int type) { int ret, size = 0; const char *name; @@ -127,35 +127,3 @@ int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode, inode->i_mode = old_mode; return ret; } - -int btrfs_init_acl(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *dir) -{ - struct posix_acl *default_acl, *acl; - int ret = 0; - - /* this happens with subvols */ - if (!dir) - return 0; - - ret = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl); - if (ret) - return ret; - - if (default_acl) { - ret = __btrfs_set_acl(trans, inode, default_acl, - ACL_TYPE_DEFAULT); - posix_acl_release(default_acl); - } - - if (acl) { - if (!ret) - ret = __btrfs_set_acl(trans, inode, acl, - ACL_TYPE_ACCESS); - posix_acl_release(acl); - } - - if (!default_acl && !acl) - cache_no_acl(inode); - return ret; -} diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d8f7479c9d17..768339a3f99d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3284,11 +3284,32 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, unsigned int extra_bits, struct extent_state **cached_state); +struct btrfs_new_inode_args { + /* Input */ + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool orphan; + bool subvol; + + /* + * Output from btrfs_new_inode_prepare(), input to + * btrfs_create_new_inode(). 
+ */ + struct posix_acl *default_acl; + struct posix_acl *acl; +}; +int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, + unsigned int *trans_num_items); +int btrfs_create_new_inode(struct btrfs_trans_handle *trans, + struct btrfs_new_inode_args *args, + u64 *index); +void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args); struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns, struct inode *dir); int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, struct btrfs_root *parent_root, - struct inode *inode); + struct btrfs_new_inode_args *args); void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state, unsigned *bits); void btrfs_clear_delalloc_extent(struct inode *inode, @@ -3846,15 +3867,16 @@ static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag) struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu); int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode, struct posix_acl *acl, int type); -int btrfs_init_acl(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *dir); +int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode, + struct posix_acl *acl, int type); #else #define btrfs_get_acl NULL #define btrfs_set_acl NULL -static inline int btrfs_init_acl(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *dir) +static inline int __btrfs_set_acl(struct btrfs_trans_handle *trans, + struct inode *inode, struct posix_acl *acl, + int type) { - return 0; + return -EOPNOTSUPP; } #endif diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ad446bbebf23..beafdb4ff873 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -222,15 +222,25 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode, static int btrfs_dirty_inode(struct inode *inode); static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, - struct inode *inode, struct inode *dir, - const struct qstr *qstr) + struct btrfs_new_inode_args *args) { int err; - err = btrfs_init_acl(trans, inode, dir); - if (!err) - err = btrfs_xattr_security_init(trans, inode, dir, qstr); - return err; + if (args->default_acl) { + err = __btrfs_set_acl(trans, args->inode, args->default_acl, + ACL_TYPE_DEFAULT); + if (err) + return err; + } + if (args->acl) { + err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS); + if (err) + return err; + } + if (!args->default_acl && !args->acl) + cache_no_acl(args->inode); + return btrfs_xattr_security_init(trans, args->inode, args->dir, + &args->dentry->d_name); } /* @@ -6038,6 +6048,54 @@ static int btrfs_insert_inode_locked(struct inode *inode) btrfs_find_actor, &args); } +int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, + unsigned int *trans_num_items) +{ + struct inode *dir = args->dir; + struct inode *inode = args->inode; + int ret; + + ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); + if (ret) + return ret; + + /* 1 to add inode item */ + *trans_num_items = 1; + /* 1 to add compression property */ + if (BTRFS_I(dir)->prop_compress) + (*trans_num_items)++; + /* 1 to add default ACL xattr */ + if (args->default_acl) + (*trans_num_items)++; + /* 1 to add access ACL xattr */ + if (args->acl) + (*trans_num_items)++; +#ifdef CONFIG_SECURITY + /* 1 to add LSM xattr */ + if (dir->i_security) + (*trans_num_items)++; +#endif + if (args->orphan) { + /* 1 to add orphan item */ + (*trans_num_items)++; + } else { + /* + * 1 to add inode ref + * 1 to add 
dir item + * 1 to add dir index + * 1 to update parent inode item + */ + *trans_num_items += 4; + } + return 0; +} + +void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args) +{ + posix_acl_release(args->acl); + posix_acl_release(args->default_acl); +} + /* * Inherit flags from the parent inode. * @@ -6069,12 +6127,16 @@ static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir) btrfs_sync_inode_flags_to_i_flags(inode); } -static int btrfs_new_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, - struct inode *dir, const char *name, int name_len, +int btrfs_create_new_inode(struct btrfs_trans_handle *trans, + struct btrfs_new_inode_args *args, u64 *index) { - struct btrfs_fs_info *fs_info = root->fs_info; + struct inode *dir = args->subvol ? NULL : args->dir; + struct inode *inode = args->inode; + const char *name; + int name_len; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_root *root; struct btrfs_inode_item *inode_item; struct btrfs_key *location; struct btrfs_path *path; @@ -6086,6 +6148,17 @@ static int btrfs_new_inode(struct btrfs_trans_handle *trans, unsigned long ptr; int ret; + if (args->subvol) { + name = ".."; + name_len = 2; + } else if (args->orphan) { + name = NULL; + name_len = 0; + } else { + name = args->dentry->d_name.name; + name_len = args->dentry->d_name.len; + } + path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -6097,6 +6170,10 @@ static int btrfs_new_inode(struct btrfs_trans_handle *trans, if (!name) set_nlink(inode, 0); + if (!args->subvol) + BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); + root = BTRFS_I(inode)->root; + ret = btrfs_get_free_objectid(root, &objectid); if (ret) { btrfs_free_path(path); @@ -6122,8 +6199,6 @@ static int btrfs_new_inode(struct btrfs_trans_handle *trans, */ BTRFS_I(inode)->index_cnt = 2; BTRFS_I(inode)->dir_index = *index; - if (!BTRFS_I(inode)->root) - BTRFS_I(inode)->root = btrfs_grab_root(root); BTRFS_I(inode)->generation = trans->transid; inode->i_generation = BTRFS_I(inode)->generation; @@ -6331,30 +6406,37 @@ static int btrfs_create_common(struct inode *dir, struct dentry *dentry, { struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_new_inode_args new_inode_args = { + .dir = dir, + .dentry = dentry, + .inode = inode, + }; + unsigned int trans_num_items; struct btrfs_trans_handle *trans; int err; u64 index = 0; - /* - * 2 for inode item and ref - * 2 for dir items - * 1 for xattr if selinux is on - */ - trans = btrfs_start_transaction(root, 5); + err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); + if (err) { + iput(inode); + return err; + } + + trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { iput(inode); - return PTR_ERR(trans); + err = PTR_ERR(trans); + goto out_new_inode_args; } - err = btrfs_new_inode(trans, root, inode, dir, dentry->d_name.name, - dentry->d_name.len, &index); + err = btrfs_create_new_inode(trans, &new_inode_args, &index); if (err) { iput(inode); inode = NULL; goto out_unlock; } - err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); + err = btrfs_init_inode_security(trans, &new_inode_args); if (err) goto out_unlock; @@ -6376,6 +6458,8 @@ out_unlock: discard_new_inode(inode); } btrfs_btree_balance_dirty(fs_info); +out_new_inode_args: + btrfs_new_inode_args_destroy(&new_inode_args); return err; } @@ -8653,13 +8737,14 @@ struct inode *btrfs_new_subvol_inode(struct user_namespace 
*mnt_userns, */ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, struct btrfs_root *parent_root, - struct inode *inode) + struct btrfs_new_inode_args *args) { + struct inode *inode = args->inode; struct btrfs_root *new_root = BTRFS_I(inode)->root; int err; u64 index = 0; - err = btrfs_new_inode(trans, new_root, inode, NULL, "..", 2, &index); + err = btrfs_create_new_inode(trans, args, &index); if (err) return err; @@ -9164,22 +9249,22 @@ static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns, } static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *inode, struct inode *dir, - struct dentry *dentry) + struct btrfs_new_inode_args *args) { + struct inode *inode = args->inode; + struct inode *dir = args->dir; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct dentry *dentry = args->dentry; int ret; u64 index; - ret = btrfs_new_inode(trans, root, inode, dir, dentry->d_name.name, - dentry->d_name.len, &index); + ret = btrfs_create_new_inode(trans, args, &index); if (ret) { iput(inode); return ret; } - ret = btrfs_init_inode_security(trans, inode, dir, - &dentry->d_name); + ret = btrfs_init_inode_security(trans, args); if (ret) goto out; @@ -9204,7 +9289,10 @@ static int btrfs_rename(struct user_namespace *mnt_userns, unsigned int flags) { struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); - struct inode *whiteout_inode; + struct btrfs_new_inode_args whiteout_args = { + .dir = old_dir, + .dentry = old_dentry, + }; struct btrfs_trans_handle *trans; unsigned int trans_num_items; struct btrfs_root *root = BTRFS_I(old_dir)->root; @@ -9260,9 +9348,15 @@ static int btrfs_rename(struct user_namespace *mnt_userns, filemap_flush(old_inode->i_mapping); if (flags & RENAME_WHITEOUT) { - whiteout_inode = new_whiteout_inode(mnt_userns, old_dir); - if (!whiteout_inode) + whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir); + if (!whiteout_args.inode) return -ENOMEM; + ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items); + if (ret) + goto out_whiteout_inode; + } else { + /* 1 to update the old parent inode. 
*/ + trans_num_items = 1; } if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { @@ -9274,24 +9368,23 @@ static int btrfs_rename(struct user_namespace *mnt_userns, * 1 to add new root ref * 1 to add new root backref */ - trans_num_items = 4; + trans_num_items += 4; } else { /* * 1 to update inode * 1 to remove old inode ref * 1 to add new inode ref */ - trans_num_items = 3; + trans_num_items += 3; } /* * 1 to remove old dir item * 1 to remove old dir index - * 1 to update old parent inode * 1 to add new dir item * 1 to add new dir index - * 1 to update new parent inode (if it's not the same as the old parent) */ - trans_num_items += 6; + trans_num_items += 4; + /* 1 to update new parent inode if it's not the same as the old parent */ if (new_dir != old_dir) trans_num_items++; if (new_inode) { @@ -9304,8 +9397,6 @@ static int btrfs_rename(struct user_namespace *mnt_userns, */ trans_num_items += 5; } - if (flags & RENAME_WHITEOUT) - trans_num_items += 5; trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { ret = PTR_ERR(trans); @@ -9401,9 +9492,8 @@ static int btrfs_rename(struct user_namespace *mnt_userns, rename_ctx.index, new_dentry->d_parent); if (flags & RENAME_WHITEOUT) { - ret = btrfs_whiteout_for_rename(trans, root, whiteout_inode, - old_dir, old_dentry); - whiteout_inode = NULL; + ret = btrfs_whiteout_for_rename(trans, &whiteout_args); + whiteout_args.inode = NULL; if (ret) { btrfs_abort_transaction(trans, ret); goto out_fail; @@ -9416,7 +9506,10 @@ out_notrans: if (old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); if (flags & RENAME_WHITEOUT) - iput(whiteout_inode); + btrfs_new_inode_args_destroy(&whiteout_args); +out_whiteout_inode: + if (flags & RENAME_WHITEOUT) + iput(whiteout_args.inode); return ret; } @@ -9636,6 +9729,11 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, struct btrfs_path *path; struct btrfs_key key; struct inode *inode; + struct btrfs_new_inode_args new_inode_args = { + .dir = dir, + .dentry = dentry, + }; + unsigned int trans_num_items; int err; u64 index = 0; int name_len; @@ -9656,28 +9754,30 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, inode_nohighmem(inode); inode->i_mapping->a_ops = &btrfs_aops; - /* - * 2 items for inode item and ref - * 2 items for dir items - * 1 item for updating parent inode item - * 1 item for the inline extent item - * 1 item for xattr if selinux is on - */ - trans = btrfs_start_transaction(root, 7); + new_inode_args.inode = inode; + err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); + if (err) { + iput(inode); + return err; + } + /* 1 additional item for the inline extent */ + trans_num_items++; + + trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { iput(inode); - return PTR_ERR(trans); + err = PTR_ERR(trans); + goto out_new_inode_args; } - err = btrfs_new_inode(trans, root, inode, dir, dentry->d_name.name, - dentry->d_name.len, &index); + err = btrfs_create_new_inode(trans, &new_inode_args, &index); if (err) { iput(inode); inode = NULL; goto out_unlock; } - err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); + err = btrfs_init_inode_security(trans, &new_inode_args); if (err) goto out_unlock; @@ -9736,6 +9836,8 @@ out_unlock: discard_new_inode(inode); } btrfs_btree_balance_dirty(fs_info); +out_new_inode_args: + btrfs_new_inode_args_destroy(&new_inode_args); return err; } @@ -9987,6 +10089,12 @@ static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, 
struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; struct inode *inode; + struct btrfs_new_inode_args new_inode_args = { + .dir = dir, + .dentry = dentry, + .orphan = true, + }; + unsigned int trans_num_items; u64 index; int ret; @@ -9998,23 +10106,28 @@ static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, inode->i_op = &btrfs_file_inode_operations; inode->i_mapping->a_ops = &btrfs_aops; - /* - * 5 units required for adding orphan entry - */ - trans = btrfs_start_transaction(root, 5); + new_inode_args.inode = inode; + ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); + if (ret) { + iput(inode); + return ret; + } + + trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { iput(inode); - return PTR_ERR(trans); + ret = PTR_ERR(trans); + goto out_new_inode_args; } - ret = btrfs_new_inode(trans, root, inode, dir, NULL, 0, &index); + ret = btrfs_create_new_inode(trans, &new_inode_args, &index); if (ret) { iput(inode); inode = NULL; goto out; } - ret = btrfs_init_inode_security(trans, inode, dir, NULL); + ret = btrfs_init_inode_security(trans, &new_inode_args); if (ret) goto out; @@ -10026,9 +10139,9 @@ static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, goto out; /* - * We set number of links to 0 in btrfs_new_inode(), and here we set - * it to 1 because d_tmpfile() will issue a warning if the count is 0, - * through: + * We set number of links to 0 in btrfs_create_new_inode(), and here we + * set it to 1 because d_tmpfile() will issue a warning if the count is + * 0, through: * * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() */ @@ -10041,6 +10154,8 @@ out: if (ret && inode) discard_new_inode(inode); btrfs_btree_balance_dirty(fs_info); +out_new_inode_args: + btrfs_new_inode_args_destroy(&new_inode_args); return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4b903517fdc3..bd10b8d44b5d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -544,6 +544,33 @@ int __pure btrfs_is_empty_uuid(u8 *uuid) return 1; } +/* + * Calculate the number of transaction items to reserve for creating a subvolume + * or snapshot, not including the inode, directory entries, or parent directory. + */ +static unsigned int create_subvol_num_items(struct btrfs_qgroup_inherit *inherit) +{ + /* + * 1 to add root block + * 1 to add root item + * 1 to add root ref + * 1 to add root backref + * 1 to add UUID item + * 1 to add qgroup info + * 1 to add qgroup limit + * + * Ideally the last two would only be accounted if qgroups are enabled, + * but that can change between now and the time we would insert them. 
+ */ + unsigned int num_items = 7; + + if (inherit) { + /* 2 to add qgroup relations for each inherited qgroup */ + num_items += 2 * inherit->num_qgroups; + } + return num_items; +} + static noinline int create_subvol(struct user_namespace *mnt_userns, struct inode *dir, struct dentry *dentry, struct btrfs_qgroup_inherit *inherit) @@ -560,7 +587,12 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, struct btrfs_root *new_root; struct btrfs_block_rsv block_rsv; struct timespec64 cur_time = current_time(dir); - struct inode *inode; + struct btrfs_new_inode_args new_inode_args = { + .dir = dir, + .dentry = dentry, + .subvol = true, + }; + unsigned int trans_num_items; int ret; dev_t anon_dev; u64 objectid; @@ -587,26 +619,27 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, if (ret < 0) goto out_root_item; - inode = btrfs_new_subvol_inode(mnt_userns, dir); - if (!inode) { + new_inode_args.inode = btrfs_new_subvol_inode(mnt_userns, dir); + if (!new_inode_args.inode) { ret = -ENOMEM; goto out_anon_dev; } + ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); + if (ret) + goto out_inode; + trans_num_items += create_subvol_num_items(inherit); btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); - /* - * The same as the snapshot creation, please see the comment - * of create_snapshot(). - */ - ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, false); + ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, + trans_num_items, false); if (ret) - goto out_inode; + goto out_new_inode_args; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_subvolume_release_metadata(root, &block_rsv); - goto out_inode; + goto out_new_inode_args; } trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; @@ -689,8 +722,8 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, } /* anon_dev is owned by new_root now. */ anon_dev = 0; - BTRFS_I(inode)->root = new_root; - /* ... and new_root is owned by inode now. */ + BTRFS_I(new_inode_args.inode)->root = new_root; + /* ... and new_root is owned by new_inode_args.inode now. 
*/ ret = btrfs_record_root_in_trans(trans, new_root); if (ret) { @@ -698,7 +731,7 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, goto out; } - ret = btrfs_create_subvol_root(trans, root, inode); + ret = btrfs_create_subvol_root(trans, root, &new_inode_args); if (ret) { /* We potentially lose an unused inode item here */ btrfs_abort_transaction(trans, ret); @@ -751,11 +784,13 @@ out: ret = btrfs_commit_transaction(trans); if (!ret) { - d_instantiate(dentry, inode); - inode = NULL; + d_instantiate(dentry, new_inode_args.inode); + new_inode_args.inode = NULL; } +out_new_inode_args: + btrfs_new_inode_args_destroy(&new_inode_args); out_inode: - iput(inode); + iput(new_inode_args.inode); out_anon_dev: if (anon_dev) free_anon_bdev(anon_dev); @@ -771,6 +806,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); struct inode *inode; struct btrfs_pending_snapshot *pending_snapshot; + unsigned int trans_num_items; struct btrfs_trans_handle *trans; int ret; @@ -808,16 +844,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, btrfs_init_block_rsv(&pending_snapshot->block_rsv, BTRFS_BLOCK_RSV_TEMP); /* - * 1 - parent dir inode - * 2 - dir entries - * 1 - root item - * 2 - root ref/backref - * 1 - root of snapshot - * 1 - UUID item + * 1 to add dir item + * 1 to add dir index + * 1 to update parent inode item */ + trans_num_items = create_subvol_num_items(inherit) + 3; ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, - &pending_snapshot->block_rsv, 8, - false); + &pending_snapshot->block_rsv, + trans_num_items, false); if (ret) goto free_pending; -- cgit v1.2.3 From caae78e032343df525b8d05c58b462827f10b2a3 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 14 Mar 2022 18:12:35 -0700 Subject: btrfs: move common inode creation code into btrfs_create_new_inode() All of our inode creation code paths duplicate the calls to btrfs_init_inode_security() and btrfs_add_link(). Subvolume creation additionally duplicates property inheritance and the call to btrfs_set_inode_index(). Fix this by moving the common code into btrfs_create_new_inode(). This accomplishes a few things at once: 1. It reduces code duplication. 2. It allows us to set up the inode completely before inserting the inode item, removing calls to btrfs_update_inode(). 3. It fixes a leak of an inode on disk in some error cases. For example, in btrfs_create(), if btrfs_new_inode() succeeds, then we have inserted an inode item and its inode ref. However, if something after that fails (e.g., btrfs_init_inode_security()), then we end the transaction and then decrement the link count on the inode. If the transaction is committed and the system crashes before the failed inode is deleted, then we leak that inode on disk. Instead, this refactoring aborts the transaction when we can't recover more gracefully. 4. It exposes various ways that subvolume creation diverges from mkdir in terms of inheriting flags, properties, permissions, and POSIX ACLs, a lot of which appears to be accidental. This patch explicitly does _not_ change the existing non-standard behavior, but it makes those differences more clear in the code and documents them so that we can discuss whether they should be changed. 
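To make the consolidated flow concrete, here is the common shape that every creation path now reduces to, condensed from the btrfs_create_common() hunk below (error paths abbreviated; note that btrfs_create_new_inode() aborts the transaction itself when it fails):

	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	/*
	 * Security xattrs/ACLs, property inheritance, inserting the inode
	 * item and ref, and btrfs_add_link() (or btrfs_orphan_add() for
	 * O_TMPFILE) all happen inside btrfs_create_new_inode() now.
	 */
	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (!err)
		d_instantiate_new(dentry, inode);

	btrfs_end_transaction(trans);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;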
Reviewed-by: Sweet Tea Dorminy Signed-off-by: Omar Sandoval Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 6 +- fs/btrfs/inode.c | 401 ++++++++++++++++++++++--------------------------------- fs/btrfs/ioctl.c | 45 +------ fs/btrfs/props.c | 40 +----- fs/btrfs/props.h | 4 - 5 files changed, 170 insertions(+), 326 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 768339a3f99d..9addfd5cdf4e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3302,14 +3302,10 @@ struct btrfs_new_inode_args { int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args, unsigned int *trans_num_items); int btrfs_create_new_inode(struct btrfs_trans_handle *trans, - struct btrfs_new_inode_args *args, - u64 *index); + struct btrfs_new_inode_args *args); void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args); struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns, struct inode *dir); -int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *parent_root, - struct btrfs_new_inode_args *args); void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state, unsigned *bits); void btrfs_clear_delalloc_extent(struct inode *inode, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index beafdb4ff873..dbf059926141 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6105,9 +6105,6 @@ static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir) { unsigned int flags; - if (!dir) - return; - flags = BTRFS_I(dir)->flags; if (flags & BTRFS_INODE_NOCOMPRESS) { @@ -6128,14 +6125,13 @@ static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir) } int btrfs_create_new_inode(struct btrfs_trans_handle *trans, - struct btrfs_new_inode_args *args, - u64 *index) + struct btrfs_new_inode_args *args) { - struct inode *dir = args->subvol ? NULL : args->dir; + struct inode *dir = args->dir; struct inode *inode = args->inode; - const char *name; - int name_len; - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + const char *name = args->orphan ? NULL : args->dentry->d_name.name; + int name_len = args->orphan ? 0 : args->dentry->d_name.len; + struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); struct btrfs_root *root; struct btrfs_inode_item *inode_item; struct btrfs_key *location; @@ -6148,49 +6144,31 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, unsigned long ptr; int ret; - if (args->subvol) { - name = ".."; - name_len = 2; - } else if (args->orphan) { - name = NULL; - name_len = 0; - } else { - name = args->dentry->d_name.name; - name_len = args->dentry->d_name.len; - } - path = btrfs_alloc_path(); if (!path) return -ENOMEM; - /* - * O_TMPFILE, set link count to 0, so that after this point, - * we fill in an inode item with the correct link count. - */ - if (!name) - set_nlink(inode, 0); - if (!args->subvol) BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); root = BTRFS_I(inode)->root; ret = btrfs_get_free_objectid(root, &objectid); - if (ret) { - btrfs_free_path(path); - return ret; - } + if (ret) + goto out; inode->i_ino = objectid; - if (dir && name) { + if (args->orphan) { + /* + * O_TMPFILE, set link count to 0, so that after this point, we + * fill in an inode item with the correct link count. 
+ */ + set_nlink(inode, 0); + } else { trace_btrfs_inode_request(dir); - ret = btrfs_set_inode_index(BTRFS_I(dir), index); - if (ret) { - btrfs_free_path(path); - return ret; - } - } else if (dir) { - *index = 0; + ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); + if (ret) + goto out; } /* * index_cnt is ignored for everything but a dir, @@ -6198,11 +6176,16 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, * number */ BTRFS_I(inode)->index_cnt = 2; - BTRFS_I(inode)->dir_index = *index; BTRFS_I(inode)->generation = trans->transid; inode->i_generation = BTRFS_I(inode)->generation; - btrfs_inherit_iflags(inode, dir); + /* + * Subvolumes don't inherit flags from their parent directory. + * Originally this was probably by accident, but we probably can't + * change it now without compatibility issues. + */ + if (!args->subvol) + btrfs_inherit_iflags(inode, dir); if (S_ISREG(inode->i_mode)) { if (btrfs_test_opt(fs_info, NODATASUM)) @@ -6212,6 +6195,55 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, BTRFS_INODE_NODATASUM; } + location = &BTRFS_I(inode)->location; + location->objectid = objectid; + location->offset = 0; + location->type = BTRFS_INODE_ITEM_KEY; + + ret = btrfs_insert_inode_locked(inode); + if (ret < 0) { + if (!args->orphan) + BTRFS_I(dir)->index_cnt--; + goto out; + } + + if (args->subvol) { + struct inode *parent; + + /* + * Subvolumes inherit properties from their parent subvolume, + * not the directory they were created in. + */ + parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, + BTRFS_I(dir)->root); + if (IS_ERR(parent)) { + ret = PTR_ERR(parent); + } else { + ret = btrfs_inode_inherit_props(trans, inode, parent); + iput(parent); + } + } else { + ret = btrfs_inode_inherit_props(trans, inode, dir); + } + if (ret) { + btrfs_err(fs_info, + "error inheriting props for ino %llu (root %llu): %d", + btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, + ret); + } + + /* + * Subvolumes don't inherit ACLs or get passed to the LSM. This is + * probably a bug. + */ + if (!args->subvol) { + ret = btrfs_init_inode_security(trans, args); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto discard; + } + } + /* * We could have gotten an inode number from somebody who was fsynced * and then removed in this same transaction, so let's just set full @@ -6226,7 +6258,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, sizes[0] = sizeof(struct btrfs_inode_item); - if (name) { + if (!args->orphan) { /* * Start new inodes with an inode_ref. This is slightly more * efficient for small numbers of hard links since they will @@ -6235,53 +6267,59 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, */ key[1].objectid = objectid; key[1].type = BTRFS_INODE_REF_KEY; - if (dir) - key[1].offset = btrfs_ino(BTRFS_I(dir)); - else + if (args->subvol) { key[1].offset = objectid; - - sizes[1] = name_len + sizeof(*ref); + sizes[1] = 2 + sizeof(*ref); + } else { + key[1].offset = btrfs_ino(BTRFS_I(dir)); + sizes[1] = name_len + sizeof(*ref); + } } - location = &BTRFS_I(inode)->location; - location->objectid = objectid; - location->offset = 0; - location->type = BTRFS_INODE_ITEM_KEY; - - ret = btrfs_insert_inode_locked(inode); - if (ret < 0) - goto fail; - batch.keys = &key[0]; batch.data_sizes = &sizes[0]; - batch.total_data_size = sizes[0] + (name ? sizes[1] : 0); - batch.nr = name ? 2 : 1; + batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); + batch.nr = args->orphan ? 
1 : 2; ret = btrfs_insert_empty_items(trans, root, path, &batch); - if (ret != 0) - goto fail_unlock; + if (ret != 0) { + btrfs_abort_transaction(trans, ret); + goto discard; + } inode->i_mtime = current_time(inode); inode->i_atime = inode->i_mtime; inode->i_ctime = inode->i_mtime; BTRFS_I(inode)->i_otime = inode->i_mtime; + /* + * We're going to fill the inode item now, so at this point the inode + * must be fully initialized. + */ + inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, sizeof(*inode_item)); fill_inode_item(trans, path->nodes[0], inode_item, inode); - if (name) { + if (!args->orphan) { ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, struct btrfs_inode_ref); - btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); - btrfs_set_inode_ref_index(path->nodes[0], ref, *index); ptr = (unsigned long)(ref + 1); - write_extent_buffer(path->nodes[0], name, ptr, name_len); + if (args->subvol) { + btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); + btrfs_set_inode_ref_index(path->nodes[0], ref, 0); + write_extent_buffer(path->nodes[0], "..", ptr, 2); + } else { + btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); + btrfs_set_inode_ref_index(path->nodes[0], ref, + BTRFS_I(inode)->dir_index); + write_extent_buffer(path->nodes[0], name, ptr, name_len); + } } btrfs_mark_buffer_dirty(path->nodes[0]); - btrfs_free_path(path); + btrfs_release_path(path); inode_tree_add(inode); @@ -6290,24 +6328,28 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans, btrfs_update_root_times(trans, root); - ret = btrfs_inode_inherit_props(trans, inode, dir); - if (ret) - btrfs_err(fs_info, - "error inheriting props for ino %llu (root %llu): %d", - btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); + if (args->orphan) { + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); + } else { + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, + name_len, 0, BTRFS_I(inode)->dir_index); + } + if (ret) { + btrfs_abort_transaction(trans, ret); + goto discard; + } - return 0; + ret = 0; + goto out; -fail_unlock: +discard: /* * discard_new_inode() calls iput(), but the caller owns the reference * to the inode. 
*/ ihold(inode); discard_new_inode(inode); -fail: - if (dir && name) - BTRFS_I(dir)->index_cnt--; +out: btrfs_free_path(path); return ret; } @@ -6414,52 +6456,28 @@ static int btrfs_create_common(struct inode *dir, struct dentry *dentry, unsigned int trans_num_items; struct btrfs_trans_handle *trans; int err; - u64 index = 0; err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); - if (err) { - iput(inode); - return err; - } + if (err) + goto out_inode; trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { - iput(inode); err = PTR_ERR(trans); goto out_new_inode_args; } - err = btrfs_create_new_inode(trans, &new_inode_args, &index); - if (err) { - iput(inode); - inode = NULL; - goto out_unlock; - } - - err = btrfs_init_inode_security(trans, &new_inode_args); - if (err) - goto out_unlock; - - err = btrfs_update_inode(trans, root, BTRFS_I(inode)); - if (err) - goto out_unlock; - - err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), - dentry->d_name.name, dentry->d_name.len, 0, index); - if (err) - goto out_unlock; - - d_instantiate_new(dentry, inode); + err = btrfs_create_new_inode(trans, &new_inode_args); + if (!err) + d_instantiate_new(dentry, inode); -out_unlock: btrfs_end_transaction(trans); - if (err && inode) { - inode_dec_link_count(inode); - discard_new_inode(inode); - } btrfs_btree_balance_dirty(fs_info); out_new_inode_args: btrfs_new_inode_args_destroy(&new_inode_args); +out_inode: + if (err) + iput(inode); return err; } @@ -8732,34 +8750,6 @@ struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns, return inode; } -/* - * create a new subvolume directory/inode (helper for the ioctl). - */ -int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *parent_root, - struct btrfs_new_inode_args *args) -{ - struct inode *inode = args->inode; - struct btrfs_root *new_root = BTRFS_I(inode)->root; - int err; - u64 index = 0; - - err = btrfs_create_new_inode(trans, args, &index); - if (err) - return err; - - unlock_new_inode(inode); - - err = btrfs_subvol_inherit_props(trans, new_root, parent_root); - if (err) - btrfs_err(new_root->fs_info, - "error inheriting subvolume %llu properties: %d", - new_root->root_key.objectid, err); - - err = btrfs_update_inode(trans, new_root, BTRFS_I(inode)); - return err; -} - struct inode *btrfs_alloc_inode(struct super_block *sb) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); @@ -9248,41 +9238,6 @@ static struct inode *new_whiteout_inode(struct user_namespace *mnt_userns, return inode; } -static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, - struct btrfs_new_inode_args *args) -{ - struct inode *inode = args->inode; - struct inode *dir = args->dir; - struct btrfs_root *root = BTRFS_I(dir)->root; - struct dentry *dentry = args->dentry; - int ret; - u64 index; - - ret = btrfs_create_new_inode(trans, args, &index); - if (ret) { - iput(inode); - return ret; - } - - ret = btrfs_init_inode_security(trans, args); - if (ret) - goto out; - - ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), - dentry->d_name.name, dentry->d_name.len, 0, index); - if (ret) - goto out; - - ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); -out: - unlock_new_inode(inode); - if (ret) - inode_dec_link_count(inode); - iput(inode); - - return ret; -} - static int btrfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, @@ -9492,11 +9447,14 @@ static int btrfs_rename(struct user_namespace 
*mnt_userns, rename_ctx.index, new_dentry->d_parent); if (flags & RENAME_WHITEOUT) { - ret = btrfs_whiteout_for_rename(trans, &whiteout_args); - whiteout_args.inode = NULL; + ret = btrfs_create_new_inode(trans, &whiteout_args); if (ret) { btrfs_abort_transaction(trans, ret); goto out_fail; + } else { + unlock_new_inode(whiteout_args.inode); + iput(whiteout_args.inode); + whiteout_args.inode = NULL; } } out_fail: @@ -9735,7 +9693,6 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, }; unsigned int trans_num_items; int err; - u64 index = 0; int name_len; int datasize; unsigned long ptr; @@ -9753,38 +9710,33 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, inode->i_op = &btrfs_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &btrfs_aops; + btrfs_i_size_write(BTRFS_I(inode), name_len); + inode_set_bytes(inode, name_len); new_inode_args.inode = inode; err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); - if (err) { - iput(inode); - return err; - } + if (err) + goto out_inode; /* 1 additional item for the inline extent */ trans_num_items++; trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { - iput(inode); err = PTR_ERR(trans); goto out_new_inode_args; } - err = btrfs_create_new_inode(trans, &new_inode_args, &index); - if (err) { - iput(inode); - inode = NULL; - goto out_unlock; - } - - err = btrfs_init_inode_security(trans, &new_inode_args); + err = btrfs_create_new_inode(trans, &new_inode_args); if (err) - goto out_unlock; + goto out; path = btrfs_alloc_path(); if (!path) { err = -ENOMEM; - goto out_unlock; + btrfs_abort_transaction(trans, err); + discard_new_inode(inode); + inode = NULL; + goto out; } key.objectid = btrfs_ino(BTRFS_I(inode)); key.offset = 0; @@ -9793,8 +9745,11 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, err = btrfs_insert_empty_item(trans, root, path, &key, datasize); if (err) { + btrfs_abort_transaction(trans, err); btrfs_free_path(path); - goto out_unlock; + discard_new_inode(inode); + inode = NULL; + goto out; } leaf = path->nodes[0]; ei = btrfs_item_ptr(leaf, path->slots[0], @@ -9812,32 +9767,16 @@ static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir, btrfs_mark_buffer_dirty(leaf); btrfs_free_path(path); - inode_set_bytes(inode, name_len); - btrfs_i_size_write(BTRFS_I(inode), name_len); - err = btrfs_update_inode(trans, root, BTRFS_I(inode)); - /* - * Last step, add directory indexes for our symlink inode. This is the - * last step to avoid extra cleanup of these indexes if an error happens - * elsewhere above. 
- */ - if (!err) - err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), - dentry->d_name.name, dentry->d_name.len, 0, - index); - if (err) - goto out_unlock; - d_instantiate_new(dentry, inode); - -out_unlock: + err = 0; +out: btrfs_end_transaction(trans); - if (err && inode) { - inode_dec_link_count(inode); - discard_new_inode(inode); - } btrfs_btree_balance_dirty(fs_info); out_new_inode_args: btrfs_new_inode_args_destroy(&new_inode_args); +out_inode: + if (err) + iput(inode); return err; } @@ -10095,7 +10034,6 @@ static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, .orphan = true, }; unsigned int trans_num_items; - u64 index; int ret; inode = new_inode(dir->i_sb); @@ -10108,35 +10046,16 @@ static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, new_inode_args.inode = inode; ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); - if (ret) { - iput(inode); - return ret; - } + if (ret) + goto out_inode; trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { - iput(inode); ret = PTR_ERR(trans); goto out_new_inode_args; } - ret = btrfs_create_new_inode(trans, &new_inode_args, &index); - if (ret) { - iput(inode); - inode = NULL; - goto out; - } - - ret = btrfs_init_inode_security(trans, &new_inode_args); - if (ret) - goto out; - - ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); - if (ret) - goto out; - ret = btrfs_orphan_add(trans, BTRFS_I(inode)); - if (ret) - goto out; + ret = btrfs_create_new_inode(trans, &new_inode_args); /* * We set number of links to 0 in btrfs_create_new_inode(), and here we @@ -10146,16 +10065,20 @@ static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() */ set_nlink(inode, 1); - d_tmpfile(dentry, inode); - unlock_new_inode(inode); - mark_inode_dirty(inode); -out: + + if (!ret) { + d_tmpfile(dentry, inode); + unlock_new_inode(inode); + mark_inode_dirty(inode); + } + btrfs_end_transaction(trans); - if (ret && inode) - discard_new_inode(inode); btrfs_btree_balance_dirty(fs_info); out_new_inode_args: btrfs_new_inode_args_destroy(&new_inode_args); +out_inode: + if (ret) + iput(inode); return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index bd10b8d44b5d..fe00dd88c281 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -575,8 +575,6 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, struct inode *dir, struct dentry *dentry, struct btrfs_qgroup_inherit *inherit) { - const char *name = dentry->d_name.name; - int namelen = dentry->d_name.len; struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); struct btrfs_trans_handle *trans; struct btrfs_key key; @@ -596,7 +594,6 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, int ret; dev_t anon_dev; u64 objectid; - u64 index = 0; root_item = kzalloc(sizeof(*root_item), GFP_KERNEL); if (!root_item) @@ -713,7 +710,6 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, free_extent_buffer(leaf); leaf = NULL; - key.offset = (u64)-1; new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); @@ -731,47 +727,21 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, goto out; } - ret = btrfs_create_subvol_root(trans, root, &new_inode_args); - if (ret) { - /* We potentially lose an unused inode item here */ - btrfs_abort_transaction(trans, ret); - goto out; - } - - /* - * insert the directory item - */ - ret = 
btrfs_set_inode_index(BTRFS_I(dir), &index); - if (ret) { - btrfs_abort_transaction(trans, ret); - goto out; - } - - ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key, - BTRFS_FT_DIR, index); - if (ret) { - btrfs_abort_transaction(trans, ret); - goto out; - } - - btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2); - ret = btrfs_update_inode(trans, root, BTRFS_I(dir)); + ret = btrfs_uuid_tree_add(trans, root_item->uuid, + BTRFS_UUID_KEY_SUBVOL, objectid); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } - ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid, - btrfs_ino(BTRFS_I(dir)), index, name, namelen); + ret = btrfs_create_new_inode(trans, &new_inode_args); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } - ret = btrfs_uuid_tree_add(trans, root_item->uuid, - BTRFS_UUID_KEY_SUBVOL, objectid); - if (ret) - btrfs_abort_transaction(trans, ret); + d_instantiate_new(dentry, new_inode_args.inode); + new_inode_args.inode = NULL; out: trans->block_rsv = NULL; @@ -782,11 +752,6 @@ out: btrfs_end_transaction(trans); else ret = btrfs_commit_transaction(trans); - - if (!ret) { - d_instantiate(dentry, new_inode_args.inode); - new_inode_args.inode = NULL; - } out_new_inode_args: btrfs_new_inode_args_destroy(&new_inode_args); out_inode: diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index 1b31481f9e72..a2ec8ecae8de 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c @@ -380,9 +380,8 @@ static struct prop_handler prop_handlers[] = { }, }; -static int inherit_props(struct btrfs_trans_handle *trans, - struct inode *inode, - struct inode *parent) +int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans, + struct inode *inode, struct inode *parent) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_fs_info *fs_info = root->fs_info; @@ -457,41 +456,6 @@ static int inherit_props(struct btrfs_trans_handle *trans, return 0; } -int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans, - struct inode *inode, - struct inode *dir) -{ - if (!dir) - return 0; - - return inherit_props(trans, inode, dir); -} - -int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_root *parent_root) -{ - struct super_block *sb = root->fs_info->sb; - struct inode *parent_inode, *child_inode; - int ret; - - parent_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, parent_root); - if (IS_ERR(parent_inode)) - return PTR_ERR(parent_inode); - - child_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, root); - if (IS_ERR(child_inode)) { - iput(parent_inode); - return PTR_ERR(child_inode); - } - - ret = inherit_props(trans, child_inode, parent_inode); - iput(child_inode); - iput(parent_inode); - - return ret; -} - void __init btrfs_props_init(void) { int i; diff --git a/fs/btrfs/props.h b/fs/btrfs/props.h index 59bea741cfcf..ca9dd3df129b 100644 --- a/fs/btrfs/props.h +++ b/fs/btrfs/props.h @@ -23,8 +23,4 @@ int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *dir); -int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_root *parent_root); - #endif -- cgit v1.2.3 From 63c34cb4c6dddd7899a14ed0a11b208a41e9c85c Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 15 Mar 2022 15:22:41 +0000 Subject: btrfs: add and use helper to assert an inode range is clean We have four different scenarios where we don't expect to find ordered extents after locking a file range: 1) During plain fallocate; 2) During hole 
punching; 3) During zero range; 4) During reflinks (both cloning and deduplication). This is because in all these cases we follow the pattern: 1) Lock the inode's VFS lock in exclusive mode; 2) Lock the inode's i_mmap_lock in exclusive mode, to serialize with mmap writes; 3) Flush delalloc in a file range and wait for all ordered extents to complete - both done through btrfs_wait_ordered_range(); 4) Lock the file range in the inode's io_tree. So add a helper that asserts that we don't have ordered extents for a given range. Make the four scenarios listed above use this helper after locking the respective file range. Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/file.c | 4 ++++ fs/btrfs/inode.c | 35 +++++++++++++++++++++++++++++++++++ fs/btrfs/reflink.c | 13 +++++++++++-- 4 files changed, 51 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9addfd5cdf4e..e43e7853f389 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3375,6 +3375,7 @@ void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags); void btrfs_update_inode_bytes(struct btrfs_inode *inode, const u64 add_bytes, const u64 del_bytes); +void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end); /* ioctl.c */ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index fd90e25382b2..bd329316945f 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2608,6 +2608,8 @@ static void btrfs_punch_hole_lock_range(struct inode *inode, unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, cached_state); } + + btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend); } static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans, @@ -3479,6 +3481,8 @@ static long btrfs_fallocate(struct file *file, int mode, lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, &cached_state); + btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end); + /* First, check if we exceed the qgroup limit */ INIT_LIST_HEAD(&reserve_list); while (cur_offset < alloc_end) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 31ccb82157c2..c4b68be7edd7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -11261,6 +11261,41 @@ void btrfs_update_inode_bytes(struct btrfs_inode *inode, spin_unlock(&inode->lock); } +/** + * Verify that there are no ordered extents for a given file range. + * + * @inode: The target inode. + * @start: Start offset of the file range, should be sector size aligned. + * @end: End offset (inclusive) of the file range, its value +1 should be + * sector size aligned. + * + * This should typically be used for cases where we locked an inode's VFS lock in + * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode, + * we have flushed all delalloc in the range, we have waited for all ordered + * extents in the range to complete and finally we have locked the file range in + * the inode's io_tree.
+ */ +void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end) +{ + struct btrfs_root *root = inode->root; + struct btrfs_ordered_extent *ordered; + + if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) + return; + + ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start); + if (ordered) { + btrfs_err(root->fs_info, +"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])", + start, end, btrfs_ino(inode), root->root_key.objectid, + ordered->file_offset, + ordered->file_offset + ordered->num_bytes - 1); + btrfs_put_ordered_extent(ordered); + } + + ASSERT(ordered == NULL); +} + static const struct inode_operations btrfs_dir_inode_operations = { .getattr = btrfs_getattr, .lookup = btrfs_lookup, diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index c257bf037cf2..c39f8b3a5a4a 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -614,14 +614,23 @@ static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, struct inode *inode2, u64 loff2, u64 len) { + u64 range1_end = loff1 + len - 1; + u64 range2_end = loff2 + len - 1; + if (inode1 < inode2) { swap(inode1, inode2); swap(loff1, loff2); + swap(range1_end, range2_end); } else if (inode1 == inode2 && loff2 < loff1) { swap(loff1, loff2); + swap(range1_end, range2_end); } - lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); - lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); + + lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end); + lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end); + + btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end); + btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end); } static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2) -- cgit v1.2.3 From b0a66a3137bde6e011e2b14b135e8ea18facb68c Mon Sep 17 00:00:00 2001 From: Jonathan Lassoff Date: Thu, 17 Mar 2022 10:45:08 -0700 Subject: btrfs: add messages to printk index In order for end users to quickly react to new issues that come up in production, it is proving useful to leverage this printk indexing system. This printk index enables kernel developers to use calls to printk() with changeable ad-hoc format strings, while still enabling end users to detect changes and develop a semi-stable interface for detecting and parsing these messages. So that detailed Btrfs messages are captured by this printk index, this patch wraps btrfs_printk and btrfs_handle_fs_error with macros. Example of the generated list: https://lore.kernel.org/lkml/12588e13d51a9c3bf59467d3fc1ac2162f1275c1.1647539056.git.jof@thejof.com Signed-off-by: Jonathan Lassoff Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 41 ++++++++++++++++++++++++++++++++++++----- fs/btrfs/super.c | 6 +++--- 2 files changed, 39 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e43e7853f389..0d0ac093e4bf 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3451,11 +3451,29 @@ void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) { } -#ifdef CONFIG_PRINTK +#ifdef CONFIG_PRINTK_INDEX + +#define btrfs_printk(fs_info, fmt, args...) 
\ +do { \ + printk_index_subsys_emit("%sBTRFS %s (device %s): ", NULL, fmt); \ + _btrfs_printk(fs_info, fmt, ##args); \ +} while (0) + +__printf(2, 3) +__cold +void _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...); + +#elif defined(CONFIG_PRINTK) + +#define btrfs_printk(fs_info, fmt, args...) \ + _btrfs_printk(fs_info, fmt, ##args) + __printf(2, 3) __cold -void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...); +void _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...); + #else + #define btrfs_printk(fs_info, fmt, args...) \ btrfs_no_printk(fs_info, fmt, ##args) #endif @@ -3706,12 +3724,25 @@ do { \ __LINE__, (errno)); \ } while (0) +#ifdef CONFIG_PRINTK_INDEX + #define btrfs_handle_fs_error(fs_info, errno, fmt, args...) \ -do { \ - __btrfs_handle_fs_error((fs_info), __func__, __LINE__, \ - (errno), fmt, ##args); \ +do { \ + printk_index_subsys_emit( \ + "BTRFS: error (device %s%s) in %s:%d: errno=%d %s", \ + KERN_CRIT, fmt); \ + __btrfs_handle_fs_error((fs_info), __func__, __LINE__, \ + (errno), fmt, ##args); \ } while (0) +#else + +#define btrfs_handle_fs_error(fs_info, errno, fmt, args...) \ + __btrfs_handle_fs_error((fs_info), __func__, __LINE__, \ + (errno), fmt, ##args) + +#endif + #define BTRFS_FS_ERROR(fs_info) (unlikely(test_bit(BTRFS_FS_STATE_ERROR, \ &(fs_info)->fs_state))) #define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \ diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index b228efe8ab6e..206f44005c52 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -261,7 +261,7 @@ static struct ratelimit_state printk_limits[] = { RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100), }; -void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) +void __cold _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) { char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0"; struct va_format vaf; @@ -292,10 +292,10 @@ void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, . char statestr[STATE_STRING_BUF_LEN]; btrfs_state_to_string(fs_info, statestr); - printk("%sBTRFS %s (device %s%s): %pV\n", lvl, type, + _printk("%sBTRFS %s (device %s%s): %pV\n", lvl, type, fs_info->sb->s_id, statestr, &vaf); } else { - printk("%sBTRFS %s: %pV\n", lvl, type, &vaf); + _printk("%sBTRFS %s: %pV\n", lvl, type, &vaf); } } -- cgit v1.2.3 From 1a89f1738684cac05b4bbea80dd77e8f15176d3a Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 23 Mar 2022 16:19:26 +0000 Subject: btrfs: stop allocating a path when checking if cross reference exists At btrfs_cross_ref_exist() we always allocate a path, but we really don't need to because all its callers (only 2) already have an allocated path that is not being used when they call btrfs_cross_ref_exist(). So change btrfs_cross_ref_exist() to take a path as an argument and update both its callers to pass in the unused path they have when they call btrfs_cross_ref_exist(). 
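As a rough sketch (a hypothetical caller, not code from this patch), the new convention lets a function that already owns a path pass it in; since btrfs_cross_ref_exist() now ends with btrfs_release_path() rather than btrfs_free_path(), the caller keeps ownership:

	/* Hypothetical caller, for illustration only. */
	static int example_check_cross_ref(struct btrfs_root *root, u64 ino,
					   u64 offset, u64 bytenr)
	{
		struct btrfs_path *path;
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		/* ... earlier searches may use and release @path here ... */

		/* The path is released, not freed, inside the call. */
		ret = btrfs_cross_ref_exist(root, ino, offset, bytenr, false, path);

		btrfs_free_path(path);
		return ret;
	}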
Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 3 ++- fs/btrfs/extent-tree.c | 9 ++------- fs/btrfs/inode.c | 5 +++-- 3 files changed, 7 insertions(+), 10 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0d0ac093e4bf..79399eb14535 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2784,7 +2784,8 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes); int btrfs_exclude_logged_extents(struct extent_buffer *eb); int btrfs_cross_ref_exist(struct btrfs_root *root, - u64 objectid, u64 offset, u64 bytenr, bool strict); + u64 objectid, u64 offset, u64 bytenr, bool strict, + struct btrfs_path *path); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 parent, u64 root_objectid, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6aa92f84f465..fc5b9be06ec8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2357,15 +2357,10 @@ out: } int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, - u64 bytenr, bool strict) + u64 bytenr, bool strict, struct btrfs_path *path) { - struct btrfs_path *path; int ret; - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - do { ret = check_committed_ref(root, path, objectid, offset, bytenr, strict); @@ -2376,7 +2371,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, } while (ret == -EAGAIN); out: - btrfs_free_path(path); + btrfs_release_path(path); if (btrfs_is_data_reloc_root(root)) WARN_ON(ret > 0); return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 05cb7b1ab172..ca47f7c43ee6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1788,7 +1788,8 @@ next_slot: ret = btrfs_cross_ref_exist(root, ino, found_key.offset - - extent_offset, disk_bytenr, false); + extent_offset, disk_bytenr, + false, path); if (ret) { /* * ret could be -EIO if the above fails to read @@ -7222,7 +7223,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)), key.offset - backref_offset, disk_bytenr, - strict); + strict, path); if (ret) { ret = 0; goto out; -- cgit v1.2.3 From d4135134ab8feb994369d44884733e8031b0f800 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 23 Mar 2022 16:19:30 +0000 Subject: btrfs: avoid blocking on space reservation when doing nowait dio writes When doing a NOWAIT direct IO write, if we can NOCOW then it means we can proceed with the non-blocking, NOWAIT path. However reserving the metadata space and qgroup meta space can often result in blocking - flushing delalloc, waiting for ordered extents to complete, triggering transaction commits, etc. - going against the semantics of a NOWAIT write. So make the NOWAIT write path try to reserve all the metadata it needs without resulting in blocking behaviour - if we get -ENOSPC or -EDQUOT then return -EAGAIN to make the caller fall back to a blocking direct IO write.
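A minimal sketch of the error mapping described above, mirroring the btrfs_get_blocks_direct_write() hunk later in this patch:

	/* NOWAIT writers must not block on reservations: translate a
	 * reservation failure into -EAGAIN so the caller retries the
	 * write through the blocking path. */
	ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, nowait);
	if (ret < 0) {
		if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
			ret = -EAGAIN;
		goto out;
	}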
This is part of a patchset comprised of the following patches: btrfs: avoid blocking on page locks with nowait dio on compressed range btrfs: avoid blocking nowait dio when locking file range btrfs: avoid double nocow check when doing nowait dio writes btrfs: stop allocating a path when checking if cross reference exists btrfs: free path at can_nocow_extent() before checking for checksum items btrfs: release path earlier at can_nocow_extent() btrfs: avoid blocking when allocating context for nowait dio read/write btrfs: avoid blocking on space reservation when doing nowait dio writes The following test was run before and after applying this patchset: $ cat io-uring-nodatacow-test.sh #!/bin/bash DEV=/dev/sdc MNT=/mnt/sdc MOUNT_OPTIONS="-o ssd -o nodatacow" MKFS_OPTIONS="-R free-space-tree -O no-holes" NUM_JOBS=4 FILE_SIZE=8G RUN_TIME=300 cat <<EOF > /tmp/fio-job.ini [io_uring_rw] rw=randrw fsync=0 fallocate=posix group_reporting=1 direct=1 ioengine=io_uring iodepth=64 bssplit=4k/20:8k/20:16k/20:32k/10:64k/10:128k/5:256k/5:512k/5:1m/5 filesize=$FILE_SIZE runtime=$RUN_TIME time_based filename=foobar directory=$MNT numjobs=$NUM_JOBS thread EOF echo performance | \ tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor umount $MNT &> /dev/null mkfs.btrfs -f $MKFS_OPTIONS $DEV &> /dev/null mount $MOUNT_OPTIONS $DEV $MNT fio /tmp/fio-job.ini umount $MNT The test was run on a 12 cores box with 64G of ram, using a non-debug kernel config (Debian's default config) and a spinning disk. Result before the patchset: READ: bw=407MiB/s (427MB/s), 407MiB/s-407MiB/s (427MB/s-427MB/s), io=119GiB (128GB), run=300175-300175msec WRITE: bw=407MiB/s (427MB/s), 407MiB/s-407MiB/s (427MB/s-427MB/s), io=119GiB (128GB), run=300175-300175msec Result after the patchset: READ: bw=436MiB/s (457MB/s), 436MiB/s-436MiB/s (457MB/s-457MB/s), io=128GiB (137GB), run=300044-300044msec WRITE: bw=435MiB/s (456MB/s), 435MiB/s-435MiB/s (456MB/s-456MB/s), io=128GiB (137GB), run=300044-300044msec That's about +7.2% throughput for reads and +6.9% for writes.
Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/delalloc-space.c | 9 +++++---- fs/btrfs/file.c | 2 +- fs/btrfs/inode.c | 13 +++++++++---- fs/btrfs/qgroup.c | 5 +++-- fs/btrfs/qgroup.h | 12 ++++++++---- fs/btrfs/relocation.c | 3 ++- fs/btrfs/root-tree.c | 3 ++- 8 files changed, 31 insertions(+), 18 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 79399eb14535..be126aa2fa90 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2893,7 +2893,7 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root, void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes); int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, - u64 disk_num_bytes); + u64 disk_num_bytes, bool noflush); u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end); diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index bd8267c4687d..36ab0859a263 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -289,7 +289,7 @@ static void calc_inode_reservations(struct btrfs_fs_info *fs_info, } int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, - u64 disk_num_bytes) + u64 disk_num_bytes, bool noflush) { struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; @@ -308,7 +308,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, * If we have a transaction open (can happen if we call truncate_block * from truncate), then we need FLUSH_LIMIT so we don't deadlock. */ - if (btrfs_is_free_space_inode(inode)) { + if (noflush || btrfs_is_free_space_inode(inode)) { flush = BTRFS_RESERVE_NO_FLUSH; } else { if (current->journal_info) @@ -333,7 +333,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, */ calc_inode_reservations(fs_info, num_bytes, disk_num_bytes, &meta_reserve, &qgroup_reserve); - ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true); + ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true, + noflush); if (ret) return ret; ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, meta_reserve, flush); @@ -456,7 +457,7 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode, ret = btrfs_check_data_free_space(inode, reserved, start, len); if (ret < 0) return ret; - ret = btrfs_delalloc_reserve_metadata(inode, len, len); + ret = btrfs_delalloc_reserve_metadata(inode, len, len, false); if (ret < 0) { btrfs_free_reserved_data_space(inode, *reserved, start, len); extent_changeset_free(*reserved); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index ceac806155b8..b64fb93d9046 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1684,7 +1684,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, WARN_ON(reserve_bytes == 0); ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), reserve_bytes, - reserve_bytes); + reserve_bytes, false); if (ret) { if (!only_release_metadata) btrfs_free_reserved_data_space(BTRFS_I(inode), diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4254c3c7b9f7..b3f2010d9df5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4705,7 +4705,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, goto out; } } - ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize); + ret = btrfs_delalloc_reserve_metadata(inode, blocksize, 
blocksize, false); if (ret < 0) { if (!only_release_metadata) btrfs_free_reserved_data_space(inode, data_reserved, @@ -7415,6 +7415,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, u64 start, u64 len, unsigned int iomap_flags) { + const bool nowait = (iomap_flags & IOMAP_NOWAIT); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map *em = *map; int type; @@ -7454,12 +7455,15 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, struct extent_map *em2; /* We can NOCOW, so only need to reserve metadata space. */ - ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, + nowait); if (ret < 0) { /* Our caller expects us to free the input extent map. */ free_extent_map(em); *map = NULL; btrfs_dec_nocow_writers(fs_info, block_start); + if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) + ret = -EAGAIN; goto out; } space_reserved = true; @@ -7483,7 +7487,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, free_extent_map(em); *map = NULL; - if (iomap_flags & IOMAP_NOWAIT) + if (nowait) return -EAGAIN; /* We have to COW, so need to reserve metadata and data space. */ @@ -10801,7 +10805,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes); if (ret) goto out_free_data_space; - ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes); + ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes, + false); if (ret) goto out_qgroup_free_data; diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index a9fed8195483..db723c0026bd 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -3939,12 +3939,13 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, } int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, - enum btrfs_qgroup_rsv_type type, bool enforce) + enum btrfs_qgroup_rsv_type type, bool enforce, + bool noflush) { int ret; ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); - if (ret <= 0 && ret != -EDQUOT) + if ((ret <= 0 && ret != -EDQUOT) || noflush) return ret; ret = try_flush_qgroup(root); diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 880e9df0dac1..0c4dd2a9af96 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -364,19 +364,23 @@ int btrfs_qgroup_free_data(struct btrfs_inode *inode, int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, enum btrfs_qgroup_rsv_type type, bool enforce); int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, - enum btrfs_qgroup_rsv_type type, bool enforce); + enum btrfs_qgroup_rsv_type type, bool enforce, + bool noflush); /* Reserve metadata space for pertrans and prealloc type */ static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root, int num_bytes, bool enforce) { return __btrfs_qgroup_reserve_meta(root, num_bytes, - BTRFS_QGROUP_RSV_META_PERTRANS, enforce); + BTRFS_QGROUP_RSV_META_PERTRANS, + enforce, false); } static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root, - int num_bytes, bool enforce) + int num_bytes, bool enforce, + bool noflush) { return __btrfs_qgroup_reserve_meta(root, num_bytes, - BTRFS_QGROUP_RSV_META_PREALLOC, enforce); + BTRFS_QGROUP_RSV_META_PREALLOC, + enforce, noflush); } void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 
fdc2c4b411f0..b1c36fc72ffa 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2997,7 +2997,8 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, /* Reserve metadata for this range */ ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), - clamped_len, clamped_len); + clamped_len, clamped_len, + false); if (ret) goto release_page; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index ca7426ef61c8..a64b26b16904 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -509,7 +509,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, /* One for parent inode, two for dir entries */ qgroup_num_bytes = 3 * fs_info->nodesize; ret = btrfs_qgroup_reserve_meta_prealloc(root, - qgroup_num_bytes, true); + qgroup_num_bytes, true, + false); if (ret) return ret; } -- cgit v1.2.3 From 8e010b3d7043e61fc0bbfe27a57dad674eef7340 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Mar 2022 17:52:09 +0100 Subject: btrfs: remove the zoned/zone_size union in struct btrfs_fs_info Reading a value from a different member of a union is not just a great way to obfuscate code, but also creates an aliasing violation. Switch btrfs_is_zoned to look at ->zone_size and remove the union. Note: union was to simplify the detection of zoned filesystem but now this is wrapped behind btrfs_is_zoned so we can drop the union. Reviewed-by: Johannes Thumshirn Reviewed-by: Naohiro Aota Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba [ add note ] Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index be126aa2fa90..63fb20d3fcc6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1045,10 +1045,7 @@ struct btrfs_fs_info { * Zone size > 0 when in ZONED mode, otherwise it's used for a check * if the mode is enabled */ - union { - u64 zone_size; - u64 zoned; - }; + u64 zone_size; struct mutex zoned_meta_io_lock; spinlock_t treelog_bg_lock; @@ -4010,7 +4007,7 @@ static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info) static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info) { - return fs_info->zoned != 0; + return fs_info->zone_size > 0; } static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root) -- cgit v1.2.3 From 08dddb2951c96b53413cf1982e9358fa4c123183 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 13 Apr 2022 16:20:40 +0100 Subject: btrfs: use rbtree with leftmost node cached for tracking lowest block group We keep track of the start offset of the block group with the lowest start offset at fs_info->first_logical_byte. This requires explicitly updating that field every time we add, delete or lookup a block group to/from the red black tree at fs_info->block_group_cache_tree. Since the block group with the lowest start address happens to always be the one that is the leftmost node of the tree, we can use a red black tree that caches the left most node. Then when we need the start address of that block group, we can just quickly get the leftmost node in the tree and extract the start offset of that node's block group. 
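A short sketch of the lookup this enables; this is essentially what first_logical_byte() becomes in the patch below:

	/* With an rb_root_cached the leftmost node - the block group with
	 * the lowest start offset - is available without walking the tree. */
	struct rb_node *leftmost;
	u64 bytenr = 0;

	leftmost = rb_first_cached(&fs_info->block_group_cache_tree);
	if (leftmost) {
		struct btrfs_block_group *bg;

		bg = rb_entry(leftmost, struct btrfs_block_group, cache_node);
		bytenr = bg->start;
	}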
This avoids the need to explicitly keep track of that address in the dedicated member fs_info->first_logical_byte, and it also allows the next patch in the series to switch the lock that protects the red black tree from a spin lock to a read/write lock - without this change it would be tricky because block group searches also update fs_info->first_logical_byte. Reviewed-by: Nikolay Borisov Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 30 ++++++++++++------------------ fs/btrfs/ctree.h | 3 +-- fs/btrfs/disk-io.c | 3 +-- fs/btrfs/extent-tree.c | 22 +++++++++------------- fs/btrfs/free-space-cache.c | 2 +- fs/btrfs/free-space-tree.c | 2 +- 6 files changed, 25 insertions(+), 37 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 7bf10afab89c..a91938ab7ff8 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -168,11 +168,12 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, struct rb_node **p; struct rb_node *parent = NULL; struct btrfs_block_group *cache; + bool leftmost = true; ASSERT(block_group->length != 0); spin_lock(&info->block_group_cache_lock); - p = &info->block_group_cache_tree.rb_node; + p = &info->block_group_cache_tree.rb_root.rb_node; while (*p) { parent = *p; @@ -181,6 +182,7 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, p = &(*p)->rb_left; } else if (block_group->start > cache->start) { p = &(*p)->rb_right; + leftmost = false; } else { spin_unlock(&info->block_group_cache_lock); return -EEXIST; @@ -188,11 +190,8 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, } rb_link_node(&block_group->cache_node, parent, p); - rb_insert_color(&block_group->cache_node, - &info->block_group_cache_tree); - - if (info->first_logical_byte > block_group->start) - info->first_logical_byte = block_group->start; + rb_insert_color_cached(&block_group->cache_node, + &info->block_group_cache_tree, leftmost); spin_unlock(&info->block_group_cache_lock); @@ -211,7 +210,7 @@ static struct btrfs_block_group *block_group_cache_tree_search( u64 end, start; spin_lock(&info->block_group_cache_lock); - n = info->block_group_cache_tree.rb_node; + n = info->block_group_cache_tree.rb_root.rb_node; while (n) { cache = rb_entry(n, struct btrfs_block_group, cache_node); @@ -233,11 +232,8 @@ static struct btrfs_block_group *block_group_cache_tree_search( break; } } - if (ret) { + if (ret) btrfs_get_block_group(ret); - if (bytenr == 0 && info->first_logical_byte > ret->start) - info->first_logical_byte = ret->start; - } spin_unlock(&info->block_group_cache_lock); return ret; @@ -958,15 +954,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, goto out; spin_lock(&fs_info->block_group_cache_lock); - rb_erase(&block_group->cache_node, - &fs_info->block_group_cache_tree); + rb_erase_cached(&block_group->cache_node, + &fs_info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); /* Once for the block groups rbtree */ btrfs_put_block_group(block_group); - if (fs_info->first_logical_byte == block_group->start) - fs_info->first_logical_byte = (u64)-1; spin_unlock(&fs_info->block_group_cache_lock); down_write(&block_group->space_info->groups_sem); @@ -4014,11 +4008,11 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) spin_unlock(&info->zone_active_bgs_lock); spin_lock(&info->block_group_cache_lock); - while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { + while ((n = 
rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { block_group = rb_entry(n, struct btrfs_block_group, cache_node); - rb_erase(&block_group->cache_node, - &info->block_group_cache_tree); + rb_erase_cached(&block_group->cache_node, + &info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); spin_unlock(&info->block_group_cache_lock); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 63fb20d3fcc6..ae8a083aa1de 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -680,8 +680,7 @@ struct btrfs_fs_info { /* block group cache stuff */ spinlock_t block_group_cache_lock; - u64 first_logical_byte; - struct rb_root block_group_cache_tree; + struct rb_root_cached block_group_cache_tree; /* keep track of unallocated space */ atomic64_t free_chunk_space; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index c8661709a425..7e8bb00720eb 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3232,8 +3232,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) btrfs_init_async_reclaim_work(fs_info); spin_lock_init(&fs_info->block_group_cache_lock); - fs_info->block_group_cache_tree = RB_ROOT; - fs_info->first_logical_byte = (u64)-1; + fs_info->block_group_cache_tree = RB_ROOT_CACHED; extent_io_tree_init(fs_info, &fs_info->excluded_extents, IO_TREE_FS_EXCLUDED_EXTENTS, NULL); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2a718727541c..cd79a5f4c643 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2494,23 +2494,19 @@ static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) static u64 first_logical_byte(struct btrfs_fs_info *fs_info) { - struct btrfs_block_group *cache; - u64 bytenr; + struct rb_node *leftmost; + u64 bytenr = 0; spin_lock(&fs_info->block_group_cache_lock); - bytenr = fs_info->first_logical_byte; - spin_unlock(&fs_info->block_group_cache_lock); - - if (bytenr < (u64)-1) - return bytenr; - /* Get the block group with the lowest logical start address. 
*/ - cache = btrfs_lookup_first_block_group(fs_info, 0); - if (!cache) - return 0; + leftmost = rb_first_cached(&fs_info->block_group_cache_tree); + if (leftmost) { + struct btrfs_block_group *bg; - bytenr = cache->start; - btrfs_put_block_group(cache); + bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); + bytenr = bg->start; + } + spin_unlock(&fs_info->block_group_cache_lock); return bytenr; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ef84bc5030cd..f7adee6fa05e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -4072,7 +4072,7 @@ static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info, btrfs_info(fs_info, "cleaning free space cache v1"); - node = rb_first(&fs_info->block_group_cache_tree); + node = rb_first_cached(&fs_info->block_group_cache_tree); while (node) { block_group = rb_entry(node, struct btrfs_block_group, cache_node); ret = btrfs_remove_free_space_inode(trans, NULL, block_group); diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 0ae54d8c10d6..1bf89aa67216 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -1178,7 +1178,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) goto abort; } - node = rb_first(&fs_info->block_group_cache_tree); + node = rb_first_cached(&fs_info->block_group_cache_tree); while (node) { block_group = rb_entry(node, struct btrfs_block_group, cache_node); -- cgit v1.2.3 From 16b0c2581e3a81e88872ff054fca8d8f5a92ca5e Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 13 Apr 2022 16:20:41 +0100 Subject: btrfs: use a read/write lock for protecting the block groups tree Currently we use a spin lock to protect the red black tree that we use to track block groups. Most accesses to that tree are actually read only and for large filesystems, with thousands of block groups, it actually has a bad impact on performance, as concurrent read only searches on the tree are serialized. Read only searches on the tree are very frequent and done when: 1) Pinning and unpinning extents, as we need to lookup the respective block group from the tree; 2) Freeing the last reference of a tree block, regardless if we pin the underlying extent or add it back to free space cache/tree; 3) During NOCOW writes, both buffered IO and direct IO, we need to check if the block group that contains an extent is read only or not and to increment the number of NOCOW writers in the block group. For those operations we need to search for the block group in the tree. Similarly, after creating the ordered extent for the NOCOW write, we need to decrement the number of NOCOW writers from the same block group, which requires searching for it in the tree; 4) Decreasing the number of extent reservations in a block group; 5) When allocating extents and freeing reserved extents; 6) Adding and removing free space to the free space tree; 7) When releasing delalloc bytes during ordered extent completion; 8) When relocating a block group; 9) During fitrim, to iterate over the block groups; 10) etc; Write accesses to the tree, to add or remove block groups, are much less frequent as they happen only when allocating a new block group or when deleting a block group. We also use the same spin lock to protect the list of currently caching block groups. 
Additions to this list are made when we need to cache a block group, because we don't have a free space cache for it (or we have but it's invalid), and removals from this list are done when caching of the block group's free space finishes. These cases are also not very common, but when they happen, they happen only once when the filesystem is mounted. So switch the lock that protects the tree of block groups from a spinning lock to a read/write lock. Reviewed-by: Nikolay Borisov Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 40 ++++++++++++++++++++-------------------- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent-tree.c | 4 ++-- fs/btrfs/transaction.c | 4 ++-- 5 files changed, 26 insertions(+), 26 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index a91938ab7ff8..2c42ce00b84d 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -172,7 +172,7 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, ASSERT(block_group->length != 0); - spin_lock(&info->block_group_cache_lock); + write_lock(&info->block_group_cache_lock); p = &info->block_group_cache_tree.rb_root.rb_node; while (*p) { @@ -184,7 +184,7 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, p = &(*p)->rb_right; leftmost = false; } else { - spin_unlock(&info->block_group_cache_lock); + write_unlock(&info->block_group_cache_lock); return -EEXIST; } } @@ -193,7 +193,7 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, rb_insert_color_cached(&block_group->cache_node, &info->block_group_cache_tree, leftmost); - spin_unlock(&info->block_group_cache_lock); + write_unlock(&info->block_group_cache_lock); return 0; } @@ -209,7 +209,7 @@ static struct btrfs_block_group *block_group_cache_tree_search( struct rb_node *n; u64 end, start; - spin_lock(&info->block_group_cache_lock); + read_lock(&info->block_group_cache_lock); n = info->block_group_cache_tree.rb_root.rb_node; while (n) { @@ -234,7 +234,7 @@ static struct btrfs_block_group *block_group_cache_tree_search( } if (ret) btrfs_get_block_group(ret); - spin_unlock(&info->block_group_cache_lock); + read_unlock(&info->block_group_cache_lock); return ret; } @@ -263,13 +263,13 @@ struct btrfs_block_group *btrfs_next_block_group( struct btrfs_fs_info *fs_info = cache->fs_info; struct rb_node *node; - spin_lock(&fs_info->block_group_cache_lock); + read_lock(&fs_info->block_group_cache_lock); /* If our block group was removed, we need a full search. 
*/ if (RB_EMPTY_NODE(&cache->cache_node)) { const u64 next_bytenr = cache->start + cache->length; - spin_unlock(&fs_info->block_group_cache_lock); + read_unlock(&fs_info->block_group_cache_lock); btrfs_put_block_group(cache); cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache; } @@ -280,7 +280,7 @@ struct btrfs_block_group *btrfs_next_block_group( btrfs_get_block_group(cache); } else cache = NULL; - spin_unlock(&fs_info->block_group_cache_lock); + read_unlock(&fs_info->block_group_cache_lock); return cache; } @@ -768,10 +768,10 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only cache->has_caching_ctl = 1; spin_unlock(&cache->lock); - spin_lock(&fs_info->block_group_cache_lock); + write_lock(&fs_info->block_group_cache_lock); refcount_inc(&caching_ctl->count); list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); - spin_unlock(&fs_info->block_group_cache_lock); + write_unlock(&fs_info->block_group_cache_lock); btrfs_get_block_group(cache); @@ -953,7 +953,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (ret) goto out; - spin_lock(&fs_info->block_group_cache_lock); + write_lock(&fs_info->block_group_cache_lock); rb_erase_cached(&block_group->cache_node, &fs_info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); @@ -961,7 +961,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, /* Once for the block groups rbtree */ btrfs_put_block_group(block_group); - spin_unlock(&fs_info->block_group_cache_lock); + write_unlock(&fs_info->block_group_cache_lock); down_write(&block_group->space_info->groups_sem); /* @@ -986,7 +986,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (block_group->cached == BTRFS_CACHE_STARTED) btrfs_wait_block_group_cache_done(block_group); if (block_group->has_caching_ctl) { - spin_lock(&fs_info->block_group_cache_lock); + write_lock(&fs_info->block_group_cache_lock); if (!caching_ctl) { struct btrfs_caching_control *ctl; @@ -1000,7 +1000,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, } if (caching_ctl) list_del_init(&caching_ctl->list); - spin_unlock(&fs_info->block_group_cache_lock); + write_unlock(&fs_info->block_group_cache_lock); if (caching_ctl) { /* Once for the caching bgs list and once for us. 
*/ btrfs_put_caching_control(caching_ctl); @@ -3970,14 +3970,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) struct btrfs_caching_control *caching_ctl; struct rb_node *n; - spin_lock(&info->block_group_cache_lock); + write_lock(&info->block_group_cache_lock); while (!list_empty(&info->caching_block_groups)) { caching_ctl = list_entry(info->caching_block_groups.next, struct btrfs_caching_control, list); list_del(&caching_ctl->list); btrfs_put_caching_control(caching_ctl); } - spin_unlock(&info->block_group_cache_lock); + write_unlock(&info->block_group_cache_lock); spin_lock(&info->unused_bgs_lock); while (!list_empty(&info->unused_bgs)) { @@ -4007,14 +4007,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) } spin_unlock(&info->zone_active_bgs_lock); - spin_lock(&info->block_group_cache_lock); + write_lock(&info->block_group_cache_lock); while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { block_group = rb_entry(n, struct btrfs_block_group, cache_node); rb_erase_cached(&block_group->cache_node, &info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); - spin_unlock(&info->block_group_cache_lock); + write_unlock(&info->block_group_cache_lock); down_write(&block_group->space_info->groups_sem); list_del(&block_group->list); @@ -4037,9 +4037,9 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) ASSERT(block_group->swap_extents == 0); btrfs_put_block_group(block_group); - spin_lock(&info->block_group_cache_lock); + write_lock(&info->block_group_cache_lock); } - spin_unlock(&info->block_group_cache_lock); + write_unlock(&info->block_group_cache_lock); btrfs_release_global_block_rsv(info); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ae8a083aa1de..580a392d7c37 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -679,7 +679,7 @@ struct btrfs_fs_info { struct radix_tree_root fs_roots_radix; /* block group cache stuff */ - spinlock_t block_group_cache_lock; + rwlock_t block_group_cache_lock; struct rb_root_cached block_group_cache_tree; /* keep track of unallocated space */ diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7e8bb00720eb..cd51b12d174b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3231,7 +3231,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) btrfs_init_balance(fs_info); btrfs_init_async_reclaim_work(fs_info); - spin_lock_init(&fs_info->block_group_cache_lock); + rwlock_init(&fs_info->block_group_cache_lock); fs_info->block_group_cache_tree = RB_ROOT_CACHED; extent_io_tree_init(fs_info, &fs_info->excluded_extents, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd79a5f4c643..963160a0c393 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2497,7 +2497,7 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info) struct rb_node *leftmost; u64 bytenr = 0; - spin_lock(&fs_info->block_group_cache_lock); + read_lock(&fs_info->block_group_cache_lock); /* Get the block group with the lowest logical start address. 
*/ leftmost = rb_first_cached(&fs_info->block_group_cache_tree); if (leftmost) { @@ -2506,7 +2506,7 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info) bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); bytenr = bg->start; } - spin_unlock(&fs_info->block_group_cache_lock); + read_unlock(&fs_info->block_group_cache_lock); return bytenr; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index b008c5110958..875b801ab3d7 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -221,7 +221,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) * the caching thread will re-start it's search from 3, and thus find * the hole from [4,6) to add to the free space cache. */ - spin_lock(&fs_info->block_group_cache_lock); + write_lock(&fs_info->block_group_cache_lock); list_for_each_entry_safe(caching_ctl, next, &fs_info->caching_block_groups, list) { struct btrfs_block_group *cache = caching_ctl->block_group; @@ -234,7 +234,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) cache->last_byte_to_unpin = caching_ctl->progress; } } - spin_unlock(&fs_info->block_group_cache_lock); + write_unlock(&fs_info->block_group_cache_lock); up_write(&fs_info->commit_root_sem); } -- cgit v1.2.3 From 7aab8b32825eecd36ce8eef72dffd331724185da Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 15 Apr 2022 16:33:24 +0200 Subject: btrfs: move btrfs_readpage to extent_io.c Keep btrfs_readpage next to btrfs_do_readpage and the other address space operations. This allows us to keep submit_one_bio and struct btrfs_bio_ctrl file-local in extent_io.c. Reviewed-by: Nikolay Borisov Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 - fs/btrfs/extent_io.c | 35 +++++++++++++++++++++++++++++++++-- fs/btrfs/extent_io.h | 16 +--------------- fs/btrfs/inode.c | 20 -------------------- 4 files changed, 34 insertions(+), 38 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 779123e68d7b..74e40cf2d2d0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -137,6 +137,17 @@ struct tree_entry { struct rb_node rb_node; }; +/* + * Structure to record info about the bio being assembled, and other info like + * how many bytes are there before stripe/ordered extent boundary.
+ */ +struct btrfs_bio_ctrl { + struct bio *bio; + unsigned long bio_flags; + u32 len_to_stripe_boundary; + u32 len_to_oe_boundary; +}; + struct extent_page_data { struct btrfs_bio_ctrl bio_ctrl; /* tells writepage not to lock the state bits for this range @@ -166,7 +177,7 @@ static int add_extent_changeset(struct extent_state *state, u32 bits, return ret; } -void submit_one_bio(struct bio *bio, int mirror_num, unsigned long bio_flags) +static void submit_one_bio(struct bio *bio, int mirror_num, unsigned long bio_flags) { struct extent_io_tree *tree = bio->bi_private; @@ -3604,7 +3615,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, * XXX JDM: This needs looking at to ensure proper page locking * return 0 on success, otherwise return error */ -int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, +static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, struct btrfs_bio_ctrl *bio_ctrl, unsigned int read_flags, u64 *prev_em_start) { @@ -3793,6 +3804,26 @@ out: return ret; } +int btrfs_readpage(struct file *file, struct page *page) +{ + struct btrfs_inode *inode = BTRFS_I(page->mapping->host); + u64 start = page_offset(page); + u64 end = start + PAGE_SIZE - 1; + struct btrfs_bio_ctrl bio_ctrl = { 0 }; + int ret; + + btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); + + ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL); + /* + * If btrfs_do_readpage() failed we will want to submit the assembled + * bio to do the cleanup. + */ + if (bio_ctrl.bio) + submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags); + return ret; +} + static inline void contiguous_readpages(struct page *pages[], int nr_pages, u64 start, u64 end, struct extent_map **em_cached, diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 9a283b2358b8..c94df8e2ab9d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -102,17 +102,6 @@ struct extent_buffer { #endif }; -/* - * Structure to record info about the bio being assembled, and other info like - * how many bytes are there before stripe/ordered extent boundary. 
- */ -struct btrfs_bio_ctrl { - struct bio *bio; - unsigned long bio_flags; - u32 len_to_stripe_boundary; - u32 len_to_oe_boundary; -}; - /* * Structure to record how many bytes and which ranges are set/cleared */ @@ -178,10 +167,7 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode, int try_release_extent_mapping(struct page *page, gfp_t mask); int try_release_extent_buffer(struct page *page); -void submit_one_bio(struct bio *bio, int mirror_num, unsigned long bio_flags); -int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, - struct btrfs_bio_ctrl *bio_ctrl, - unsigned int read_flags, u64 *prev_em_start); +int btrfs_readpage(struct file *file, struct page *page); int extent_write_full_page(struct page *page, struct writeback_control *wbc); int extent_write_locked_range(struct inode *inode, u64 start, u64 end); int extent_writepages(struct address_space *mapping, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 44b7c9a7c84d..0b87fdeb7079 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8136,26 +8136,6 @@ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return extent_fiemap(BTRFS_I(inode), fieinfo, start, len); } -int btrfs_readpage(struct file *file, struct page *page) -{ - struct btrfs_inode *inode = BTRFS_I(page->mapping->host); - u64 start = page_offset(page); - u64 end = start + PAGE_SIZE - 1; - struct btrfs_bio_ctrl bio_ctrl = { 0 }; - int ret; - - btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); - - ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL); - /* - * If btrfs_do_readpage() failed we will want to submit the assembled - * bio to do the cleanup. - */ - if (bio_ctrl.bio) - submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags); - return ret; -} - static int btrfs_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; -- cgit v1.2.3 From ad357938c6b4802c6f1f87c9a7811a33e240fa22 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 15 Apr 2022 16:33:28 +0200 Subject: btrfs: do not return errors from submit_bio_hook_t instances Both btrfs_repair_one_sector and submit_one_bio, as direct callers of these instances, ignore errors because they expect the methods themselves to call ->bi_end_io on error. Remove the unused and dangerous return value.
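A sketch of the resulting contract (the function name and the submission helper are made up for illustration): an instance completes the bio itself on failure instead of returning an error:

	static void example_submit_bio_hook(struct inode *inode, struct bio *bio,
					    int mirror_num, unsigned long bio_flags)
	{
		blk_status_t ret;

		ret = do_the_submission(bio, mirror_num); /* hypothetical helper */
		if (ret) {
			/* No return value: report the error through the bio. */
			bio->bi_status = ret;
			bio_endio(bio);
		}
	}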
Reviewed-by: Qu Wenruo Reviewed-by: Nikolay Borisov Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 23 ++++++++--------------- 3 files changed, 11 insertions(+), 18 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 5b7e948ea651..dd23f78664f1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3250,8 +3250,8 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz u64 btrfs_file_extent_end(const struct btrfs_path *path); /* inode.c */ -blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio, - int mirror_num, unsigned long bio_flags); +void btrfs_submit_data_bio(struct inode *inode, struct bio *bio, + int mirror_num, unsigned long bio_flags); unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio, u32 bio_offset, struct page *page, u64 start, u64 end); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index c94df8e2ab9d..b390ec79f9a8 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -71,7 +71,7 @@ struct btrfs_fs_info; struct io_failure_record; struct extent_io_tree; -typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio, +typedef void (submit_bio_hook_t)(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e3e4f8b41923..72b3bff2742e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2570,9 +2570,8 @@ out: * * c-3) otherwise: async submit */ -blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio, - int mirror_num, unsigned long bio_flags) - +void btrfs_submit_data_bio(struct inode *inode, struct bio *bio, + int mirror_num, unsigned long bio_flags) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; @@ -2609,7 +2608,7 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio, */ btrfs_submit_compressed_read(inode, bio, mirror_num, bio_flags); - return BLK_STS_OK; + return; } else { /* * Lookup bio sums does extra checks around whether we @@ -2643,7 +2642,6 @@ out: bio->bi_status = ret; bio_endio(bio); } - return ret; } /* @@ -7787,25 +7785,20 @@ static void btrfs_dio_private_put(struct btrfs_dio_private *dip) kfree(dip); } -static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio, - int mirror_num, - unsigned long bio_flags) +static void submit_dio_repair_bio(struct inode *inode, struct bio *bio, + int mirror_num, unsigned long bio_flags) { struct btrfs_dio_private *dip = bio->bi_private; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - blk_status_t ret; BUG_ON(bio_op(bio) == REQ_OP_WRITE); - ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); - if (ret) - return ret; + if (btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA)) + return; refcount_inc(&dip->refs); - ret = btrfs_map_bio(fs_info, bio, mirror_num); - if (ret) + if (btrfs_map_bio(fs_info, bio, mirror_num)) refcount_dec(&dip->refs); - return ret; } static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip, -- cgit v1.2.3 From a31b4a4368d28c5e780f0906588fbd1dcfe4ad54 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 18 Apr 2022 06:43:09 +0200 Subject: btrfs: simplify WQ_HIGHPRI handling in struct btrfs_workqueue Just let the one caller that wants optional WQ_HIGHPRI handling allocate a separate btrfs_workqueue for that. 
This allows renaming struct __btrfs_workqueue to btrfs_workqueue, removing a pointer indirection and a separate allocation for all btrfs_workqueue users, and generally simplifying the code. Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/async-thread.c | 122 ++++++----------------------------------- fs/btrfs/async-thread.h | 7 +-- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 15 +++--- fs/btrfs/super.c | 1 + include/trace/events/btrfs.h | 30 +++++------ 6 files changed, 47 insertions(+), 129 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 43c89952b7d2..aac240430efe 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -15,13 +15,12 @@ enum { WORK_DONE_BIT, WORK_ORDER_DONE_BIT, - WORK_HIGH_PRIO_BIT, }; #define NO_THRESHOLD (-1) #define DFT_THRESHOLD (32) -struct __btrfs_workqueue { +struct btrfs_workqueue { struct workqueue_struct *normal_wq; /* File system this workqueue services */ @@ -48,12 +47,7 @@ struct __btrfs_workqueue { spinlock_t thres_lock; }; -struct btrfs_workqueue { - struct __btrfs_workqueue *normal; - struct __btrfs_workqueue *high; -}; - -struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) +struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq) { return wq->fs_info; } @@ -66,22 +60,22 @@ struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) { /* - * We could compare wq->normal->pending with num_online_cpus() + * We could compare wq->pending with num_online_cpus() * to support "thresh == NO_THRESHOLD" case, but it requires * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's * postpone it until someone needs the support of that case.
*/ - if (wq->normal->thresh == NO_THRESHOLD) + if (wq->thresh == NO_THRESHOLD) return false; - return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; + return atomic_read(&wq->pending) > wq->thresh * 2; } -static struct __btrfs_workqueue * -__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, - unsigned int flags, int limit_active, int thresh) +struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, + const char *name, unsigned int flags, + int limit_active, int thresh) { - struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL); + struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -105,12 +99,8 @@ __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, ret->thresh = thresh; } - if (flags & WQ_HIGHPRI) - ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags, - ret->current_active, name); - else - ret->normal_wq = alloc_workqueue("btrfs-%s", flags, - ret->current_active, name); + ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active, + name); if (!ret->normal_wq) { kfree(ret); return NULL; @@ -119,41 +109,7 @@ __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, INIT_LIST_HEAD(&ret->ordered_list); spin_lock_init(&ret->list_lock); spin_lock_init(&ret->thres_lock); - trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI); - return ret; -} - -static inline void -__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq); - -struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, - const char *name, - unsigned int flags, - int limit_active, - int thresh) -{ - struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL); - - if (!ret) - return NULL; - - ret->normal = __btrfs_alloc_workqueue(fs_info, name, - flags & ~WQ_HIGHPRI, - limit_active, thresh); - if (!ret->normal) { - kfree(ret); - return NULL; - } - - if (flags & WQ_HIGHPRI) { - ret->high = __btrfs_alloc_workqueue(fs_info, name, flags, - limit_active, thresh); - if (!ret->high) { - __btrfs_destroy_workqueue(ret->normal); - kfree(ret); - return NULL; - } - } + trace_btrfs_workqueue_alloc(ret, name); return ret; } @@ -162,7 +118,7 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, * This hook WILL be called in IRQ handler context, * so workqueue_set_max_active MUST NOT be called in this hook */ -static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) +static inline void thresh_queue_hook(struct btrfs_workqueue *wq) { if (wq->thresh == NO_THRESHOLD) return; @@ -174,7 +130,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) * This hook is called in kthread content. * So workqueue_set_max_active is called here. 
*/ -static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) +static inline void thresh_exec_hook(struct btrfs_workqueue *wq) { int new_current_active; long pending; @@ -217,7 +173,7 @@ out: } } -static void run_ordered_work(struct __btrfs_workqueue *wq, +static void run_ordered_work(struct btrfs_workqueue *wq, struct btrfs_work *self) { struct list_head *list = &wq->ordered_list; @@ -305,7 +261,7 @@ static void btrfs_work_helper(struct work_struct *normal_work) { struct btrfs_work *work = container_of(normal_work, struct btrfs_work, normal_work); - struct __btrfs_workqueue *wq; + struct btrfs_workqueue *wq = work->wq; int need_order = 0; /* @@ -318,7 +274,6 @@ static void btrfs_work_helper(struct work_struct *normal_work) */ if (work->ordered_func) need_order = 1; - wq = work->wq; trace_btrfs_work_sched(work); thresh_exec_hook(wq); @@ -350,8 +305,7 @@ void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, work->flags = 0; } -static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, - struct btrfs_work *work) +void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work) { unsigned long flags; @@ -366,54 +320,22 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, queue_work(wq->normal_wq, &work->normal_work); } -void btrfs_queue_work(struct btrfs_workqueue *wq, - struct btrfs_work *work) -{ - struct __btrfs_workqueue *dest_wq; - - if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) - dest_wq = wq->high; - else - dest_wq = wq->normal; - __btrfs_queue_work(dest_wq, work); -} - -static inline void -__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq) -{ - destroy_workqueue(wq->normal_wq); - trace_btrfs_workqueue_destroy(wq); - kfree(wq); -} - void btrfs_destroy_workqueue(struct btrfs_workqueue *wq) { if (!wq) return; - if (wq->high) - __btrfs_destroy_workqueue(wq->high); - __btrfs_destroy_workqueue(wq->normal); + destroy_workqueue(wq->normal_wq); + trace_btrfs_workqueue_destroy(wq); kfree(wq); } void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active) { - if (!wq) - return; - wq->normal->limit_active = limit_active; - if (wq->high) - wq->high->limit_active = limit_active; -} - -void btrfs_set_work_high_priority(struct btrfs_work *work) -{ - set_bit(WORK_HIGH_PRIO_BIT, &work->flags); + if (wq) + wq->limit_active = limit_active; } void btrfs_flush_workqueue(struct btrfs_workqueue *wq) { - if (wq->high) - flush_workqueue(wq->high->normal_wq); - - flush_workqueue(wq->normal->normal_wq); + flush_workqueue(wq->normal_wq); } diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index 3204daa51b95..07960529b360 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -11,8 +11,6 @@ struct btrfs_fs_info; struct btrfs_workqueue; -/* Internal use only */ -struct __btrfs_workqueue; struct btrfs_work; typedef void (*btrfs_func_t)(struct btrfs_work *arg); typedef void (*btrfs_work_func_t)(struct work_struct *arg); @@ -25,7 +23,7 @@ struct btrfs_work { /* Don't touch things below */ struct work_struct normal_work; struct list_head ordered_list; - struct __btrfs_workqueue *wq; + struct btrfs_workqueue *wq; unsigned long flags; }; @@ -40,9 +38,8 @@ void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work); void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max); -void btrfs_set_work_high_priority(struct btrfs_work *work); struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work); -struct 
btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); +struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq); bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); void btrfs_flush_workqueue(struct btrfs_workqueue *wq); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index dd23f78664f1..aa3aea042ec5 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -847,6 +847,7 @@ struct btrfs_fs_info { * two */ struct btrfs_workqueue *workers; + struct btrfs_workqueue *hipri_workers; struct btrfs_workqueue *delalloc_workers; struct btrfs_workqueue *flush_workers; struct btrfs_workqueue *endio_workers; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d70289ef581a..807e7b272896 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -874,9 +874,9 @@ blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, async->status = 0; if (op_is_sync(bio->bi_opf)) - btrfs_set_work_high_priority(&async->work); - - btrfs_queue_work(fs_info->workers, &async->work); + btrfs_queue_work(fs_info->hipri_workers, &async->work); + else + btrfs_queue_work(fs_info->workers, &async->work); return 0; } @@ -2279,6 +2279,7 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) { btrfs_destroy_workqueue(fs_info->fixup_workers); btrfs_destroy_workqueue(fs_info->delalloc_workers); + btrfs_destroy_workqueue(fs_info->hipri_workers); btrfs_destroy_workqueue(fs_info->workers); btrfs_destroy_workqueue(fs_info->endio_workers); btrfs_destroy_workqueue(fs_info->endio_raid56_workers); @@ -2457,7 +2458,9 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info) unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; fs_info->workers = - btrfs_alloc_workqueue(fs_info, "worker", + btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16); + fs_info->hipri_workers = + btrfs_alloc_workqueue(fs_info, "worker-high", flags | WQ_HIGHPRI, max_active, 16); fs_info->delalloc_workers = @@ -2505,8 +2508,8 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info) fs_info->discard_ctl.discard_workers = alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1); - if (!(fs_info->workers && fs_info->delalloc_workers && - fs_info->flush_workers && + if (!(fs_info->workers && fs_info->hipri_workers && + fs_info->delalloc_workers && fs_info->flush_workers && fs_info->endio_workers && fs_info->endio_meta_workers && fs_info->endio_meta_write_workers && fs_info->endio_write_workers && fs_info->endio_raid56_workers && diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 206f44005c52..2236024aca64 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1903,6 +1903,7 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, old_pool_size, new_pool_size); btrfs_workqueue_set_max(fs_info->workers, new_pool_size); + btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size); diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index f068ff30d654..290f07eb050a 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -24,7 +24,7 @@ struct btrfs_free_cluster; struct map_lookup; struct extent_buffer; struct btrfs_work; -struct __btrfs_workqueue; +struct btrfs_workqueue; struct btrfs_qgroup_extent_record; struct btrfs_qgroup; struct extent_io_tree; @@ -1457,42 
+1457,36 @@ DEFINE_EVENT(btrfs__work, btrfs_ordered_sched, TP_ARGS(work) ); -DECLARE_EVENT_CLASS(btrfs__workqueue, +DECLARE_EVENT_CLASS(btrfs_workqueue, - TP_PROTO(const struct __btrfs_workqueue *wq, - const char *name, int high), + TP_PROTO(const struct btrfs_workqueue *wq, const char *name), - TP_ARGS(wq, name, high), + TP_ARGS(wq, name), TP_STRUCT__entry_btrfs( __field( const void *, wq ) __string( name, name ) - __field( int , high ) ), TP_fast_assign_btrfs(btrfs_workqueue_owner(wq), __entry->wq = wq; __assign_str(name, name); - __entry->high = high; ), - TP_printk_btrfs("name=%s%s wq=%p", __get_str(name), - __print_flags(__entry->high, "", - {(WQ_HIGHPRI), "-high"}), + TP_printk_btrfs("name=%s wq=%p", __get_str(name), __entry->wq) ); -DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc, +DEFINE_EVENT(btrfs_workqueue, btrfs_workqueue_alloc, - TP_PROTO(const struct __btrfs_workqueue *wq, - const char *name, int high), + TP_PROTO(const struct btrfs_workqueue *wq, const char *name), - TP_ARGS(wq, name, high) + TP_ARGS(wq, name) ); -DECLARE_EVENT_CLASS(btrfs__workqueue_done, +DECLARE_EVENT_CLASS(btrfs_workqueue_done, - TP_PROTO(const struct __btrfs_workqueue *wq), + TP_PROTO(const struct btrfs_workqueue *wq), TP_ARGS(wq), @@ -1507,9 +1501,9 @@ DECLARE_EVENT_CLASS(btrfs__workqueue_done, TP_printk_btrfs("wq=%p", __entry->wq) ); -DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy, +DEFINE_EVENT(btrfs_workqueue_done, btrfs_workqueue_destroy, - TP_PROTO(const struct __btrfs_workqueue *wq), + TP_PROTO(const struct btrfs_workqueue *wq), TP_ARGS(wq) ); -- cgit v1.2.3 From be539518262774ed04336e7c68354b823dc6c5e4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 18 Apr 2022 06:43:10 +0200 Subject: btrfs: use normal workqueues for scrub All three scrub workqueues need neither ordered execution nor a thread disabling threshold (as the thresh parameter is less than DFT_THRESHOLD). Just switch to the normal workqueues, which use far fewer resources, especially in the work_struct vs btrfs_work structures. Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 6 ++--- fs/btrfs/scrub.c | 79 +++++++++++++++++++++++++++----------------------- fs/btrfs/super.c | 2 -- 3 files changed, 41 insertions(+), 46 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index aa3aea042ec5..6097166e39b6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -946,9 +946,9 @@ struct btrfs_fs_info { * running.
*/ refcount_t scrub_workers_refcnt; - struct btrfs_workqueue *scrub_workers; - struct btrfs_workqueue *scrub_wr_completion_workers; - struct btrfs_workqueue *scrub_parity_workers; + struct workqueue_struct *scrub_workers; + struct workqueue_struct *scrub_wr_completion_workers; + struct workqueue_struct *scrub_parity_workers; struct btrfs_subpage_info *subpage_info; struct btrfs_discard_ctl discard_ctl; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 6ac711fa793c..3985225f27be 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -90,7 +90,7 @@ struct scrub_bio { struct scrub_sector *sectors[SCRUB_SECTORS_PER_BIO]; int sector_count; int next_free; - struct btrfs_work work; + struct work_struct work; }; struct scrub_block { @@ -110,7 +110,7 @@ struct scrub_block { /* It is for the data with checksum */ unsigned int data_corrected:1; }; - struct btrfs_work work; + struct work_struct work; }; /* Used for the chunks with parity stripe such RAID5/6 */ @@ -132,7 +132,7 @@ struct scrub_parity { struct list_head sectors_list; /* Work of parity check and repair */ - struct btrfs_work work; + struct work_struct work; /* Mark the parity blocks which have data */ unsigned long *dbitmap; @@ -231,7 +231,7 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len, u64 gen, int mirror_num, u8 *csum, u64 physical_for_dev_replace); static void scrub_bio_end_io(struct bio *bio); -static void scrub_bio_end_io_worker(struct btrfs_work *work); +static void scrub_bio_end_io_worker(struct work_struct *work); static void scrub_block_complete(struct scrub_block *sblock); static void scrub_remap_extent(struct btrfs_fs_info *fs_info, u64 extent_logical, u32 extent_len, @@ -242,7 +242,7 @@ static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx, struct scrub_sector *sector); static void scrub_wr_submit(struct scrub_ctx *sctx); static void scrub_wr_bio_end_io(struct bio *bio); -static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); +static void scrub_wr_bio_end_io_worker(struct work_struct *work); static void scrub_put_ctx(struct scrub_ctx *sctx); static inline int scrub_is_page_on_raid56(struct scrub_sector *sector) @@ -587,8 +587,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( sbio->index = i; sbio->sctx = sctx; sbio->sector_count = 0; - btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL, - NULL); + INIT_WORK(&sbio->work, scrub_bio_end_io_worker); if (i != SCRUB_BIOS_PER_SCTX - 1) sctx->bios[i]->next_free = i + 1; @@ -1718,11 +1717,11 @@ static void scrub_wr_bio_end_io(struct bio *bio) sbio->status = bio->bi_status; sbio->bio = bio; - btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); - btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); + INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker); + queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); } -static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) +static void scrub_wr_bio_end_io_worker(struct work_struct *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; @@ -2119,10 +2118,10 @@ static void scrub_missing_raid56_end_io(struct bio *bio) bio_put(bio); - btrfs_queue_work(fs_info->scrub_workers, &sblock->work); + queue_work(fs_info->scrub_workers, &sblock->work); } -static void scrub_missing_raid56_worker(struct btrfs_work *work) +static void scrub_missing_raid56_worker(struct work_struct *work) { struct scrub_block *sblock = container_of(work, struct scrub_block, work); struct scrub_ctx 
*sctx = sblock->sctx; @@ -2212,7 +2211,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock) raid56_add_scrub_pages(rbio, sector->page, 0, sector->logical); } - btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL); + INIT_WORK(&sblock->work, scrub_missing_raid56_worker); scrub_block_get(sblock); scrub_pending_bio_inc(sctx); raid56_submit_missing_rbio(rbio); @@ -2332,10 +2331,10 @@ static void scrub_bio_end_io(struct bio *bio) sbio->status = bio->bi_status; sbio->bio = bio; - btrfs_queue_work(fs_info->scrub_workers, &sbio->work); + queue_work(fs_info->scrub_workers, &sbio->work); } -static void scrub_bio_end_io_worker(struct btrfs_work *work) +static void scrub_bio_end_io_worker(struct work_struct *work) { struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_ctx *sctx = sbio->sctx; @@ -2765,7 +2764,7 @@ static void scrub_free_parity(struct scrub_parity *sparity) kfree(sparity); } -static void scrub_parity_bio_endio_worker(struct btrfs_work *work) +static void scrub_parity_bio_endio_worker(struct work_struct *work) { struct scrub_parity *sparity = container_of(work, struct scrub_parity, work); @@ -2786,9 +2785,8 @@ static void scrub_parity_bio_endio(struct bio *bio) bio_put(bio); - btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL, - NULL); - btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); + INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker); + queue_work(fs_info->scrub_parity_workers, &sparity->work); } static void scrub_parity_check_and_repair(struct scrub_parity *sparity) @@ -3958,22 +3956,23 @@ static void scrub_workers_put(struct btrfs_fs_info *fs_info) { if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, &fs_info->scrub_lock)) { - struct btrfs_workqueue *scrub_workers = NULL; - struct btrfs_workqueue *scrub_wr_comp = NULL; - struct btrfs_workqueue *scrub_parity = NULL; - - scrub_workers = fs_info->scrub_workers; - scrub_wr_comp = fs_info->scrub_wr_completion_workers; - scrub_parity = fs_info->scrub_parity_workers; + struct workqueue_struct *scrub_workers = fs_info->scrub_workers; + struct workqueue_struct *scrub_wr_comp = + fs_info->scrub_wr_completion_workers; + struct workqueue_struct *scrub_parity = + fs_info->scrub_parity_workers; fs_info->scrub_workers = NULL; fs_info->scrub_wr_completion_workers = NULL; fs_info->scrub_parity_workers = NULL; mutex_unlock(&fs_info->scrub_lock); - btrfs_destroy_workqueue(scrub_workers); - btrfs_destroy_workqueue(scrub_wr_comp); - btrfs_destroy_workqueue(scrub_parity); + if (scrub_workers) + destroy_workqueue(scrub_workers); + if (scrub_wr_comp) + destroy_workqueue(scrub_wr_comp); + if (scrub_parity) + destroy_workqueue(scrub_parity); } } @@ -3983,9 +3982,9 @@ static void scrub_workers_put(struct btrfs_fs_info *fs_info) static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { - struct btrfs_workqueue *scrub_workers = NULL; - struct btrfs_workqueue *scrub_wr_comp = NULL; - struct btrfs_workqueue *scrub_parity = NULL; + struct workqueue_struct *scrub_workers = NULL; + struct workqueue_struct *scrub_wr_comp = NULL; + struct workqueue_struct *scrub_parity = NULL; unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; int ret = -ENOMEM; @@ -3993,18 +3992,16 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) return 0; - scrub_workers = btrfs_alloc_workqueue(fs_info, 
"scrub", flags, - is_dev_replace ? 1 : max_active, 4); + scrub_workers = alloc_workqueue("btrfs-scrub", flags, + is_dev_replace ? 1 : max_active); if (!scrub_workers) goto fail_scrub_workers; - scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, - max_active, 2); + scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active); if (!scrub_wr_comp) goto fail_scrub_wr_completion_workers; - scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags, - max_active, 2); + scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active); if (!scrub_parity) goto fail_scrub_parity_workers; @@ -4025,11 +4022,11 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, mutex_unlock(&fs_info->scrub_lock); ret = 0; - btrfs_destroy_workqueue(scrub_parity); + destroy_workqueue(scrub_parity); fail_scrub_parity_workers: - btrfs_destroy_workqueue(scrub_wr_comp); + destroy_workqueue(scrub_wr_comp); fail_scrub_wr_completion_workers: - btrfs_destroy_workqueue(scrub_workers); + destroy_workqueue(scrub_workers); fail_scrub_workers: return ret; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2236024aca64..b1fdc6a26c76 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1913,8 +1913,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size); btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size); - btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers, - new_pool_size); } static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info, -- cgit v1.2.3 From 385de0ef387dc7f33fc5b828136cbc9516b3ec1a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 18 Apr 2022 06:43:11 +0200 Subject: btrfs: use a normal workqueue for rmw_workers rmw_workers doesn't need ordered execution or thread disabling threshold (as the thresh parameter is less than DFT_THRESHOLD). Just switch to the normal workqueues that use a lot less resources, especially in the work_struct vs btrfs_work structures. 
Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/raid56.c | 29 ++++++++++++++--------------- 3 files changed, 18 insertions(+), 19 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 6097166e39b6..f55b15437602 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -853,7 +853,7 @@ struct btrfs_fs_info { struct btrfs_workqueue *endio_workers; struct btrfs_workqueue *endio_meta_workers; struct btrfs_workqueue *endio_raid56_workers; - struct btrfs_workqueue *rmw_workers; + struct workqueue_struct *rmw_workers; struct btrfs_workqueue *endio_meta_write_workers; struct btrfs_workqueue *endio_write_workers; struct btrfs_workqueue *endio_freespace_worker; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 807e7b272896..7ce945db1243 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2283,7 +2283,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->workers); btrfs_destroy_workqueue(fs_info->endio_workers); btrfs_destroy_workqueue(fs_info->endio_raid56_workers); - btrfs_destroy_workqueue(fs_info->rmw_workers); + if (fs_info->rmw_workers) + destroy_workqueue(fs_info->rmw_workers); btrfs_destroy_workqueue(fs_info->endio_write_workers); btrfs_destroy_workqueue(fs_info->endio_freespace_worker); btrfs_destroy_workqueue(fs_info->delayed_workers); @@ -2492,8 +2493,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info) fs_info->endio_raid56_workers = btrfs_alloc_workqueue(fs_info, "endio-raid56", flags, max_active, 4); - fs_info->rmw_workers = - btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2); + fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active); fs_info->endio_write_workers = btrfs_alloc_workqueue(fs_info, "endio-write", flags, max_active, 2); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 31f16f86081b..a5b623ee6fac 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -88,7 +88,7 @@ struct btrfs_raid_bio { /* * for scheduling work in the helper threads */ - struct btrfs_work work; + struct work_struct work; /* * bio list and bio_list_lock are used @@ -196,8 +196,8 @@ struct btrfs_raid_bio { static int __raid56_parity_recover(struct btrfs_raid_bio *rbio); static noinline void finish_rmw(struct btrfs_raid_bio *rbio); -static void rmw_work(struct btrfs_work *work); -static void read_rebuild_work(struct btrfs_work *work); +static void rmw_work(struct work_struct *work); +static void read_rebuild_work(struct work_struct *work); static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio); static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed); static void __free_raid_bio(struct btrfs_raid_bio *rbio); @@ -206,12 +206,12 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check); -static void scrub_parity_work(struct btrfs_work *work); +static void scrub_parity_work(struct work_struct *work); -static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) +static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func) { - btrfs_init_work(&rbio->work, work_func, NULL, NULL); - btrfs_queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); + INIT_WORK(&rbio->work, work_func); + queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); } /* @@ -1745,7 +1745,7 @@ 
struct btrfs_plug_cb { struct blk_plug_cb cb; struct btrfs_fs_info *info; struct list_head rbio_list; - struct btrfs_work work; + struct work_struct work; }; /* @@ -1813,7 +1813,7 @@ static void run_plug(struct btrfs_plug_cb *plug) * if the unplug comes from schedule, we have to push the * work off to a helper thread */ -static void unplug_work(struct btrfs_work *work) +static void unplug_work(struct work_struct *work) { struct btrfs_plug_cb *plug; plug = container_of(work, struct btrfs_plug_cb, work); @@ -1826,9 +1826,8 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) plug = container_of(cb, struct btrfs_plug_cb, cb); if (from_schedule) { - btrfs_init_work(&plug->work, unplug_work, NULL, NULL); - btrfs_queue_work(plug->info->rmw_workers, - &plug->work); + INIT_WORK(&plug->work, unplug_work); + queue_work(plug->info->rmw_workers, &plug->work); return; } run_plug(plug); @@ -2316,7 +2315,7 @@ int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc, } -static void rmw_work(struct btrfs_work *work) +static void rmw_work(struct work_struct *work) { struct btrfs_raid_bio *rbio; @@ -2324,7 +2323,7 @@ static void rmw_work(struct btrfs_work *work) raid56_rmw_stripe(rbio); } -static void read_rebuild_work(struct btrfs_work *work) +static void read_rebuild_work(struct work_struct *work) { struct btrfs_raid_bio *rbio; @@ -2781,7 +2780,7 @@ finish: validate_rbio_for_parity_scrub(rbio); } -static void scrub_parity_work(struct btrfs_work *work) +static void scrub_parity_work(struct work_struct *work) { struct btrfs_raid_bio *rbio; -- cgit v1.2.3 From 253bf57555e451dec5a7f09dc95d380ce8b10e5b Mon Sep 17 00:00:00 2001 From: Gabriel Niebler Date: Tue, 26 Apr 2022 11:43:04 +0200 Subject: btrfs: turn delayed_nodes_tree into an XArray MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … in the btrfs_root struct and adjust all usages of this object to use the XArray API, because it is notionally easier to use and understand, as it provides array semantics, and also takes care of locking for us, further simplifying the code. Also use the opportunity to do some light refactoring. Reviewed-by: Nikolay Borisov Signed-off-by: Gabriel Niebler Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 6 ++-- fs/btrfs/delayed-inode.c | 84 ++++++++++++++++++++++-------------------------- fs/btrfs/disk-io.c | 2 +- fs/btrfs/inode.c | 2 +- 4 files changed, 44 insertions(+), 50 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f55b15437602..9eebd96c6639 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1222,10 +1222,10 @@ struct btrfs_root { struct rb_root inode_tree; /* - * radix tree that keeps track of delayed nodes of every inode, - * protected by inode_lock + * Xarray that keeps track of delayed nodes of every inode, protected + * by inode_lock */ - struct radix_tree_root delayed_nodes_tree; + struct xarray delayed_nodes; /* * right now this just gets used so that a root has its own devid * for stat. 
It may be used for more later diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 748bf6b0d860..66779ab3ed4a 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -78,7 +78,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( } spin_lock(&root->inode_lock); - node = radix_tree_lookup(&root->delayed_nodes_tree, ino); + node = xa_load(&root->delayed_nodes, ino); if (node) { if (btrfs_inode->delayed_node) { @@ -90,9 +90,9 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( /* * It's possible that we're racing into the middle of removing - * this node from the radix tree. In this case, the refcount + * this node from the xarray. In this case, the refcount * was zero and it should never go back to one. Just return - * NULL like it was never in the radix at all; our release + * NULL like it was never in the xarray at all; our release * function is in the process of removing it. * * Some implementations of refcount_inc refuse to bump the @@ -100,7 +100,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( * here, refcount_inc() may decide to just WARN_ONCE() instead * of actually bumping the refcount. * - * If this node is properly in the radix, we want to bump the + * If this node is properly in the xarray, we want to bump the * refcount twice, once for the inode and once for this get * operation. */ @@ -128,36 +128,30 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( u64 ino = btrfs_ino(btrfs_inode); int ret; -again: - node = btrfs_get_delayed_node(btrfs_inode); - if (node) - return node; + do { + node = btrfs_get_delayed_node(btrfs_inode); + if (node) + return node; - node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS); - if (!node) - return ERR_PTR(-ENOMEM); - btrfs_init_delayed_node(node, root, ino); + node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS); + if (!node) + return ERR_PTR(-ENOMEM); + btrfs_init_delayed_node(node, root, ino); - /* cached in the btrfs inode and can be accessed */ - refcount_set(&node->refs, 2); + /* Cached in the inode and can be accessed */ + refcount_set(&node->refs, 2); - ret = radix_tree_preload(GFP_NOFS); - if (ret) { - kmem_cache_free(delayed_node_cache, node); - return ERR_PTR(ret); - } - - spin_lock(&root->inode_lock); - ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node); - if (ret == -EEXIST) { - spin_unlock(&root->inode_lock); - kmem_cache_free(delayed_node_cache, node); - radix_tree_preload_end(); - goto again; - } + spin_lock(&root->inode_lock); + ret = xa_insert(&root->delayed_nodes, ino, node, GFP_NOFS); + if (ret) { + spin_unlock(&root->inode_lock); + kmem_cache_free(delayed_node_cache, node); + if (ret != -EBUSY) + return ERR_PTR(ret); + } + } while (ret); btrfs_inode->delayed_node = node; spin_unlock(&root->inode_lock); - radix_tree_preload_end(); return node; } @@ -276,8 +270,7 @@ static void __btrfs_release_delayed_node( * back up. We can delete it now. 
*/ ASSERT(refcount_read(&delayed_node->refs) == 0); - radix_tree_delete(&root->delayed_nodes_tree, - delayed_node->inode_id); + xa_erase(&root->delayed_nodes, delayed_node->inode_id); spin_unlock(&root->inode_lock); kmem_cache_free(delayed_node_cache, delayed_node); } @@ -1870,34 +1863,35 @@ void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode) void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) { - u64 inode_id = 0; + unsigned long index = 0; + struct btrfs_delayed_node *delayed_node; struct btrfs_delayed_node *delayed_nodes[8]; - int i, n; while (1) { + int n = 0; + spin_lock(&root->inode_lock); - n = radix_tree_gang_lookup(&root->delayed_nodes_tree, - (void **)delayed_nodes, inode_id, - ARRAY_SIZE(delayed_nodes)); - if (!n) { + if (xa_empty(&root->delayed_nodes)) { spin_unlock(&root->inode_lock); - break; + return; } - inode_id = delayed_nodes[n - 1]->inode_id + 1; - for (i = 0; i < n; i++) { + xa_for_each_start(&root->delayed_nodes, index, delayed_node, index) { /* * Don't increase refs in case the node is dead and * about to be removed from the tree in the loop below */ - if (!refcount_inc_not_zero(&delayed_nodes[i]->refs)) - delayed_nodes[i] = NULL; + if (refcount_inc_not_zero(&delayed_node->refs)) { + delayed_nodes[n] = delayed_node; + n++; + } + if (n >= ARRAY_SIZE(delayed_nodes)) + break; } + index++; spin_unlock(&root->inode_lock); - for (i = 0; i < n; i++) { - if (!delayed_nodes[i]) - continue; + for (int i = 0; i < n; i++) { __btrfs_kill_delayed_node(delayed_nodes[i]); btrfs_release_delayed_node(delayed_nodes[i]); } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7ce945db1243..15eccd8f3b1b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1160,7 +1160,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, root->nr_delalloc_inodes = 0; root->nr_ordered_extents = 0; root->inode_tree = RB_ROOT; - INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); + xa_init_flags(&root->delayed_nodes, GFP_ATOMIC); btrfs_init_root_block_rsv(root); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 72b3bff2742e..88a84cd133d8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3881,7 +3881,7 @@ cache_index: * cache. * * This is required for both inode re-read from disk and delayed inode - * in delayed_nodes_tree. + * in the delayed_nodes xarray. */ if (BTRFS_I(inode)->last_trans == fs_info->generation) set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, -- cgit v1.2.3 From 8ee922689d67b7cfa6acbe2aa1ee76ac72e6fc8a Mon Sep 17 00:00:00 2001 From: Gabriel Niebler Date: Thu, 21 Apr 2022 17:45:38 +0200 Subject: btrfs: turn fs_info member buffer_radix into XArray MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … named 'extent_buffers'. Also adjust all usages of this object to use the XArray API, which greatly simplifies the code as it takes care of locking and is generally easier to use and understand, providing notionally simpler array semantics. Also perform some light refactoring. 
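A minimal sketch of the insertion pattern this conversion introduces, condensed from the alloc_extent_buffer() hunk below (the real code uses goto free_eb rather than returning directly): xa_insert() fails with -EBUSY when the index is already occupied, which replaces the radix tree preload/-EEXIST dance with a plain retry loop:

	do {
		ret = xa_insert(&fs_info->extent_buffers,
				start >> fs_info->sectorsize_bits, eb, GFP_NOFS);
		if (ret == -ENOMEM)
			return ERR_PTR(ret);	/* hard failure, give up */
		if (ret == -EBUSY) {
			/* Lost the race: reuse the eb that got there first. */
			exists = find_extent_buffer(fs_info, start);
			if (exists)
				return exists;
		}
	} while (ret);

If the racing buffer is already gone again by the time of the lookup, find_extent_buffer() returns NULL, ret is still -EBUSY, and the loop simply retries the insert.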
Reviewed-by: Nikolay Borisov Signed-off-by: Gabriel Niebler Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 +- fs/btrfs/disk-io.c | 4 +- fs/btrfs/extent_io.c | 122 +++++++++++++++++-------------------------- fs/btrfs/tests/btrfs-tests.c | 22 ++------ 4 files changed, 55 insertions(+), 97 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9eebd96c6639..fb299fe53a89 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -994,10 +994,10 @@ struct btrfs_fs_info { struct btrfs_delayed_root *delayed_root; - /* Extent buffer radix tree */ + /* Extent buffer xarray */ spinlock_t buffer_lock; /* Entries are eb->start / sectorsize */ - struct radix_tree_root buffer_radix; + struct xarray extent_buffers; /* next backup root to be overwritten */ int backup_root_index; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 15eccd8f3b1b..6b43373d2d85 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -486,7 +486,7 @@ static int csum_dirty_subpage_buffers(struct btrfs_fs_info *fs_info, uptodate = btrfs_subpage_test_uptodate(fs_info, page, cur, fs_info->nodesize); - /* A dirty eb shouldn't disappear from buffer_radix */ + /* A dirty eb shouldn't disappear from extent_buffers */ if (WARN_ON(!eb)) return -EUCLEAN; @@ -3151,7 +3151,7 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) { INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); - INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); + xa_init_flags(&fs_info->extent_buffers, GFP_ATOMIC); INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->delayed_iputs); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 07888cce3bce..66636e43e339 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2965,7 +2965,7 @@ static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page) } /* - * Find extent buffer for a givne bytenr. + * Find extent buffer for a given bytenr. * * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking * in endio context. 
@@ -2984,11 +2984,9 @@ static struct extent_buffer *find_extent_buffer_readpage( return (struct extent_buffer *)page->private; } - /* For subpage case, we need to lookup buffer radix tree */ - rcu_read_lock(); - eb = radix_tree_lookup(&fs_info->buffer_radix, - bytenr >> fs_info->sectorsize_bits); - rcu_read_unlock(); + /* For subpage case, we need to lookup extent buffer xarray */ + eb = xa_load(&fs_info->extent_buffers, + bytenr >> fs_info->sectorsize_bits); ASSERT(eb); return eb; } @@ -4447,8 +4445,8 @@ static struct extent_buffer *find_extent_buffer_nolock( struct extent_buffer *eb; rcu_read_lock(); - eb = radix_tree_lookup(&fs_info->buffer_radix, - start >> fs_info->sectorsize_bits); + eb = xa_load(&fs_info->extent_buffers, + start >> fs_info->sectorsize_bits); if (eb && atomic_inc_not_zero(&eb->refs)) { rcu_read_unlock(); return eb; @@ -6141,24 +6139,22 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, if (!eb) return ERR_PTR(-ENOMEM); eb->fs_info = fs_info; -again: - ret = radix_tree_preload(GFP_NOFS); - if (ret) { - exists = ERR_PTR(ret); - goto free_eb; - } - spin_lock(&fs_info->buffer_lock); - ret = radix_tree_insert(&fs_info->buffer_radix, - start >> fs_info->sectorsize_bits, eb); - spin_unlock(&fs_info->buffer_lock); - radix_tree_preload_end(); - if (ret == -EEXIST) { - exists = find_extent_buffer(fs_info, start); - if (exists) + + do { + ret = xa_insert(&fs_info->extent_buffers, + start >> fs_info->sectorsize_bits, + eb, GFP_NOFS); + if (ret == -ENOMEM) { + exists = ERR_PTR(ret); goto free_eb; - else - goto again; - } + } + if (ret == -EBUSY) { + exists = find_extent_buffer(fs_info, start); + if (exists) + goto free_eb; + } + } while (ret); + check_buffer_tree_ref(eb); set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); @@ -6333,25 +6329,22 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, } if (uptodate) set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); -again: - ret = radix_tree_preload(GFP_NOFS); - if (ret) { - exists = ERR_PTR(ret); - goto free_eb; - } - - spin_lock(&fs_info->buffer_lock); - ret = radix_tree_insert(&fs_info->buffer_radix, - start >> fs_info->sectorsize_bits, eb); - spin_unlock(&fs_info->buffer_lock); - radix_tree_preload_end(); - if (ret == -EEXIST) { - exists = find_extent_buffer(fs_info, start); - if (exists) + + do { + ret = xa_insert(&fs_info->extent_buffers, + start >> fs_info->sectorsize_bits, + eb, GFP_NOFS); + if (ret == -ENOMEM) { + exists = ERR_PTR(ret); goto free_eb; - else - goto again; - } + } + if (ret == -EBUSY) { + exists = find_extent_buffer(fs_info, start); + if (exists) + goto free_eb; + } + } while (ret); + /* add one reference for the tree */ check_buffer_tree_ref(eb); set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); @@ -6396,10 +6389,8 @@ static int release_extent_buffer(struct extent_buffer *eb) spin_unlock(&eb->refs_lock); - spin_lock(&fs_info->buffer_lock); - radix_tree_delete(&fs_info->buffer_radix, - eb->start >> fs_info->sectorsize_bits); - spin_unlock(&fs_info->buffer_lock); + xa_erase(&fs_info->extent_buffers, + eb->start >> fs_info->sectorsize_bits); } else { spin_unlock(&eb->refs_lock); } @@ -7344,42 +7335,25 @@ void memmove_extent_buffer(const struct extent_buffer *dst, } } -#define GANG_LOOKUP_SIZE 16 static struct extent_buffer *get_next_extent_buffer( struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr) { - struct extent_buffer *gang[GANG_LOOKUP_SIZE]; - struct extent_buffer *found = NULL; + struct extent_buffer *eb; + unsigned long index; u64 page_start = 
page_offset(page); - u64 cur = page_start; ASSERT(in_range(bytenr, page_start, PAGE_SIZE)); lockdep_assert_held(&fs_info->buffer_lock); - while (cur < page_start + PAGE_SIZE) { - int ret; - int i; - - ret = radix_tree_gang_lookup(&fs_info->buffer_radix, - (void **)gang, cur >> fs_info->sectorsize_bits, - min_t(unsigned int, GANG_LOOKUP_SIZE, - PAGE_SIZE / fs_info->nodesize)); - if (ret == 0) - goto out; - for (i = 0; i < ret; i++) { - /* Already beyond page end */ - if (gang[i]->start >= page_start + PAGE_SIZE) - goto out; - /* Found one */ - if (gang[i]->start >= bytenr) { - found = gang[i]; - goto out; - } - } - cur = gang[ret - 1]->start + gang[ret - 1]->len; + xa_for_each_start(&fs_info->extent_buffers, index, eb, + page_start >> fs_info->sectorsize_bits) { + if (in_range(eb->start, page_start, PAGE_SIZE)) + return eb; + else if (eb->start >= page_start + PAGE_SIZE) + /* Already beyond page end */ + return NULL; } -out: - return found; + return NULL; } static int try_release_subpage_extent_buffer(struct page *page) diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index d8e56edd6991..c8c4efc9a3fb 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -150,8 +150,8 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) { - struct radix_tree_iter iter; - void **slot; + unsigned long index; + struct extent_buffer *eb; struct btrfs_device *dev, *tmp; if (!fs_info) @@ -163,25 +163,9 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) test_mnt->mnt_sb->s_fs_info = NULL; - spin_lock(&fs_info->buffer_lock); - radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) { - struct extent_buffer *eb; - - eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock); - if (!eb) - continue; - /* Shouldn't happen but that kind of thinking creates CVE's */ - if (radix_tree_exception(eb)) { - if (radix_tree_deref_retry(eb)) - slot = radix_tree_iter_retry(&iter); - continue; - } - slot = radix_tree_iter_resume(slot, &iter); - spin_unlock(&fs_info->buffer_lock); + xa_for_each(&fs_info->extent_buffers, index, eb) { free_extent_buffer_stale(eb); - spin_lock(&fs_info->buffer_lock); } - spin_unlock(&fs_info->buffer_lock); btrfs_mapping_tree_free(&fs_info->mapping_tree); list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices, -- cgit v1.2.3 From 48b36a602a335c184505346b5b37077840660634 Mon Sep 17 00:00:00 2001 From: Gabriel Niebler Date: Tue, 3 May 2022 12:44:43 +0200 Subject: btrfs: turn fs_roots_radix in btrfs_fs_info into an XArray MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … rename it to simply fs_roots and adjust all usages of this object to use the XArray API, because it is notionally easier to use and understand, as it provides array semantics, and also takes care of locking for us, further simplifying the code. Also do some refactoring, esp. where the API change requires largely rewriting some functions, anyway. 
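For orientation, a rough sketch of how the radix tree idioms map onto the XArray API in this patch, condensed from the hunks below (error handling and locking omitted): tags become marks and gang lookups become iteration:

	/* Tag a root as modified in the current transaction. */
	xa_set_mark(&fs_info->fs_roots,
		    (unsigned long)root->root_key.objectid,
		    BTRFS_ROOT_TRANS_TAG);

	/* Walk all roots without managing a gang[] buffer by hand. */
	xa_for_each(&fs_info->fs_roots, index, root)
		btrfs_drop_and_free_fs_root(fs_info, root);

Here BTRFS_ROOT_TRANS_TAG is redefined as XA_MARK_0 and index is an unsigned long cursor that xa_for_each() advances; both appear in the disk-io.c and transaction.c changes below.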
Reviewed-by: Nikolay Borisov Signed-off-by: Gabriel Niebler Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 8 +- fs/btrfs/disk-io.c | 173 +++++++++++++++++++------------------------ fs/btrfs/extent-tree.c | 2 +- fs/btrfs/inode.c | 13 ++-- fs/btrfs/tests/btrfs-tests.c | 2 +- fs/btrfs/transaction.c | 112 ++++++++++++---------------- 6 files changed, 139 insertions(+), 171 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index fb299fe53a89..b0398e9048ce 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -675,8 +675,9 @@ struct btrfs_fs_info { rwlock_t global_root_lock; struct rb_root global_root_tree; - spinlock_t fs_roots_radix_lock; - struct radix_tree_root fs_roots_radix; + /* The xarray that holds all the FS roots */ + spinlock_t fs_roots_lock; + struct xarray fs_roots; /* block group cache stuff */ rwlock_t block_group_cache_lock; @@ -1118,7 +1119,8 @@ enum { */ BTRFS_ROOT_SHAREABLE, BTRFS_ROOT_TRACK_DIRTY, - BTRFS_ROOT_IN_RADIX, + /* The root is tracked in fs_info::fs_roots */ + BTRFS_ROOT_REGISTERED, BTRFS_ROOT_ORPHAN_ITEM_INSERTED, BTRFS_ROOT_DEFRAG_RUNNING, BTRFS_ROOT_FORCE_COW, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6b43373d2d85..7787ac83ec2f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -1212,9 +1211,9 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks); #ifdef CONFIG_BTRFS_DEBUG INIT_LIST_HEAD(&root->leak_list); - spin_lock(&fs_info->fs_roots_radix_lock); + spin_lock(&fs_info->fs_roots_lock); list_add_tail(&root->leak_list, &fs_info->allocated_roots); - spin_unlock(&fs_info->fs_roots_radix_lock); + spin_unlock(&fs_info->fs_roots_lock); #endif } @@ -1661,12 +1660,11 @@ static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, { struct btrfs_root *root; - spin_lock(&fs_info->fs_roots_radix_lock); - root = radix_tree_lookup(&fs_info->fs_roots_radix, - (unsigned long)root_id); + spin_lock(&fs_info->fs_roots_lock); + root = xa_load(&fs_info->fs_roots, (unsigned long)root_id); if (root) root = btrfs_grab_root(root); - spin_unlock(&fs_info->fs_roots_radix_lock); + spin_unlock(&fs_info->fs_roots_lock); return root; } @@ -1708,20 +1706,14 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, { int ret; - ret = radix_tree_preload(GFP_NOFS); - if (ret) - return ret; - - spin_lock(&fs_info->fs_roots_radix_lock); - ret = radix_tree_insert(&fs_info->fs_roots_radix, - (unsigned long)root->root_key.objectid, - root); + spin_lock(&fs_info->fs_roots_lock); + ret = xa_insert(&fs_info->fs_roots, (unsigned long)root->root_key.objectid, + root, GFP_NOFS); if (ret == 0) { btrfs_grab_root(root); - set_bit(BTRFS_ROOT_IN_RADIX, &root->state); + set_bit(BTRFS_ROOT_REGISTERED, &root->state); } - spin_unlock(&fs_info->fs_roots_radix_lock); - radix_tree_preload_end(); + spin_unlock(&fs_info->fs_roots_lock); return ret; } @@ -2351,9 +2343,9 @@ void btrfs_put_root(struct btrfs_root *root) btrfs_drew_lock_destroy(&root->snapshot_lock); free_root_extent_buffers(root); #ifdef CONFIG_BTRFS_DEBUG - spin_lock(&root->fs_info->fs_roots_radix_lock); + spin_lock(&root->fs_info->fs_roots_lock); list_del_init(&root->leak_list); - spin_unlock(&root->fs_info->fs_roots_radix_lock); + spin_unlock(&root->fs_info->fs_roots_lock); #endif kfree(root); } @@ -2361,28 +2353,21 @@ void btrfs_put_root(struct btrfs_root *root) void btrfs_free_fs_roots(struct btrfs_fs_info 
*fs_info) { - int ret; - struct btrfs_root *gang[8]; - int i; + struct btrfs_root *root; + unsigned long index = 0; while (!list_empty(&fs_info->dead_roots)) { - gang[0] = list_entry(fs_info->dead_roots.next, - struct btrfs_root, root_list); - list_del(&gang[0]->root_list); + root = list_entry(fs_info->dead_roots.next, + struct btrfs_root, root_list); + list_del(&root->root_list); - if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) - btrfs_drop_and_free_fs_root(fs_info, gang[0]); - btrfs_put_root(gang[0]); + if (test_bit(BTRFS_ROOT_REGISTERED, &root->state)) + btrfs_drop_and_free_fs_root(fs_info, root); + btrfs_put_root(root); } - while (1) { - ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, - (void **)gang, 0, - ARRAY_SIZE(gang)); - if (!ret) - break; - for (i = 0; i < ret; i++) - btrfs_drop_and_free_fs_root(fs_info, gang[i]); + xa_for_each(&fs_info->fs_roots, index, root) { + btrfs_drop_and_free_fs_root(fs_info, root); } } @@ -3150,7 +3135,7 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) { - INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); + xa_init_flags(&fs_info->fs_roots, GFP_ATOMIC); xa_init_flags(&fs_info->extent_buffers, GFP_ATOMIC); INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); @@ -3159,7 +3144,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) INIT_LIST_HEAD(&fs_info->caching_block_groups); spin_lock_init(&fs_info->delalloc_root_lock); spin_lock_init(&fs_info->trans_lock); - spin_lock_init(&fs_info->fs_roots_radix_lock); + spin_lock_init(&fs_info->fs_roots_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); spin_lock_init(&fs_info->super_lock); @@ -3390,7 +3375,7 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info) /* * btrfs_find_orphan_roots() is responsible for finding all the dead * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load - * them into the fs_info->fs_roots_radix tree. This must be done before + * them into the fs_info->fs_roots. This must be done before * calling btrfs_orphan_cleanup() on the tree root. 
If we don't do it * first, then btrfs_orphan_cleanup() will delete a dead root's orphan * item before the root's tree is deleted - this means that if we unmount @@ -4514,12 +4499,11 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, { bool drop_ref = false; - spin_lock(&fs_info->fs_roots_radix_lock); - radix_tree_delete(&fs_info->fs_roots_radix, - (unsigned long)root->root_key.objectid); - if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state)) + spin_lock(&fs_info->fs_roots_lock); + xa_erase(&fs_info->fs_roots, (unsigned long)root->root_key.objectid); + if (test_and_clear_bit(BTRFS_ROOT_REGISTERED, &root->state)) drop_ref = true; - spin_unlock(&fs_info->fs_roots_radix_lock); + spin_unlock(&fs_info->fs_roots_lock); if (BTRFS_FS_ERROR(fs_info)) { ASSERT(root->log_root == NULL); @@ -4535,50 +4519,48 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) { - u64 root_objectid = 0; - struct btrfs_root *gang[8]; - int i = 0; + struct btrfs_root *roots[8]; + unsigned long index = 0; + int i; int err = 0; - unsigned int ret = 0; + int grabbed; while (1) { - spin_lock(&fs_info->fs_roots_radix_lock); - ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, - (void **)gang, root_objectid, - ARRAY_SIZE(gang)); - if (!ret) { - spin_unlock(&fs_info->fs_roots_radix_lock); - break; + struct btrfs_root *root; + + spin_lock(&fs_info->fs_roots_lock); + if (!xa_find(&fs_info->fs_roots, &index, ULONG_MAX, XA_PRESENT)) { + spin_unlock(&fs_info->fs_roots_lock); + return err; } - root_objectid = gang[ret - 1]->root_key.objectid + 1; - for (i = 0; i < ret; i++) { - /* Avoid to grab roots in dead_roots */ - if (btrfs_root_refs(&gang[i]->root_item) == 0) { - gang[i] = NULL; - continue; - } - /* grab all the search result for later use */ - gang[i] = btrfs_grab_root(gang[i]); + grabbed = 0; + xa_for_each_start(&fs_info->fs_roots, index, root, index) { + /* Avoid grabbing roots in dead_roots */ + if (btrfs_root_refs(&root->root_item) > 0) + roots[grabbed++] = btrfs_grab_root(root); + if (grabbed >= ARRAY_SIZE(roots)) + break; } - spin_unlock(&fs_info->fs_roots_radix_lock); + spin_unlock(&fs_info->fs_roots_lock); - for (i = 0; i < ret; i++) { - if (!gang[i]) + for (i = 0; i < grabbed; i++) { + if (!roots[i]) continue; - root_objectid = gang[i]->root_key.objectid; - err = btrfs_orphan_cleanup(gang[i]); + index = roots[i]->root_key.objectid; + err = btrfs_orphan_cleanup(roots[i]); if (err) - break; - btrfs_put_root(gang[i]); + goto out; + btrfs_put_root(roots[i]); } - root_objectid++; + index++; } - /* release the uncleaned roots due to error */ - for (; i < ret; i++) { - if (gang[i]) - btrfs_put_root(gang[i]); +out: + /* Release the roots that remain uncleaned due to error */ + for (; i < grabbed; i++) { + if (roots[i]) + btrfs_put_root(roots[i]); } return err; } @@ -4888,31 +4870,28 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info) { - struct btrfs_root *gang[8]; - u64 root_objectid = 0; - int ret; - - spin_lock(&fs_info->fs_roots_radix_lock); - while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, - (void **)gang, root_objectid, - ARRAY_SIZE(gang))) != 0) { - int i; + unsigned long index = 0; + int grabbed = 0; + struct btrfs_root *roots[8]; - for (i = 0; i < ret; i++) - gang[i] = btrfs_grab_root(gang[i]); - spin_unlock(&fs_info->fs_roots_radix_lock); + spin_lock(&fs_info->fs_roots_lock); + while ((grabbed = xa_extract(&fs_info->fs_roots, 
(void **)roots, index, + ULONG_MAX, 8, XA_PRESENT))) { + for (int i = 0; i < grabbed; i++) + roots[i] = btrfs_grab_root(roots[i]); + spin_unlock(&fs_info->fs_roots_lock); - for (i = 0; i < ret; i++) { - if (!gang[i]) + for (int i = 0; i < grabbed; i++) { + if (!roots[i]) continue; - root_objectid = gang[i]->root_key.objectid; - btrfs_free_log(NULL, gang[i]); - btrfs_put_root(gang[i]); + index = roots[i]->root_key.objectid; + btrfs_free_log(NULL, roots[i]); + btrfs_put_root(roots[i]); } - root_objectid++; - spin_lock(&fs_info->fs_roots_radix_lock); + index++; + spin_lock(&fs_info->fs_roots_lock); } - spin_unlock(&fs_info->fs_roots_radix_lock); + spin_unlock(&fs_info->fs_roots_lock); btrfs_free_log_root_tree(NULL, fs_info); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 963160a0c393..9eb7977130d4 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5810,7 +5810,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) btrfs_qgroup_convert_reserved_meta(root, INT_MAX); btrfs_qgroup_free_meta_all_pertrans(root); - if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) + if (test_bit(BTRFS_ROOT_REGISTERED, &root->state)) btrfs_add_dropped_root(trans, root); else btrfs_put_root(root); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 88a84cd133d8..a907b3ba7d62 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3549,6 +3549,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) u64 last_objectid = 0; int ret = 0, nr_unlink = 0; + /* Bail out if the cleanup is already running. */ if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) return 0; @@ -3631,17 +3632,17 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) * * btrfs_find_orphan_roots() ran before us, which has * found all deleted roots and loaded them into - * fs_info->fs_roots_radix. So here we can find if an + * fs_info->fs_roots. So here we can find if an * orphan item corresponds to a deleted root by looking - * up the root from that radix tree. + * up the root from that xarray. 
*/ - spin_lock(&fs_info->fs_roots_radix_lock); - dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, - (unsigned long)found_key.objectid); + spin_lock(&fs_info->fs_roots_lock); + dead_root = xa_load(&fs_info->fs_roots, + (unsigned long)found_key.objectid); if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) is_dead_root = 1; - spin_unlock(&fs_info->fs_roots_radix_lock); + spin_unlock(&fs_info->fs_roots_lock); if (is_dead_root) { /* prevent this orphan from being found again */ diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index c8c4efc9a3fb..1591bfa55bcc 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -186,7 +186,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root) if (!root) return; /* Will be freed by btrfs_free_fs_roots */ - if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state))) + if (WARN_ON(test_bit(BTRFS_ROOT_REGISTERED, &root->state))) return; btrfs_global_root_delete(root); btrfs_put_root(root); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 875b801ab3d7..06c0a958d114 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -23,7 +23,7 @@ #include "space-info.h" #include "zoned.h" -#define BTRFS_ROOT_TRANS_TAG 0 +#define BTRFS_ROOT_TRANS_TAG XA_MARK_0 /* * Transaction states and transitions @@ -437,15 +437,15 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, */ smp_wmb(); - spin_lock(&fs_info->fs_roots_radix_lock); + spin_lock(&fs_info->fs_roots_lock); if (root->last_trans == trans->transid && !force) { - spin_unlock(&fs_info->fs_roots_radix_lock); + spin_unlock(&fs_info->fs_roots_lock); return 0; } - radix_tree_tag_set(&fs_info->fs_roots_radix, - (unsigned long)root->root_key.objectid, - BTRFS_ROOT_TRANS_TAG); - spin_unlock(&fs_info->fs_roots_radix_lock); + xa_set_mark(&fs_info->fs_roots, + (unsigned long)root->root_key.objectid, + BTRFS_ROOT_TRANS_TAG); + spin_unlock(&fs_info->fs_roots_lock); root->last_trans = trans->transid; /* this is pretty tricky. We don't want to @@ -487,11 +487,9 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, spin_unlock(&cur_trans->dropped_roots_lock); /* Make sure we don't try to update the root at commit time */ - spin_lock(&fs_info->fs_roots_radix_lock); - radix_tree_tag_clear(&fs_info->fs_roots_radix, - (unsigned long)root->root_key.objectid, - BTRFS_ROOT_TRANS_TAG); - spin_unlock(&fs_info->fs_roots_radix_lock); + xa_clear_mark(&fs_info->fs_roots, + (unsigned long)root->root_key.objectid, + BTRFS_ROOT_TRANS_TAG); } int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, @@ -1404,9 +1402,8 @@ void btrfs_add_dead_root(struct btrfs_root *root) static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *gang[8]; - int i; - int ret; + struct btrfs_root *root; + unsigned long index; /* * At this point no one can be using this transaction to modify any tree @@ -1414,57 +1411,46 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) */ ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); - spin_lock(&fs_info->fs_roots_radix_lock); - while (1) { - ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, - (void **)gang, 0, - ARRAY_SIZE(gang), - BTRFS_ROOT_TRANS_TAG); - if (ret == 0) - break; - for (i = 0; i < ret; i++) { - struct btrfs_root *root = gang[i]; - int ret2; - - /* - * At this point we can neither have tasks logging inodes - * from a root nor trying to commit a log tree. 
-			 */
-			ASSERT(atomic_read(&root->log_writers) == 0);
-			ASSERT(atomic_read(&root->log_commit[0]) == 0);
-			ASSERT(atomic_read(&root->log_commit[1]) == 0);
-
-			radix_tree_tag_clear(&fs_info->fs_roots_radix,
-					(unsigned long)root->root_key.objectid,
-					BTRFS_ROOT_TRANS_TAG);
-			spin_unlock(&fs_info->fs_roots_radix_lock);
-
-			btrfs_free_log(trans, root);
-			ret2 = btrfs_update_reloc_root(trans, root);
-			if (ret2)
-				return ret2;
-
-			/* see comments in should_cow_block() */
-			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
-			smp_mb__after_atomic();
-
-			if (root->commit_root != root->node) {
-				list_add_tail(&root->dirty_list,
-					&trans->transaction->switch_commits);
-				btrfs_set_root_node(&root->root_item,
-						    root->node);
-			}
+	spin_lock(&fs_info->fs_roots_lock);
+	xa_for_each_marked(&fs_info->fs_roots, index, root, BTRFS_ROOT_TRANS_TAG) {
+		int ret;
+
+		/*
+		 * At this point we can neither have tasks logging inodes
+		 * from a root nor trying to commit a log tree.
+		 */
+		ASSERT(atomic_read(&root->log_writers) == 0);
+		ASSERT(atomic_read(&root->log_commit[0]) == 0);
+		ASSERT(atomic_read(&root->log_commit[1]) == 0);
+
+		xa_clear_mark(&fs_info->fs_roots,
+			      (unsigned long)root->root_key.objectid,
+			      BTRFS_ROOT_TRANS_TAG);
+		spin_unlock(&fs_info->fs_roots_lock);
 
-			ret2 = btrfs_update_root(trans, fs_info->tree_root,
-						 &root->root_key,
-						 &root->root_item);
-			if (ret2)
-				return ret2;
-			spin_lock(&fs_info->fs_roots_radix_lock);
-			btrfs_qgroup_free_meta_all_pertrans(root);
+		btrfs_free_log(trans, root);
+		ret = btrfs_update_reloc_root(trans, root);
+		if (ret)
+			return ret;
+
+		/* See comments in should_cow_block() */
+		clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
+		smp_mb__after_atomic();
+
+		if (root->commit_root != root->node) {
+			list_add_tail(&root->dirty_list,
+				      &trans->transaction->switch_commits);
+			btrfs_set_root_node(&root->root_item, root->node);
 		}
+
+		ret = btrfs_update_root(trans, fs_info->tree_root,
+					&root->root_key, &root->root_item);
+		if (ret)
+			return ret;
+		spin_lock(&fs_info->fs_roots_lock);
+		btrfs_qgroup_free_meta_all_pertrans(root);
 	}
-	spin_unlock(&fs_info->fs_roots_radix_lock);
+	spin_unlock(&fs_info->fs_roots_lock);
 	return 0;
 }
--
cgit v1.2.3


From 2fe6a5a1d23dceeb93e76ea16cc51be916ee9f15 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Wed, 20 Mar 2019 11:45:48 +0100
Subject: btrfs: sink parameter is_data to btrfs_set_disk_extent_flags

The parameter was added in 2009 in the infamous monster commit
5d4f98a28c7d ("Btrfs: Mixed back reference (FORWARD ROLLING FORMAT
CHANGE)") but has never been used since. We can sink it and allow
further simplifications.
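For illustration, a minimal sketch of the pattern outside the kernel (the
struct and helpers below are simplified stand-ins, not the real btrfs
definitions): sinking a parameter that every caller passes as the same
constant moves that decision into the callee.

#include <stdbool.h>

struct extent_op {
	unsigned long long flags_to_set;
	bool update_flags;
	bool update_key;
	bool is_data;	/* every existing caller passed 0 (metadata) */
	int level;
};

/* Before: each caller must supply is_data, although it is always 0. */
void set_extent_flags_old(struct extent_op *op, unsigned long long flags,
			  int level, int is_data)
{
	op->flags_to_set = flags;
	op->update_flags = true;
	op->update_key = false;
	op->is_data = is_data ? true : false;
	op->level = level;
}

/* After: the parameter is sunk and the field is hardcoded to false. */
void set_extent_flags_new(struct extent_op *op, unsigned long long flags,
			  int level)
{
	op->flags_to_set = flags;
	op->update_flags = true;
	op->update_key = false;
	op->is_data = false;
	op->level = level;
}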
Reviewed-by: Johannes Thumshirn
Reviewed-by: Nikolay Borisov
Signed-off-by: David Sterba
---
 fs/btrfs/ctree.c       | 2 +-
 fs/btrfs/ctree.h       | 3 +--
 fs/btrfs/extent-tree.c | 6 +++---
 3 files changed, 5 insertions(+), 6 deletions(-)

(limited to 'fs/btrfs/ctree.h')

diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 1e24695ede0a..6e556031a8f3 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -343,7 +343,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 			int level = btrfs_header_level(buf);
 
 			ret = btrfs_set_disk_extent_flags(trans, buf,
-							  new_flags, level, 0);
+							  new_flags, level);
 			if (ret)
 				return ret;
 		}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b0398e9048ce..57b5b76c7f9c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2811,8 +2811,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		  struct extent_buffer *buf, int full_backref);
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
-				struct extent_buffer *eb, u64 flags,
-				int level, int is_data);
+				struct extent_buffer *eb, u64 flags, int level);
 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);
 
 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9eb7977130d4..6f4aecb71eff 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2180,7 +2180,7 @@ out:
 
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 				struct extent_buffer *eb, u64 flags,
-				int level, int is_data)
+				int level)
 {
 	struct btrfs_delayed_extent_op *extent_op;
 	int ret;
@@ -2192,7 +2192,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 	extent_op->flags_to_set = flags;
 	extent_op->update_flags = true;
 	extent_op->update_key = false;
-	extent_op->is_data = is_data ? true : false;
+	extent_op->is_data = false;
 	extent_op->level = level;
 
 	ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
@@ -5136,7 +5136,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 		ret = btrfs_dec_ref(trans, root, eb, 0);
 		BUG_ON(ret); /* -ENOMEM */
 		ret = btrfs_set_disk_extent_flags(trans, eb, flag,
-						  btrfs_header_level(eb), 0);
+						  btrfs_header_level(eb));
 		BUG_ON(ret); /* -ENOMEM */
 		wc->flags[level] |= flag;
 	}
--
cgit v1.2.3


From cb3a12d9885974a5f2afe0c3e9a752195401828f Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Tue, 27 Jul 2021 14:59:41 +0200
Subject: btrfs: rename bio_flags in parameters and switch type

Several functions take a bio_flags parameter that has been reduced to
just the compression type. Unify the parameter name and switch its type
to enum btrfs_compression_type accordingly.
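For illustration, a minimal sketch of why the typed parameter helps
(simplified stand-in names; the kernel's real type is enum
btrfs_compression_type): an enum states the contract in the signature and
lets the compiler flag unhandled values, which a bare unsigned long cannot.

enum compression_type {
	COMPRESS_NONE,
	COMPRESS_ZLIB,
	COMPRESS_LZO,
	COMPRESS_ZSTD,
};

/* Before: an opaque word; nothing says it only ever held a compress type. */
void submit_bio_old(int mirror_num, unsigned long bio_flags);

/* After: the signature documents exactly what the argument carries. */
void submit_bio_new(int mirror_num, enum compression_type compress_type);

/*
 * A switch over the enum gets -Wswitch coverage warnings if a new
 * compression type is added and a case is forgotten.
 */
const char *compress_name(enum compression_type type)
{
	switch (type) {
	case COMPRESS_NONE: return "none";
	case COMPRESS_ZLIB: return "zlib";
	case COMPRESS_LZO:  return "lzo";
	case COMPRESS_ZSTD: return "zstd";
	}
	return "unknown";
}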
Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/extent_io.c | 29 +++++++++++++++-------------- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 7 ++++--- 4 files changed, 21 insertions(+), 19 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 57b5b76c7f9c..0d1d43f171cc 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3253,7 +3253,7 @@ u64 btrfs_file_extent_end(const struct btrfs_path *path); /* inode.c */ void btrfs_submit_data_bio(struct inode *inode, struct bio *bio, - int mirror_num, unsigned long bio_flags); + int mirror_num, enum btrfs_compression_type compress_type); unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio, u32 bio_offset, struct page *page, u64 start, u64 end); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index faa41ca23bbb..5ef07b2ebebf 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -178,7 +178,8 @@ static int add_extent_changeset(struct extent_state *state, u32 bits, return ret; } -static void submit_one_bio(struct bio *bio, int mirror_num, unsigned long bio_flags) +static void submit_one_bio(struct bio *bio, int mirror_num, + enum btrfs_compression_type compress_type) { struct extent_io_tree *tree = bio->bi_private; @@ -189,7 +190,7 @@ static void submit_one_bio(struct bio *bio, int mirror_num, unsigned long bio_fl if (is_data_inode(tree->private_data)) btrfs_submit_data_bio(tree->private_data, bio, mirror_num, - bio_flags); + compress_type); else btrfs_submit_metadata_bio(tree->private_data, bio, mirror_num); /* @@ -3246,7 +3247,7 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size) * a contiguous page to the previous one * @size: portion of page that we want to write * @pg_offset: starting offset in the page - * @bio_flags: flags of the current bio to see if we can merge them + * @compress_type: compression type of the current bio to see if we can merge them * * Attempt to add a page to bio considering stripe alignment etc. * @@ -3258,7 +3259,7 @@ static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl, struct page *page, u64 disk_bytenr, unsigned int size, unsigned int pg_offset, - unsigned long bio_flags) + enum btrfs_compression_type compress_type) { struct bio *bio = bio_ctrl->bio; u32 bio_size = bio->bi_iter.bi_size; @@ -3270,7 +3271,7 @@ static int btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl, ASSERT(bio); /* The limit should be calculated when bio_ctrl->bio is allocated */ ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary); - if (bio_ctrl->bio_flags != bio_flags) + if (bio_ctrl->bio_flags != compress_type) return 0; if (bio_ctrl->bio_flags != BTRFS_COMPRESS_NONE) @@ -3359,7 +3360,7 @@ static int alloc_new_bio(struct btrfs_inode *inode, unsigned int opf, bio_end_io_t end_io_func, u64 disk_bytenr, u32 offset, u64 file_offset, - unsigned long bio_flags) + enum btrfs_compression_type compress_type) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct bio *bio; @@ -3370,12 +3371,12 @@ static int alloc_new_bio(struct btrfs_inode *inode, * For compressed page range, its disk_bytenr is always @disk_bytenr * passed in, no matter if we have added any range into previous bio. 
*/ - if (bio_flags != BTRFS_COMPRESS_NONE) + if (compress_type != BTRFS_COMPRESS_NONE) bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; else bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT; bio_ctrl->bio = bio; - bio_ctrl->bio_flags = bio_flags; + bio_ctrl->bio_flags = compress_type; bio->bi_end_io = end_io_func; bio->bi_private = &inode->io_tree; bio->bi_opf = opf; @@ -3434,7 +3435,7 @@ error: * @end_io_func: end_io callback for new bio * @mirror_num: desired mirror to read/write * @prev_bio_flags: flags of previous bio to see if we can merge the current one - * @bio_flags: flags of the current bio to see if we can merge them + * @compress_type: compress type for current bio */ static int submit_extent_page(unsigned int opf, struct writeback_control *wbc, @@ -3443,7 +3444,7 @@ static int submit_extent_page(unsigned int opf, size_t size, unsigned long pg_offset, bio_end_io_t end_io_func, int mirror_num, - unsigned long bio_flags, + enum btrfs_compression_type compress_type, bool force_bio_submit) { int ret = 0; @@ -3468,7 +3469,7 @@ static int submit_extent_page(unsigned int opf, ret = alloc_new_bio(inode, bio_ctrl, wbc, opf, end_io_func, disk_bytenr, offset, page_offset(page) + cur, - bio_flags); + compress_type); if (ret < 0) return ret; } @@ -3476,14 +3477,14 @@ static int submit_extent_page(unsigned int opf, * We must go through btrfs_bio_add_page() to ensure each * page range won't cross various boundaries. */ - if (bio_flags != BTRFS_COMPRESS_NONE) + if (compress_type != BTRFS_COMPRESS_NONE) added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr, size - offset, pg_offset + offset, - bio_flags); + compress_type); else added = btrfs_bio_add_page(bio_ctrl, page, disk_bytenr + offset, size - offset, - pg_offset + offset, bio_flags); + pg_offset + offset, compress_type); /* Metadata page range should never be split */ if (!is_data_inode(&inode->vfs_inode)) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 7774a48053fd..17674b7e699c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -67,7 +67,7 @@ struct extent_io_tree; typedef void (submit_bio_hook_t)(struct inode *inode, struct bio *bio, int mirror_num, - unsigned long bio_flags); + enum btrfs_compression_type compress_type); typedef blk_status_t (extent_submit_bio_start_t)(struct inode *inode, struct bio *bio, u64 dio_file_offset); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 97b5342e66c2..63f71b3097d8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2573,7 +2573,7 @@ out: * c-3) otherwise: async submit */ void btrfs_submit_data_bio(struct inode *inode, struct bio *bio, - int mirror_num, unsigned long bio_flags) + int mirror_num, enum btrfs_compression_type compress_type) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; @@ -2602,7 +2602,7 @@ void btrfs_submit_data_bio(struct inode *inode, struct bio *bio, if (ret) goto out; - if (bio_flags != BTRFS_COMPRESS_NONE) { + if (compress_type != BTRFS_COMPRESS_NONE) { /* * btrfs_submit_compressed_read will handle completing * the bio if there were any errors, so just return @@ -7834,7 +7834,8 @@ static void btrfs_dio_private_put(struct btrfs_dio_private *dip) } static void submit_dio_repair_bio(struct inode *inode, struct bio *bio, - int mirror_num, unsigned long bio_flags) + int mirror_num, + enum btrfs_compression_type compress_type) { struct btrfs_dio_private *dip = bio->bi_private; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); -- cgit v1.2.3 From 
36e8c62273aaa1cb72919212356f2e75e04bcf3c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 5 May 2022 15:11:09 -0500 Subject: btrfs: add a btrfs_dio_rw wrapper Add a wrapper around iomap_dio_rw that keeps the direct I/O internals isolated in inode.c. Reviewed-by: Nikolay Borisov Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 4 ++-- fs/btrfs/file.c | 6 ++---- fs/btrfs/inode.c | 10 ++++++++-- 3 files changed, 12 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0d1d43f171cc..47e1e79e8bff 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3359,9 +3359,9 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, const struct btrfs_ioctl_encoded_io_args *encoded); +ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before); + extern const struct dentry_operations btrfs_dentry_operations; -extern const struct iomap_ops btrfs_dio_iomap_ops; -extern const struct iomap_dio_ops btrfs_dio_ops; /* Inode locking type flags, by default the exclusive lock is taken */ #define BTRFS_ILOCK_SHARED (1U << 0) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b64fb93d9046..46c2baa8fdf5 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1929,8 +1929,7 @@ relock: */ again: from->nofault = true; - err = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops, - IOMAP_DIO_PARTIAL, written); + err = btrfs_dio_rw(iocb, from, written); from->nofault = false; /* No increment (+=) because iomap returns a cumulative value. */ @@ -3693,8 +3692,7 @@ again: */ pagefault_disable(); to->nofault = true; - ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops, - IOMAP_DIO_PARTIAL, read); + ret = btrfs_dio_rw(iocb, to, read); to->nofault = false; pagefault_enable(); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 63f71b3097d8..eb951ac52d22 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8155,15 +8155,21 @@ out_err: btrfs_dio_private_put(dip); } -const struct iomap_ops btrfs_dio_iomap_ops = { +static const struct iomap_ops btrfs_dio_iomap_ops = { .iomap_begin = btrfs_dio_iomap_begin, .iomap_end = btrfs_dio_iomap_end, }; -const struct iomap_dio_ops btrfs_dio_ops = { +static const struct iomap_dio_ops btrfs_dio_ops = { .submit_io = btrfs_submit_direct, }; +ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) +{ + return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, + IOMAP_DIO_PARTIAL, done_before); +} + static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { -- cgit v1.2.3 From a3e171a09cd4f67bb4c7ea93b552b543f19a308e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 5 May 2022 15:11:14 -0500 Subject: btrfs: move struct btrfs_dio_private to inode.c The btrfs_dio_private structure is only used in inode.c, so move the definition there. 
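For illustration, the general shape of this kind of cleanup (hypothetical
files foo.h and foo.c, not the btrfs sources): a structure with a single
consuming .c file can be defined there, so the shared header exports at
most an opaque forward declaration and no other translation unit can
depend on the layout.

/* foo.h: only an opaque forward declaration is exported. */
struct dio_private;

/* foo.c: the full definition lives next to its only users. */
struct dio_private {
	unsigned long long file_offset;	/* file offset of this I/O */
	unsigned int bytes;		/* total bytes in flight */
	int refs;			/* one per in-flight bio, plus setup */
};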
Reviewed-by: Nikolay Borisov Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/btrfs_inode.h | 24 ------------------------ fs/btrfs/ctree.h | 1 - fs/btrfs/inode.c | 24 ++++++++++++++++++++++++ 3 files changed, 24 insertions(+), 25 deletions(-) (limited to 'fs/btrfs/ctree.h') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 14c28213ca0d..33811e896623 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -395,30 +395,6 @@ static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode) return true; } -struct btrfs_dio_private { - struct inode *inode; - - /* - * Since DIO can use anonymous page, we cannot use page_offset() to - * grab the file offset, thus need a dedicated member for file offset. - */ - u64 file_offset; - /* Used for bio::bi_size */ - u32 bytes; - - /* - * References to this structure. There is one reference per in-flight - * bio plus one while we're still setting up. - */ - refcount_t refs; - - /* dio_bio came from fs/direct-io.c */ - struct bio *dio_bio; - - /* Array of checksums */ - u8 csums[]; -}; - /* * btrfs_inode_item stores flags in a u64, btrfs_inode stores them in two * separate u32s. These two functions convert between the two representations. diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 47e1e79e8bff..0e49b1a0c071 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3218,7 +3218,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); /* file-item.c */ -struct btrfs_dio_private; int btrfs_del_csums(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 len); blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ebed9eb3e567..84b0a052da48 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -68,6 +68,30 @@ struct btrfs_dio_data { bool nocow_done; }; +struct btrfs_dio_private { + struct inode *inode; + + /* + * Since DIO can use anonymous page, we cannot use page_offset() to + * grab the file offset, thus need a dedicated member for file offset. + */ + u64 file_offset; + /* Used for bio::bi_size */ + u32 bytes; + + /* + * References to this structure. There is one reference per in-flight + * bio plus one while we're still setting up. + */ + refcount_t refs; + + /* dio_bio came from fs/direct-io.c */ + struct bio *dio_bio; + + /* Array of checksums */ + u8 csums[]; +}; + struct btrfs_rename_ctx { /* Output field. Stores the index number of the old directory entry. */ u64 index; -- cgit v1.2.3
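One detail worth calling out in the structure above: csums[] is a C99
flexible array member, so the checksum bytes live in the same allocation
as the struct itself. A standalone sketch of that allocation pattern
(plain C with simplified fields; in the kernel this would typically be
sized with struct_size() and allocated with kzalloc() instead):

#include <stdlib.h>

/* Simplified stand-in for btrfs_dio_private. */
struct dio_private {
	unsigned long long file_offset;
	unsigned int bytes;
	unsigned char csums[];	/* flexible array member, must be last */
};

/* One allocation covers the header plus n trailing checksum bytes. */
static struct dio_private *dio_private_alloc(size_t n)
{
	return calloc(1, sizeof(struct dio_private) + n);
}

int main(void)
{
	struct dio_private *dip = dio_private_alloc(32);

	if (!dip)
		return 1;
	dip->csums[0] = 0xab;	/* trailing storage is directly addressable */
	free(dip);
	return 0;
}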