author     Robbie Ko <robbieko@synology.com>        2020-05-14 12:19:18 +0300
committer  David Sterba <dsterba@suse.com>          2020-05-25 12:25:36 +0300
commit     c11fbb6ed0ddc11b992f9c668b79505d31956368
tree       ce99a3789e94b681fda1dba17e96c584c1919d3d /fs/btrfs
parent     aeb935a455812e0ec15e15801f7a42d887e6c22f
download   linux-c11fbb6ed0ddc11b992f9c668b79505d31956368.tar.xz
btrfs: reduce lock contention when creating snapshot
When creating a snapshot, ordered extents need to be flushed and this
can take a long time.
In create_snapshot, two locks are held while this happens:
1. Destination directory inode lock
2. Global subvolume semaphore
This will unnecessarily block other operations like subvolume destroy,
create, or setflag until the snapshot is created.
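
For reference, the sketch below shows roughly where those two locks are taken on the way into create_snapshot. It is an abridged paraphrase of btrfs_mksubvol() in fs/btrfs/ioctl.c around this patch's base, with the lookup and error handling trimmed, so treat the exact call sites as approximate rather than verbatim source.

static noinline int btrfs_mksubvol(const struct path *parent,
				   const char *name, int namelen,
				   struct btrfs_root *snap_src,
				   bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct dentry *dentry;
	int error;

	/* Lock 1: destination directory inode lock */
	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one_len(name, parent->dentry, namelen);
	/* (existence and permission checks trimmed) */

	/* Lock 2: global subvolume semaphore; subvolume destroy, create
	 * and setflag contend on this */
	down_read(&fs_info->subvol_sem);

	if (snap_src)
		error = create_snapshot(snap_src, dir, dentry, readonly, inherit);
	else
		error = create_subvol(dir, dentry, name, namelen, inherit);

	up_read(&fs_info->subvol_sem);
	dput(dentry);
	inode_unlock(dir);
	return error;
}

Before this patch, the delalloc flush and the ordered extent wait happened inside create_snapshot(), that is, with both of the locks above already held.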
We can fix that by moving the flush outside the locked section, as the flush
does not depend on the aforementioned locks. The code factors the
snapshot-related work out of create_snapshot into a new helper, btrfs_mksnapshot.
__btrfs_ioctl_snap_create
  btrfs_mksubvol
    create_subvol
  btrfs_mksnapshot
    <flush>
    btrfs_mksubvol
      create_snapshot
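
Condensed from the diff below (comments shortened), the new helper ends up looking like this: the flush now runs with only the per-root snapshot_lock held for reading, and the directory inode lock plus subvol_sem are taken only afterwards, inside btrfs_mksubvol().

static noinline int btrfs_mksnapshot(const struct path *parent,
				     const char *name, int namelen,
				     struct btrfs_root *root,
				     bool readonly,
				     struct btrfs_qgroup_inherit *inherit)
{
	bool snapshot_force_cow = false;
	int ret;

	/* Flush without holding the dir inode lock or subvol_sem */
	btrfs_drew_read_lock(&root->snapshot_lock);

	ret = btrfs_start_delalloc_snapshot(root);
	if (ret)
		goto out;

	/* Force future writes to COW while the snapshot is created */
	atomic_inc(&root->snapshot_force_cow);
	snapshot_force_cow = true;

	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);

	/* Only now are the two contended locks taken, inside btrfs_mksubvol() */
	ret = btrfs_mksubvol(parent, name, namelen, root, readonly, inherit);
out:
	if (snapshot_force_cow)
		atomic_dec(&root->snapshot_force_cow);
	btrfs_drew_read_unlock(&root->snapshot_lock);
	return ret;
}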
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Robbie Ko <robbieko@synology.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/ioctl.c | 70
1 file changed, 41 insertions(+), 29 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 709d9446896a..973236b72a97 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -748,7 +748,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 	struct btrfs_pending_snapshot *pending_snapshot;
 	struct btrfs_trans_handle *trans;
 	int ret;
-	bool snapshot_force_cow = false;
 
 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
 		return -EINVAL;
@@ -771,27 +770,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 		goto free_pending;
 	}
 
-	/*
-	 * Force new buffered writes to reserve space even when NOCOW is
-	 * possible. This is to avoid later writeback (running dealloc) to
-	 * fallback to COW mode and unexpectedly fail with ENOSPC.
-	 */
-	btrfs_drew_read_lock(&root->snapshot_lock);
-
-	ret = btrfs_start_delalloc_snapshot(root);
-	if (ret)
-		goto dec_and_free;
-
-	/*
-	 * All previous writes have started writeback in NOCOW mode, so now
-	 * we force future writes to fallback to COW mode during snapshot
-	 * creation.
-	 */
-	atomic_inc(&root->snapshot_force_cow);
-	snapshot_force_cow = true;
-
-	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
-
 	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
 			     BTRFS_BLOCK_RSV_TEMP);
 	/*
@@ -806,7 +784,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 					&pending_snapshot->block_rsv, 8,
 					false);
 	if (ret)
-		goto dec_and_free;
+		goto free_pending;
 
 	pending_snapshot->dentry = dentry;
 	pending_snapshot->root = root;
@@ -848,11 +826,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 fail:
 	btrfs_put_root(pending_snapshot->snap);
 	btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
-dec_and_free:
-	if (snapshot_force_cow)
-		atomic_dec(&root->snapshot_force_cow);
-	btrfs_drew_read_unlock(&root->snapshot_lock);
-
 free_pending:
 	kfree(pending_snapshot->root_item);
 	btrfs_free_path(pending_snapshot->path);
@@ -983,6 +956,45 @@ out_unlock:
 	return error;
 }
 
+static noinline int btrfs_mksnapshot(const struct path *parent,
+				   const char *name, int namelen,
+				   struct btrfs_root *root,
+				   bool readonly,
+				   struct btrfs_qgroup_inherit *inherit)
+{
+	int ret;
+	bool snapshot_force_cow = false;
+
+	/*
+	 * Force new buffered writes to reserve space even when NOCOW is
+	 * possible. This is to avoid later writeback (running dealloc) to
+	 * fallback to COW mode and unexpectedly fail with ENOSPC.
+	 */
+	btrfs_drew_read_lock(&root->snapshot_lock);
+
+	ret = btrfs_start_delalloc_snapshot(root);
+	if (ret)
+		goto out;
+
+	/*
+	 * All previous writes have started writeback in NOCOW mode, so now
+	 * we force future writes to fallback to COW mode during snapshot
+	 * creation.
+	 */
+	atomic_inc(&root->snapshot_force_cow);
+	snapshot_force_cow = true;
+
+	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+
+	ret = btrfs_mksubvol(parent, name, namelen,
+			     root, readonly, inherit);
+out:
+	if (snapshot_force_cow)
+		atomic_dec(&root->snapshot_force_cow);
+	btrfs_drew_read_unlock(&root->snapshot_lock);
+	return ret;
+}
+
 /*
  * When we're defragging a range, we don't want to kick it off again
  * if it is really just waiting for delalloc to send it down.
@@ -1762,7 +1774,7 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
 		 */
 		ret = -EPERM;
 	} else {
-		ret = btrfs_mksubvol(&file->f_path, name, namelen,
+		ret = btrfs_mksnapshot(&file->f_path, name, namelen,
 				      BTRFS_I(src_inode)->root,
 				      readonly, inherit);
 	}