From eede2bf34f4fa84ce82e36878ccdd0fdc4b1463c Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Thu, 3 Nov 2016 10:28:12 -0700 Subject: Btrfs: prevent ioctls from interfering with a swap file A later patch will implement swap file support for Btrfs, but before we do that, we need to make sure that the various Btrfs ioctls cannot change a swap file. When a swap file is active, we must make sure that the extents of the file are not moved and that they don't become shared. That means that the following are not safe: - chattr +c (enable compression) - reflink - dedupe - snapshot - defrag Don't allow those to happen on an active swap file. Additionally, balance, resize, device remove, and device replace are also unsafe if they affect an active swapfile. Add a red-black tree of block groups and devices which contain an active swapfile. Relocation checks each block group against this tree and skips it or errors out for balance or resize, respectively. Device remove and device replace check the tree for the device they will operate on. Note that we don't have to worry about chattr -C (disable nocow), which we ignore for non-empty files, because an active swapfile must be non-empty and can't be truncated. We also don't have to worry about autodefrag because it's only done on COW files. Truncate and fallocate are already taken care of by the generic code. Device add doesn't do relocation so it's not an issue, either. Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 2aa48aecc52b..46092e67f61a 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -407,6 +407,13 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, if (IS_ERR(src_device)) return PTR_ERR(src_device); + if (btrfs_pinned_by_swapfile(fs_info, src_device)) { + btrfs_warn_in_rcu(fs_info, + "cannot replace device %s (devid %llu) due to active swapfile", + btrfs_dev_name(src_device), src_device->devid); + return -ETXTBSY; + } + ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name, src_device, &tgt_device); if (ret) -- cgit v1.2.3 From ab457246f8a45bbb3e79fd2a9c1044b5ac14ec99 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Mon, 12 Nov 2018 13:05:15 +0800 Subject: btrfs: remove redundant replace_state init dev_replace::replace_state has been set to BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED (0) in the same function, So delete the line which sets replace_state = 0; Reviewed-by: Nikolay Borisov Signed-off-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 46092e67f61a..bbc492185848 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -59,7 +59,6 @@ no_valid_dev_replace_entry_found: BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED; dev_replace->cont_reading_from_srcdev_mode = BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS; - dev_replace->replace_state = 0; dev_replace->time_started = 0; dev_replace->time_stopped = 0; atomic64_set(&dev_replace->num_write_errors, 0); -- cgit v1.2.3 From 54862d6d28fc439d1f99c6e19bb38d9dc08161e0 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Sun, 11 Nov 2018 22:22:16 +0800 Subject: btrfs: mark btrfs_dev_replace_start as static There isn't any other consumer other than in its own file dev-replace.c. 
Signed-off-by: Anand Jain Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 2 +- fs/btrfs/dev-replace.h | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index bbc492185848..32da6901dc88 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -389,7 +389,7 @@ static char* btrfs_dev_name(struct btrfs_device *device) return rcu_str_deref(device->name); } -int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, +static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, const char *tgtdev_name, u64 srcdevid, const char *srcdev_name, int read_src) { diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index 795c551f5b5e..27e3bb0cca11 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -13,9 +13,6 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); -int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, - const char *tgtdev_name, u64 srcdevid, const char *srcdev_name, - int read_src); void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info); -- cgit v1.2.3 From 0d228ece59a35a9b9e8ff0d40653234a6d90f61e Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Sun, 11 Nov 2018 22:22:17 +0800 Subject: btrfs: dev-replace: go back to suspended state if target device is missing At the time of a forced unmount we place the running replace into the BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED state, so when the system comes back we expect that the target device may be missing. In that case, let the replace state remain BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED instead of BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED, as there isn't any matching scrub running as part of the replace. Fixes: e93c89c1aaaa ("Btrfs: add new sources for device replace code") CC: stable@vger.kernel.org # 4.4+ Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 32da6901dc88..11df8f778b63 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -890,6 +890,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info) "cannot continue dev_replace, tgtdev is missing"); btrfs_info(fs_info, "you may cancel the operation after 'mount -o degraded'"); + dev_replace->replace_state = + BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED; btrfs_dev_replace_write_unlock(dev_replace); return 0; } -- cgit v1.2.3 From 05c49e6bc1e8866ecfd674ebeeb58cdbff9145c2 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Sun, 11 Nov 2018 22:22:18 +0800 Subject: btrfs: dev-replace: go back to suspend state if another EXCL_OP is running In a scenario where balance and replace coexist as below, - start balance - pause balance - start replace - reboot and the system restarts, the balance resumes first. The replace then attempts to restart but will fail as the EXCL_OP lock is already held by the balance. If so, place the replace state back in the BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED state.
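For orientation, once this fix and the previous one are applied the resume path has roughly the shape sketched below; this is a simplified illustration condensed from the hunks in these two patches (the degraded-mount special case and the kthread setup are omitted), not the verbatim kernel code.

/*
 * Simplified sketch of btrfs_resume_dev_replace_async() after the two
 * suspended-state fixes; illustrative only.
 */
static int resume_dev_replace_sketch(struct btrfs_fs_info *fs_info)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	btrfs_dev_replace_write_lock(dev_replace);
	if (dev_replace->replace_state != BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED &&
	    dev_replace->replace_state != BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED) {
		/* nothing to resume */
		btrfs_dev_replace_write_unlock(dev_replace);
		return 0;
	}
	if (!dev_replace->tgtdev) {
		/* target device is missing: stay suspended (previous fix) */
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		btrfs_dev_replace_write_unlock(dev_replace);
		return 0;
	}
	btrfs_dev_replace_write_unlock(dev_replace);

	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
		/* a resumed balance already holds EXCL_OP: stay suspended (this fix) */
		btrfs_dev_replace_write_lock(dev_replace);
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
		btrfs_dev_replace_write_unlock(dev_replace);
		return 0;
	}

	/* otherwise spawn the dev-replace kthread to restart the scrub */
	return 0;
}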
Fixes: 010a47bde9420 ("btrfs: add proper safety check before resuming dev-replace") CC: stable@vger.kernel.org # 4.18+ Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 11df8f778b63..33d07c426c59 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -903,6 +903,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info) * dev-replace to start anyway. */ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { + btrfs_dev_replace_write_lock(dev_replace); + dev_replace->replace_state = + BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED; + btrfs_dev_replace_write_unlock(dev_replace); btrfs_info(fs_info, "cannot resume dev-replace, other exclusive operation running"); return 0; -- cgit v1.2.3 From d189dd70e2556181732598956d808ea53cc8774e Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Wed, 14 Nov 2018 13:50:26 +0800 Subject: btrfs: fix use-after-free due to race between replace start and cancel The device replace cancel thread can race with the replace start thread and if fs_info::scrubs_running is not yet set, btrfs_scrub_cancel() will fail to stop the scrub thread. The scrub thread continues with the scrub for replace which then will try to write to the target device and which is already freed by the cancel thread. scrub_setup_ctx() warns as tgtdev is NULL. struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) { ... if (is_dev_replace) { WARN_ON(!fs_info->dev_replace.tgtdev); <=== sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; sctx->flush_all_writes = false; } [ 6724.497655] BTRFS info (device sdb): dev_replace from /dev/sdb (devid 1) to /dev/sdc started [ 6753.945017] BTRFS info (device sdb): dev_replace from /dev/sdb (devid 1) to /dev/sdc canceled [ 6852.426700] WARNING: CPU: 0 PID: 4494 at fs/btrfs/scrub.c:622 scrub_setup_ctx.isra.19+0x220/0x230 [btrfs] ... [ 6852.428928] RIP: 0010:scrub_setup_ctx.isra.19+0x220/0x230 [btrfs] ... [ 6852.432970] Call Trace: [ 6852.433202] btrfs_scrub_dev+0x19b/0x5c0 [btrfs] [ 6852.433471] btrfs_dev_replace_start+0x48c/0x6a0 [btrfs] [ 6852.433800] btrfs_dev_replace_by_ioctl+0x3a/0x60 [btrfs] [ 6852.434097] btrfs_ioctl+0x2476/0x2d20 [btrfs] [ 6852.434365] ? do_sigaction+0x7d/0x1e0 [ 6852.434623] do_vfs_ioctl+0xa9/0x6c0 [ 6852.434865] ? syscall_trace_enter+0x1c8/0x310 [ 6852.435124] ? syscall_trace_enter+0x1c8/0x310 [ 6852.435387] ksys_ioctl+0x60/0x90 [ 6852.435663] __x64_sys_ioctl+0x16/0x20 [ 6852.435907] do_syscall_64+0x50/0x180 [ 6852.436150] entry_SYSCALL_64_after_hwframe+0x49/0xbe Further, as the replace thread enters scrub_write_page_to_dev_replace() without the target device it panics: static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage) { ... bio_set_dev(bio, sbio->dev->bdev); <====== [ 6929.715145] BUG: unable to handle kernel NULL pointer dereference at 00000000000000a0 .. [ 6929.717106] Workqueue: btrfs-scrub btrfs_scrub_helper [btrfs] [ 6929.717420] RIP: 0010:scrub_write_page_to_dev_replace+0xb4/0x260 [btrfs] .. [ 6929.721430] Call Trace: [ 6929.721663] scrub_write_block_to_dev_replace+0x3f/0x60 [btrfs] [ 6929.721975] scrub_bio_end_io_worker+0x1af/0x490 [btrfs] [ 6929.722277] normal_work_helper+0xf0/0x4c0 [btrfs] [ 6929.722552] process_one_work+0x1f4/0x520 [ 6929.722805] ? 
process_one_work+0x16e/0x520 [ 6929.723063] worker_thread+0x46/0x3d0 [ 6929.723313] kthread+0xf8/0x130 [ 6929.723544] ? process_one_work+0x520/0x520 [ 6929.723800] ? kthread_delayed_work_timer_fn+0x80/0x80 [ 6929.724081] ret_from_fork+0x3a/0x50 Fix this by letting the btrfs_dev_replace_finishing() to do the job of cleaning after the cancel, including freeing of the target device. btrfs_dev_replace_finishing() is called when btrfs_scub_dev() returns along with the scrub return status. Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 63 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 22 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 33d07c426c59..08092d329f66 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -803,39 +803,58 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED: result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED; btrfs_dev_replace_write_unlock(dev_replace); - goto leave; + break; case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; + tgt_device = dev_replace->tgtdev; + src_device = dev_replace->srcdev; + btrfs_dev_replace_write_unlock(dev_replace); + btrfs_scrub_cancel(fs_info); + /* btrfs_dev_replace_finishing() will handle the cleanup part */ + btrfs_info_in_rcu(fs_info, + "dev_replace from %s (devid %llu) to %s canceled", + btrfs_dev_name(src_device), src_device->devid, + btrfs_dev_name(tgt_device)); + break; case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED: + /* + * Scrub doing the replace isn't running so we need to do the + * cleanup step of btrfs_dev_replace_finishing() here + */ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; tgt_device = dev_replace->tgtdev; src_device = dev_replace->srcdev; dev_replace->tgtdev = NULL; dev_replace->srcdev = NULL; - break; - } - dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED; - dev_replace->time_stopped = ktime_get_real_seconds(); - dev_replace->item_needs_writeback = 1; - btrfs_dev_replace_write_unlock(dev_replace); - btrfs_scrub_cancel(fs_info); + dev_replace->replace_state = + BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED; + dev_replace->time_stopped = ktime_get_real_seconds(); + dev_replace->item_needs_writeback = 1; - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) { - mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); - return PTR_ERR(trans); - } - ret = btrfs_commit_transaction(trans); - WARN_ON(ret); + btrfs_dev_replace_write_unlock(dev_replace); - btrfs_info_in_rcu(fs_info, - "dev_replace from %s (devid %llu) to %s canceled", - btrfs_dev_name(src_device), src_device->devid, - btrfs_dev_name(tgt_device)); + btrfs_scrub_cancel(fs_info); + + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); + return PTR_ERR(trans); + } + ret = btrfs_commit_transaction(trans); + WARN_ON(ret); - if (tgt_device) - btrfs_destroy_dev_replace_tgtdev(tgt_device); + btrfs_info_in_rcu(fs_info, + "suspended dev_replace from %s (devid %llu) to %s canceled", + btrfs_dev_name(src_device), src_device->devid, + btrfs_dev_name(tgt_device)); + + if (tgt_device) + btrfs_destroy_dev_replace_tgtdev(tgt_device); + break; + default: + result = -EINVAL; + } -leave: mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); return result; } -- cgit v1.2.3 From 
b47dda2ef6d793b67fd5979032dcd106e3f0a5c9 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Sun, 11 Nov 2018 22:22:20 +0800 Subject: btrfs: dev-replace: set result code of cancel by status of scrub The device-replace needs to check the result code of the scrub workers in btrfs_dev_replace_cancel and distinguish between a successful cancel operation and the case when there was no operation running. If btrfs_scrub_cancel() fails, return BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED so that the user can try to cancel the replace again. Signed-off-by: Anand Jain Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 08092d329f66..c0878ff8185a 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -805,16 +805,23 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) btrfs_dev_replace_write_unlock(dev_replace); break; case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: - result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; tgt_device = dev_replace->tgtdev; src_device = dev_replace->srcdev; btrfs_dev_replace_write_unlock(dev_replace); - btrfs_scrub_cancel(fs_info); - /* btrfs_dev_replace_finishing() will handle the cleanup part */ - btrfs_info_in_rcu(fs_info, - "dev_replace from %s (devid %llu) to %s canceled", - btrfs_dev_name(src_device), src_device->devid, - btrfs_dev_name(tgt_device)); + ret = btrfs_scrub_cancel(fs_info); + if (ret < 0) { + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED; + } else { + result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; + /* + * btrfs_dev_replace_finishing() will handle the + * cleanup part + */ + btrfs_info_in_rcu(fs_info, + "dev_replace from %s (devid %llu) to %s canceled", + btrfs_dev_name(src_device), src_device->devid, + btrfs_dev_name(tgt_device)); + } break; case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED: /* -- cgit v1.2.3 From fe97e2e173afb61f1bb889c45f9597102c9f4849 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Sun, 11 Nov 2018 22:22:21 +0800 Subject: btrfs: dev-replace: replace's scrub must not be running in suspended state When the replace state is in the suspended state, btrfs_scrub_cancel() should fail with -ENOTCONN as there is no scrub running. As a safety catch, check whether btrfs_scrub_cancel() returns -ENOTCONN and assert if it doesn't. Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index c0878ff8185a..a84affa425d4 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -840,7 +840,9 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) btrfs_dev_replace_write_unlock(dev_replace); - btrfs_scrub_cancel(fs_info); + /* Scrub for replace must not be running in suspended state */ + ret = btrfs_scrub_cancel(fs_info); + ASSERT(ret != -ENOTCONN); trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { -- cgit v1.2.3 From 53e62fb5a4aedeff05aa1efd277de32dc765f4bf Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Sun, 11 Nov 2018 22:22:24 +0800 Subject: btrfs: dev-replace: add explicit check for replace result "no error" We recast the replace return status BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS to 0, to indicate no error.
And since BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR is also declared as 0, it would be returned as 0 anyway, so the code would just return. Instead, add it to the if statement explicitly so that there is enough clarity while reading the code. Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index a84affa425d4..85d93bd3b27a 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -539,8 +539,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info, args->start.cont_reading_from_srcdev_mode); args->result = ret; /* don't warn if EINPROGRESS, someone else might be running scrub */ - if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS) - ret = 0; + if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS || + ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR) + return 0; return ret; } -- cgit v1.2.3 From 49365e69762715fb301db0bce3333e4c41dda810 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Tue, 20 Nov 2018 19:56:15 +0800 Subject: btrfs: silence warning if replace is canceled When we successfully cancel the device replace, its scrub worker returns -ECANCELED, which is then passed to btrfs_dev_replace_finishing. It cleans up based on the returned status and propagates the same -ECANCELED back to the parent function. As of now only the user can cancel the replace-scrub, so it's OK to silence the warning here. Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 85d93bd3b27a..ead4f3803527 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -503,7 +503,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, ret = btrfs_dev_replace_finishing(fs_info, ret); if (ret == -EINPROGRESS) { ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS; - } else { + } else if (ret != -ECANCELED) { WARN_ON(ret); } @@ -966,7 +966,7 @@ static int btrfs_dev_replace_kthread(void *data) btrfs_device_get_total_bytes(dev_replace->srcdev), &dev_replace->scrub_progress, 0, 1); ret = btrfs_dev_replace_finishing(fs_info, ret); - WARN_ON(ret); + WARN_ON(ret && ret != -ECANCELED); clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); return 0; -- cgit v1.2.3 From f9085abfaeaf45971044fad2281ffd917cde4526 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Tue, 20 Nov 2018 19:56:16 +0800 Subject: btrfs: don't report user-requested cancel as an error As of now only a user-requested replace cancel can cancel the replace-scrub, so there is no need to log the error.
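The patches above shuffle what the cancel ioctl hands back to user space through the result field. For context, a hypothetical userspace caller (not part of this series, only using the UAPI definitions from linux/btrfs.h) would consume those result codes roughly as follows:

/*
 * Hypothetical userspace consumer of the cancel ioctl; shown only to
 * illustrate where the result codes set above end up, not taken from
 * the kernel tree or btrfs-progs.
 */
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int cancel_dev_replace(int fs_fd)
{
	struct btrfs_ioctl_dev_replace_args args = {
		.cmd = BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL,
	};

	if (ioctl(fs_fd, BTRFS_IOC_DEV_REPLACE, &args) < 0)
		return -1;	/* ioctl-level failure */

	switch (args.result) {
	case BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR:
		return 0;	/* a running or suspended replace was canceled */
	case BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED:
		return 1;	/* nothing to cancel; the caller may retry or report */
	default:
		return -1;
	}
}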
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index ead4f3803527..3b272ff60fea 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -629,7 +629,8 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, src_device, tgt_device); } else { - btrfs_err_in_rcu(fs_info, + if (scrub_ret != -ECANCELED) + btrfs_err_in_rcu(fs_info, "btrfs_scrub_dev(%s, %llu, %s) failed %d", btrfs_dev_name(src_device), src_device->devid, -- cgit v1.2.3 From 129827e3001fd1e6892a0629b48f9c7c91cbb8b6 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 5 Apr 2018 01:29:24 +0200 Subject: btrfs: dev-replace: swich locking to rw semaphore This is the first part of removing the custom locking and waiting scheme used for device replace. It was probably copied from extent buffer locking, but there's nothing that would require more than is provided by the common locking primitives. The rw spinlock protects waiting tasks counter in case of incompatible locks and the waitqueue. Same as rw semaphore. This patch only switches the locking primitive, for better bisectability. There should be no functional change other than the overhead of the locking and potential sleeping instead of spinning when the lock is contended. Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/dev-replace.c | 12 ++++++------ fs/btrfs/disk-io.c | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 81cbbb24678e..b4f97120aecd 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -378,7 +378,7 @@ struct btrfs_dev_replace { struct btrfs_device *tgtdev; struct mutex lock_finishing_cancel_unmount; - rwlock_t lock; + struct rw_semaphore rwsem; atomic_t blocking_readers; wait_queue_head_t read_lock_wq; diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 3b272ff60fea..316a29278306 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -1002,12 +1002,12 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace) { - read_lock(&dev_replace->lock); + down_read(&dev_replace->rwsem); } void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace) { - read_unlock(&dev_replace->lock); + up_read(&dev_replace->rwsem); } void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace) @@ -1015,16 +1015,16 @@ void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace) again: wait_event(dev_replace->read_lock_wq, atomic_read(&dev_replace->blocking_readers) == 0); - write_lock(&dev_replace->lock); + down_write(&dev_replace->rwsem); if (atomic_read(&dev_replace->blocking_readers)) { - write_unlock(&dev_replace->lock); + up_write(&dev_replace->rwsem); goto again; } } void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace) { - write_unlock(&dev_replace->lock); + up_write(&dev_replace->rwsem); } /* inc blocking cnt and release read lock */ @@ -1033,7 +1033,7 @@ void btrfs_dev_replace_set_lock_blocking( { /* only set blocking for read lock */ atomic_inc(&dev_replace->blocking_readers); - read_unlock(&dev_replace->lock); + up_read(&dev_replace->rwsem); } void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/disk-io.c 
b/fs/btrfs/disk-io.c index 14d355d0cb7a..eca66ac52c7a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2141,7 +2141,7 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info) static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) { mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); - rwlock_init(&fs_info->dev_replace.lock); + init_rwsem(&fs_info->dev_replace.rwsem); atomic_set(&fs_info->dev_replace.blocking_readers, 0); init_waitqueue_head(&fs_info->dev_replace.replace_wait); init_waitqueue_head(&fs_info->dev_replace.read_lock_wq); -- cgit v1.2.3 From 53176dde0acd8fa49c6c2e6097283acc6241480f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 5 Apr 2018 01:41:06 +0200 Subject: btrfs: dev-replace: remove custom read/write blocking scheme After the rw semaphore has been added, the custom blocking using ::blocking_readers and ::read_lock_wq is redundant. The blocking logic in __btrfs_map_block is replaced by extending the time the semaphore is held, that has the same blocking effect on writes as the previous custom scheme that waited until ::blocking_readers was zero. Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 -- fs/btrfs/dev-replace.c | 16 ---------------- fs/btrfs/dev-replace.h | 1 - fs/btrfs/disk-io.c | 2 -- fs/btrfs/volumes.c | 13 ++++++------- 5 files changed, 6 insertions(+), 28 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b4f97120aecd..5fb4cb646c82 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -379,8 +379,6 @@ struct btrfs_dev_replace { struct mutex lock_finishing_cancel_unmount; struct rw_semaphore rwsem; - atomic_t blocking_readers; - wait_queue_head_t read_lock_wq; struct btrfs_scrub_progress scrub_progress; diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 316a29278306..a71661f43dec 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -1012,14 +1012,7 @@ void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace) void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace) { -again: - wait_event(dev_replace->read_lock_wq, - atomic_read(&dev_replace->blocking_readers) == 0); down_write(&dev_replace->rwsem); - if (atomic_read(&dev_replace->blocking_readers)) { - up_write(&dev_replace->rwsem); - goto again; - } } void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace) @@ -1027,15 +1020,6 @@ void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace) up_write(&dev_replace->rwsem); } -/* inc blocking cnt and release read lock */ -void btrfs_dev_replace_set_lock_blocking( - struct btrfs_dev_replace *dev_replace) -{ - /* only set blocking for read lock */ - atomic_inc(&dev_replace->blocking_readers); - up_read(&dev_replace->rwsem); -} - void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info) { percpu_counter_inc(&fs_info->dev_replace.bio_counter); diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index 27e3bb0cca11..dd1dcb22c1e3 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -23,6 +23,5 @@ void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace); void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace); void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace); void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace); -void btrfs_dev_replace_set_lock_blocking(struct btrfs_dev_replace *dev_replace); #endif diff --git a/fs/btrfs/disk-io.c 
b/fs/btrfs/disk-io.c index eca66ac52c7a..cbb7cf4a993d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2142,9 +2142,7 @@ static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) { mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); init_rwsem(&fs_info->dev_replace.rwsem); - atomic_set(&fs_info->dev_replace.blocking_readers, 0); init_waitqueue_head(&fs_info->dev_replace.replace_wait); - init_waitqueue_head(&fs_info->dev_replace.read_lock_wq); } static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6c806808b6a1..57174d00f74b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6145,10 +6145,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, btrfs_dev_replace_read_lock(dev_replace); dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); + /* + * Hold the semaphore for read during the whole operation, write is + * requested at commit time but must wait. + */ if (!dev_replace_is_ongoing) btrfs_dev_replace_read_unlock(dev_replace); - else - btrfs_dev_replace_set_lock_blocking(dev_replace); if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && !need_full_stripe(op) && dev_replace->tgtdev != NULL) { @@ -6343,11 +6345,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, } out: if (dev_replace_is_ongoing) { - ASSERT(atomic_read(&dev_replace->blocking_readers) > 0); - btrfs_dev_replace_read_lock(dev_replace); - /* Barrier implied by atomic_dec_and_test */ - if (atomic_dec_and_test(&dev_replace->blocking_readers)) - cond_wake_up_nomb(&dev_replace->read_lock_wq); + lockdep_assert_held(&dev_replace->rwsem); + /* Unlock and let waiting writers proceed */ btrfs_dev_replace_read_unlock(dev_replace); } free_extent_map(em); -- cgit v1.2.3 From cb5583dd52fab469a001a007385066fcd60629c5 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 7 Sep 2018 16:11:23 +0200 Subject: btrfs: dev-replace: open code trivial locking helpers The dev-replace locking functions are now trivial wrappers around rw semaphore that can be used directly everywhere. No functional change. 
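The conversion in the hunks below is mechanical; as a reading aid, the removed helpers map onto the rw semaphore one to one (a summary of the change, not new code):

/*
 * Open-coded equivalents used throughout the hunks below:
 *
 *   btrfs_dev_replace_read_lock(dev_replace)    ->  down_read(&dev_replace->rwsem)
 *   btrfs_dev_replace_read_unlock(dev_replace)  ->  up_read(&dev_replace->rwsem)
 *   btrfs_dev_replace_write_lock(dev_replace)   ->  down_write(&dev_replace->rwsem)
 *   btrfs_dev_replace_write_unlock(dev_replace) ->  up_write(&dev_replace->rwsem)
 */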
Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 81 +++++++++++++++++++------------------------------- fs/btrfs/dev-replace.h | 4 --- fs/btrfs/reada.c | 12 ++++---- fs/btrfs/scrub.c | 15 +++++----- fs/btrfs/volumes.c | 14 ++++----- 5 files changed, 52 insertions(+), 74 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index a71661f43dec..6f7e890bf480 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -284,13 +284,13 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans, struct btrfs_dev_replace_item *ptr; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; - btrfs_dev_replace_read_lock(dev_replace); + down_read(&dev_replace->rwsem); if (!dev_replace->is_valid || !dev_replace->item_needs_writeback) { - btrfs_dev_replace_read_unlock(dev_replace); + up_read(&dev_replace->rwsem); return 0; } - btrfs_dev_replace_read_unlock(dev_replace); + up_read(&dev_replace->rwsem); key.objectid = 0; key.type = BTRFS_DEV_REPLACE_KEY; @@ -348,7 +348,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans, ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_replace_item); - btrfs_dev_replace_write_lock(dev_replace); + down_write(&dev_replace->rwsem); if (dev_replace->srcdev) btrfs_set_dev_replace_src_devid(eb, ptr, dev_replace->srcdev->devid); @@ -371,7 +371,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans, btrfs_set_dev_replace_cursor_right(eb, ptr, dev_replace->cursor_right); dev_replace->item_needs_writeback = 0; - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); btrfs_mark_buffer_dirty(eb); @@ -432,7 +432,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, } need_unlock = true; - btrfs_dev_replace_write_lock(dev_replace); + down_write(&dev_replace->rwsem); switch (dev_replace->replace_state) { case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED: @@ -470,7 +470,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, dev_replace->item_needs_writeback = 1; atomic64_set(&dev_replace->num_write_errors, 0); atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0); - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); need_unlock = false; ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device); @@ -484,7 +484,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, if (IS_ERR(trans)) { ret = PTR_ERR(trans); need_unlock = true; - btrfs_dev_replace_write_lock(dev_replace); + down_write(&dev_replace->rwsem); dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED; dev_replace->srcdev = NULL; @@ -511,7 +511,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, leave: if (need_unlock) - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); btrfs_destroy_dev_replace_tgtdev(tgt_device); return ret; } @@ -579,18 +579,18 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, /* don't allow cancel or unmount to disturb the finishing procedure */ mutex_lock(&dev_replace->lock_finishing_cancel_unmount); - btrfs_dev_replace_read_lock(dev_replace); + down_read(&dev_replace->rwsem); /* was the operation canceled, or is it finished? 
*/ if (dev_replace->replace_state != BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED) { - btrfs_dev_replace_read_unlock(dev_replace); + up_read(&dev_replace->rwsem); mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); return 0; } tgt_device = dev_replace->tgtdev; src_device = dev_replace->srcdev; - btrfs_dev_replace_read_unlock(dev_replace); + up_read(&dev_replace->rwsem); /* * flush all outstanding I/O and inode extent mappings before the @@ -614,7 +614,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, /* keep away write_all_supers() during the finishing procedure */ mutex_lock(&fs_info->fs_devices->device_list_mutex); mutex_lock(&fs_info->chunk_mutex); - btrfs_dev_replace_write_lock(dev_replace); + down_write(&dev_replace->rwsem); dev_replace->replace_state = scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED : BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED; @@ -635,7 +635,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, btrfs_dev_name(src_device), src_device->devid, rcu_str_deref(tgt_device->name), scrub_ret); - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_rm_dev_replace_blocked(fs_info); @@ -671,8 +671,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list); fs_info->fs_devices->rw_devices++; - btrfs_dev_replace_write_unlock(dev_replace); - + up_write(&dev_replace->rwsem); btrfs_rm_dev_replace_blocked(fs_info); btrfs_rm_dev_replace_remove_srcdev(src_device); @@ -769,7 +768,7 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, { struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; - btrfs_dev_replace_read_lock(dev_replace); + down_read(&dev_replace->rwsem); /* even if !dev_replace_is_valid, the values are good enough for * the replace_status ioctl */ args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; @@ -781,7 +780,7 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, args->status.num_uncorrectable_read_errors = atomic64_read(&dev_replace->num_uncorrectable_read_errors); args->status.progress_1000 = btrfs_dev_replace_progress(fs_info); - btrfs_dev_replace_read_unlock(dev_replace); + up_read(&dev_replace->rwsem); } int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) @@ -798,18 +797,18 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) return -EROFS; mutex_lock(&dev_replace->lock_finishing_cancel_unmount); - btrfs_dev_replace_write_lock(dev_replace); + down_write(&dev_replace->rwsem); switch (dev_replace->replace_state) { case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED: case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED: result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED; - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); break; case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: tgt_device = dev_replace->tgtdev; src_device = dev_replace->srcdev; - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); ret = btrfs_scrub_cancel(fs_info); if (ret < 0) { result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED; @@ -840,7 +839,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) dev_replace->time_stopped = ktime_get_real_seconds(); dev_replace->item_needs_writeback = 1; - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); /* Scrub for replace must not be running 
in suspended state */ ret = btrfs_scrub_cancel(fs_info); @@ -875,7 +874,8 @@ void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info) struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; mutex_lock(&dev_replace->lock_finishing_cancel_unmount); - btrfs_dev_replace_write_lock(dev_replace); + down_write(&dev_replace->rwsem); + switch (dev_replace->replace_state) { case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED: @@ -891,7 +891,7 @@ void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info) break; } - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); } @@ -901,12 +901,13 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info) struct task_struct *task; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; - btrfs_dev_replace_write_lock(dev_replace); + down_write(&dev_replace->rwsem); + switch (dev_replace->replace_state) { case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED: case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED: - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); return 0; case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: break; @@ -922,10 +923,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info) "you may cancel the operation after 'mount -o degraded'"); dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED; - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); return 0; } - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); /* * This could collide with a paused balance, but the exclusive op logic @@ -933,10 +934,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info) * dev-replace to start anyway. 
*/ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { - btrfs_dev_replace_write_lock(dev_replace); + down_write(&dev_replace->rwsem); dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED; - btrfs_dev_replace_write_unlock(dev_replace); + up_write(&dev_replace->rwsem); btrfs_info(fs_info, "cannot resume dev-replace, other exclusive operation running"); return 0; @@ -1000,26 +1001,6 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) return 1; } -void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace) -{ - down_read(&dev_replace->rwsem); -} - -void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace) -{ - up_read(&dev_replace->rwsem); -} - -void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace) -{ - down_write(&dev_replace->rwsem); -} - -void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace) -{ - up_write(&dev_replace->rwsem); -} - void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info) { percpu_counter_inc(&fs_info->dev_replace.bio_counter); diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index dd1dcb22c1e3..4aa40bacc6cc 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -19,9 +19,5 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info); void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info); int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info); int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace); -void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace); -void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace); -void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace); -void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace); #endif diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 6f81f3e88b6d..10d9589001a9 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -377,10 +377,10 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info, } /* Insert extent in reada tree + all per-device trees, all or nothing */ - btrfs_dev_replace_read_lock(&fs_info->dev_replace); + down_read(&fs_info->dev_replace.rwsem); ret = radix_tree_preload(GFP_KERNEL); if (ret) { - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); goto error; } @@ -391,13 +391,13 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info, re_exist->refcnt++; spin_unlock(&fs_info->reada_lock); radix_tree_preload_end(); - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); goto error; } if (ret) { spin_unlock(&fs_info->reada_lock); radix_tree_preload_end(); - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); goto error; } radix_tree_preload_end(); @@ -439,13 +439,13 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info, } radix_tree_delete(&fs_info->reada_tree, index); spin_unlock(&fs_info->reada_lock); - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); goto error; } have_zone = 1; } spin_unlock(&fs_info->reada_lock); - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); if (!have_zone) goto error; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index bbd1b36f4918..3f664e692831 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3617,11 +3617,12 @@ int 
scrub_enumerate_chunks(struct scrub_ctx *sctx, break; } - btrfs_dev_replace_write_lock(&fs_info->dev_replace); + down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_right = found_key.offset + length; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; - btrfs_dev_replace_write_unlock(&fs_info->dev_replace); + up_write(&dev_replace->rwsem); + ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, found_key.offset, cache); @@ -3657,10 +3658,10 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_pause_off(fs_info); - btrfs_dev_replace_write_lock(&fs_info->dev_replace); + down_write(&fs_info->dev_replace.rwsem); dev_replace->cursor_left = dev_replace->cursor_right; dev_replace->item_needs_writeback = 1; - btrfs_dev_replace_write_unlock(&fs_info->dev_replace); + up_write(&fs_info->dev_replace.rwsem); if (ro_set) btrfs_dec_block_group_ro(cache); @@ -3860,16 +3861,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, return -EIO; } - btrfs_dev_replace_read_lock(&fs_info->dev_replace); + down_read(&fs_info->dev_replace.rwsem); if (dev->scrub_ctx || (!is_dev_replace && btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); return -EINPROGRESS; } - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); ret = scrub_workers_get(fs_info, is_dev_replace); if (ret) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 57174d00f74b..367787670c9b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2106,12 +2106,12 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) { u64 num_devices = fs_info->fs_devices->num_devices; - btrfs_dev_replace_read_lock(&fs_info->dev_replace); + down_read(&fs_info->dev_replace.rwsem); if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { ASSERT(num_devices > 1); num_devices--; } - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); return num_devices; } @@ -5559,11 +5559,11 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len) ret = 1; free_extent_map(em); - btrfs_dev_replace_read_lock(&fs_info->dev_replace); + down_read(&fs_info->dev_replace.rwsem); if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) && fs_info->dev_replace.tgtdev) ret++; - btrfs_dev_replace_read_unlock(&fs_info->dev_replace); + up_read(&fs_info->dev_replace.rwsem); return ret; } @@ -6143,14 +6143,14 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, if (!bbio_ret) goto out; - btrfs_dev_replace_read_lock(dev_replace); + down_read(&dev_replace->rwsem); dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); /* * Hold the semaphore for read during the whole operation, write is * requested at commit time but must wait. 
*/ if (!dev_replace_is_ongoing) - btrfs_dev_replace_read_unlock(dev_replace); + up_read(&dev_replace->rwsem); if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && !need_full_stripe(op) && dev_replace->tgtdev != NULL) { @@ -6347,7 +6347,7 @@ out: if (dev_replace_is_ongoing) { lockdep_assert_held(&dev_replace->rwsem); /* Unlock and let waiting writers proceed */ - btrfs_dev_replace_read_unlock(dev_replace); + up_read(&dev_replace->rwsem); } free_extent_map(em); return ret; -- cgit v1.2.3 From 52042d8e82ff50d40e76a275ac0b97aa663328b0 Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Wed, 28 Nov 2018 12:05:13 +0100 Subject: btrfs: Fix typos in comments and strings The typos accumulate over time so once in a while time they get fixed in a large patch. Signed-off-by: Andrea Gelmini Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/backref.c | 4 ++-- fs/btrfs/check-integrity.c | 2 +- fs/btrfs/compression.c | 4 ++-- fs/btrfs/ctree.c | 4 ++-- fs/btrfs/dev-replace.c | 2 +- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/extent-tree.c | 22 +++++++++++----------- fs/btrfs/extent_io.c | 4 ++-- fs/btrfs/extent_io.h | 2 +- fs/btrfs/extent_map.c | 3 ++- fs/btrfs/file.c | 6 +++--- fs/btrfs/inode.c | 10 +++++----- fs/btrfs/lzo.c | 2 +- fs/btrfs/qgroup.c | 14 +++++++------- fs/btrfs/qgroup.h | 4 ++-- fs/btrfs/raid56.c | 2 +- fs/btrfs/ref-verify.c | 6 +++--- fs/btrfs/relocation.c | 2 +- fs/btrfs/scrub.c | 2 +- fs/btrfs/send.c | 4 ++-- fs/btrfs/super.c | 8 ++++---- fs/btrfs/transaction.c | 4 ++-- fs/btrfs/tree-checker.c | 6 +++--- fs/btrfs/tree-log.c | 4 ++-- fs/btrfs/volumes.c | 14 +++++++------- 25 files changed, 70 insertions(+), 69 deletions(-) (limited to 'fs/btrfs/dev-replace.c') diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 4a15f87dbbb4..78556447e1d5 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -591,7 +591,7 @@ unode_aux_to_inode_list(struct ulist_node *node) } /* - * We maintain three seperate rbtrees: one for direct refs, one for + * We maintain three separate rbtrees: one for direct refs, one for * indirect refs which have a key, and one for indirect refs which do not * have a key. Each tree does merge on insertion. * @@ -695,7 +695,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, } /* - * Now it's a direct ref, put it in the the direct tree. We must + * Now it's a direct ref, put it in the direct tree. We must * do this last because the ref could be merged/freed here. */ prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 84e9729badaa..b0c8094528d1 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -2327,7 +2327,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, * write operations. Therefore it keeps the linkage * information for a block until a block is * rewritten. This can temporarily cause incorrect - * and even circular linkage informations. This + * and even circular linkage information. This * causes no harm unless such blocks are referenced * by the most recent super block. */ diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d5381f39a9e8..548057630b69 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -1203,7 +1203,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, /* * Shannon Entropy calculation * - * Pure byte distribution analysis fails to determine compressiability of data. 
+ * Pure byte distribution analysis fails to determine compressibility of data. * Try calculating entropy to estimate the average minimum number of bits * needed to encode the sampled data. * @@ -1267,7 +1267,7 @@ static u8 get4bits(u64 num, int shift) { /* * Use 4 bits as radix base - * Use 16 u32 counters for calculating new possition in buf array + * Use 16 u32 counters for calculating new position in buf array * * @array - array that will be sorted * @array_buf - buffer array to store sorting results diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 4252e89df6ae..d92462fe66c8 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1414,7 +1414,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans, * * What is forced COW: * when we create snapshot during committing the transaction, - * after we've finished coping src root, we must COW the shared + * after we've finished copying src root, we must COW the shared * block to ensure the metadata consistency. */ if (btrfs_header_generation(buf) == trans->transid && @@ -3771,7 +3771,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root /* Key greater than all keys in the leaf, right neighbor has * enough room for it and we're not emptying our leaf to delete * it, therefore use right neighbor to insert the new item and - * no need to touch/dirty our left leaft. */ + * no need to touch/dirty our left leaf. */ btrfs_tree_unlock(left); free_extent_buffer(left); path->nodes[0] = right; diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 6f7e890bf480..8750c835f535 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -991,7 +991,7 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) * something that can happen if the dev_replace * procedure is suspended by an umount and then * the tgtdev is missing (or "btrfs dev scan") was - * not called and the the filesystem is remounted + * not called and the filesystem is remounted * in degraded state. This does not stop the * dev_replace procedure. It needs to be canceled * manually if the cancellation is wanted. diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2f934a0b2148..8da2f380d3c0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3100,7 +3100,7 @@ retry_root_backup: if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) { btrfs_warn(fs_info, - "writeable mount is not allowed due to too many missing devices"); + "writable mount is not allowed due to too many missing devices"); goto fail_sysfs; } @@ -4077,7 +4077,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf) #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS /* * This is a fast path so only do this check if we have sanity tests - * enabled. Normal people shouldn't be using umapped buffers as dirty + * enabled. Normal people shouldn't be using unmapped buffers as dirty * outside of the sanity tests. */ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags))) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8a9ce33dfdbc..b15afeae16df 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1055,7 +1055,7 @@ out_free: /* * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required, - * is_data == BTRFS_REF_TYPE_DATA, data type is requried, + * is_data == BTRFS_REF_TYPE_DATA, data type is requiried, * is_data == BTRFS_REF_TYPE_ANY, either type is OK. 
*/ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, @@ -3705,7 +3705,7 @@ again: } } - /* if its not on the io list, we need to put the block group */ + /* if it's not on the io list, we need to put the block group */ if (should_put) btrfs_put_block_group(cache); if (drop_reserve) @@ -4675,7 +4675,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info, /* * If we have dup, raid1 or raid10 then only half of the free - * space is actually useable. For raid56, the space info used + * space is actually usable. For raid56, the space info used * doesn't include the parity drive, so we don't have to * change the math */ @@ -5302,7 +5302,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, * @orig_bytes - the number of bytes we want * @flush - whether or not we can flush to make our reservation * - * This will reserve orgi_bytes number of bytes from the space info associated + * This will reserve orig_bytes number of bytes from the space info associated * with the block_rsv. If there is not enough space it will make an attempt to * flush out space to make room. It will do this by flushing delalloc if * possible or committing the transaction. If flush is 0 then no attempts to @@ -5771,11 +5771,11 @@ int btrfs_block_rsv_refill(struct btrfs_root *root, /** * btrfs_inode_rsv_refill - refill the inode block rsv. * @inode - the inode we are refilling. - * @flush - the flusing restriction. + * @flush - the flushing restriction. * * Essentially the same as btrfs_block_rsv_refill, except it uses the * block_rsv->size as the minimum size. We'll either refill the missing amount - * or return if we already have enough space. This will also handle the resreve + * or return if we already have enough space. This will also handle the reserve * tracepoint for the reserved amount. */ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode, @@ -8500,7 +8500,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, buf->log_index = root->log_transid % 2; /* * we allow two log transactions at a time, use different - * EXENT bit to differentiate dirty pages. + * EXTENT bit to differentiate dirty pages. */ if (buf->log_index == 0) set_extent_dirty(&root->dirty_log_pages, buf->start, @@ -9762,7 +9762,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) } /* - * checks to see if its even possible to relocate this block group. + * Checks to see if it's even possible to relocate this block group. * * @return - -1 if it's not a good idea to relocate this block group, 0 if its * ok to go ahead and try. @@ -10390,7 +10390,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) * check for two cases, either we are full, and therefore * don't need to bother with the caching work since we won't * find any space, or we are empty, and we can just add all - * the space in and be done with it. This saves us _alot_ of + * the space in and be done with it. This saves us _a_lot_ of * time, particularly in the full case. 
*/ if (found_key.offset == btrfs_block_group_used(&cache->item)) { @@ -10660,7 +10660,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, mutex_lock(&trans->transaction->cache_write_mutex); /* - * make sure our free spache cache IO is done before remove the + * Make sure our free space cache IO is done before removing the * free space inode */ spin_lock(&trans->transaction->dirty_bgs_lock); @@ -11177,7 +11177,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, if (!blk_queue_discard(bdev_get_queue(device->bdev))) return 0; - /* Not writeable = nothing to do. */ + /* Not writable = nothing to do. */ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) return 0; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7b489988d811..fc126b92ea59 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -492,7 +492,7 @@ static struct extent_state *next_state(struct extent_state *state) /* * utility function to clear some bits in an extent state struct. - * it will optionally wake up any one waiting on this state (wake == 1). + * it will optionally wake up anyone waiting on this state (wake == 1). * * If no bits are set on the state struct after clearing things, the * struct is freed and removed from the tree @@ -4312,7 +4312,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo, /* * Sanity check, extent_fiemap() should have ensured that new - * fiemap extent won't overlap with cahced one. + * fiemap extent won't overlap with cached one. * Not recoverable. * * NOTE: Physical address can overlap, due to compression diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 04eefa93fe1f..9673be3f3d1f 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -98,7 +98,7 @@ typedef blk_status_t (extent_submit_bio_start_t)(void *private_data, struct extent_io_ops { /* - * The following callbacks must be allways defined, the function + * The following callbacks must be always defined, the function * pointer will be called unconditionally. */ extent_submit_bio_hook_t *submit_bio_hook; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 7eea8b6e2cd3..a042a193c120 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -475,7 +475,8 @@ static struct extent_map *prev_extent_map(struct extent_map *em) return container_of(prev, struct extent_map, rb_node); } -/* helper for btfs_get_extent. Given an existing extent in the tree, +/* + * Helper for btrfs_get_extent. Given an existing extent in the tree, * the existing extent is the nearest extent to map_start, * and an extent that you want to insert, deal with overlap and insert * the best fitted new extent into the tree. diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 81aae230d1a5..d38dc8c31533 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2005,7 +2005,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp) filp->private_data = NULL; /* - * ordered_data_close is set by settattr when we are about to truncate + * ordered_data_close is set by setattr when we are about to truncate * a file from a non-zero size to a zero size. This tries to * flush down new bytes that may have been written if the * application were using truncate to replace a file in place. @@ -2114,7 +2114,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) /* * We have to do this here to avoid the priority inversion of waiting on - * IO of a lower priority task while holding a transaciton open. 
+	 * IO of a lower priority task while holding a transaction open.
 	 */
 	ret = btrfs_wait_ordered_range(inode, start, len);
 	if (ret) {
@@ -2154,7 +2154,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * here we could get into a situation where we're waiting on IO to
 	 * happen that is blocked on a transaction trying to commit. With start
 	 * we inc the extwriter counter, so we wait for all extwriters to exit
-	 * before we start blocking join'ers. This comment is to keep somebody
+	 * before we start blocking joiners. This comment is to keep somebody
 	 * from thinking they are super smart and changing this to
 	 * btrfs_join_transaction *cough*Josef*cough*.
 	 */
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3d29505971fe..43eb4535319d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -104,7 +104,7 @@ static void __endio_write_update_ordered(struct inode *inode,
 
 /*
  * Cleanup all submitted ordered extents in specified range to handle errors
- * from the fill_dellaloc() callback.
+ * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
@@ -1842,7 +1842,7 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
 
 		/*
 		 * We don't reserve metadata space for space cache inodes so we
-		 * don't need to call dellalloc_release_metadata if there is an
+		 * don't need to call delalloc_release_metadata if there is an
 		 * error.
 		 */
 		if (*bits & EXTENT_CLEAR_META_RESV &&
@@ -4516,7 +4516,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	/*
 	 * This function is also used to drop the items in the log tree before
 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
-	 * it is used to drop the loged items. So we shouldn't kill the delayed
+	 * it is used to drop the logged items. So we shouldn't kill the delayed
 	 * items.
 	 */
 	if (min_type == 0 && root == BTRFS_I(inode)->root)
@@ -5108,7 +5108,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 
 		truncate_setsize(inode, newsize);
 
-		/* Disable nonlocked read DIO to avoid the end less truncate */
+		/* Disable nonlocked read DIO to avoid the endless truncate */
 		btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
 		inode_dio_wait(inode);
 		btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
@@ -8052,7 +8052,7 @@ static void __endio_write_update_ordered(struct inode *inode,
 		return;
 	/*
 	 * Our bio might span multiple ordered extents. In this case
-	 * we keep goin until we have accounted the whole dio.
+	 * we keep going until we have accounted the whole dio.
 	 */
 	if (ordered_offset < offset + bytes) {
 		ordered_bytes = offset + bytes - ordered_offset;
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index b6a4cc178bee..90639140439f 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -27,7 +27,7 @@
 *     Records the total size (including the header) of compressed data.
 *
 * 2. Segment(s)
- *     Variable size. Each segment includes one segment header, followd by data
+ *     Variable size. Each segment includes one segment header, followed by data
 *     payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For inlined LZO compressed extent, only one segment is allowed.
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 2272419ade7e..4e473a998219 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -30,7 +30,7 @@
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
- *  - caches fuer ulists
+ *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
@@ -522,7 +522,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 		__del_qgroup_rb(qgroup);
 	}
 	/*
-	 * we call btrfs_free_qgroup_config() when umounting
+	 * We call btrfs_free_qgroup_config() when unmounting
 	 * filesystem and disabling quota, so we set qgroup_ulist
 	 * to be null here to avoid double free.
 	 */
@@ -1128,7 +1128,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
 * The easy accounting, we're updating qgroup relationship whose child qgroup
 * only has exclusive extents.
 *
- * In this case, all exclsuive extents will also be exlusive for parent, so
+ * In this case, all exclusive extents will also be exclusive for parent, so
 * excl/rfer just get added/removed.
 *
 * So is qgroup reservation space, which should also be added/removed to
@@ -1755,14 +1755,14 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 *    NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
- *    They should be marked during preivous (@dst_level = 1) iteration.
+ *    They should be marked during previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 *    We don't have good way to pick out new file extents only.
 *    So we still follow the old method by scanning all file extents in
 *    the leave.
 *
- * This function can free us from keeping two pathes, thus later we only need
+ * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
 */
 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
@@ -1901,7 +1901,7 @@ out:
 *
 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
 * above tree blocks along with their counter parts in file tree.
- * While during search, old tree blocsk OO(c) will be skiped as tree block swap
+ * While during search, old tree blocks OO(c) will be skipped as tree block swap
 * won't affect OO(c).
 */
 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
@@ -2026,7 +2026,7 @@ out:
 * Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and
 * @dst_slot), and find any tree blocks whose generation is at @last_snapshot,
 * and then go down @src_eb (pointed by @src_parent and @src_slot) to find
- * the conterpart of the tree block, then mark both tree blocks as qgroup dirty,
+ * the counterpart of the tree block, then mark both tree blocks as qgroup dirty,
 * and skip all tree blocks whose generation is smaller than last_snapshot.
 *
 * This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(),
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index e4e6ee44073a..20c6bd5fa701 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -81,10 +81,10 @@ enum btrfs_qgroup_rsv_type {
 *
 * Each type should have different reservation behavior.
 * E.g, data follows its io_tree flag modification, while
- * *currently* meta is just reserve-and-clear during transcation.
+ * *currently* meta is just reserve-and-clear during transaction.
 *
 * TODO: Add new type for reservation which can survive transaction commit.
- * Currect metadata reservation behavior is not suitable for such case.
+ * Current metadata reservation behavior is not suitable for such case.
 */
 struct btrfs_qgroup_rsv {
 	u64 values[BTRFS_QGROUP_RSV_LAST];
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index df41d7049936..e74455eb42f9 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1980,7 +1980,7 @@ cleanup_io:
 		 * - In case of single failure, where rbio->failb == -1:
 		 *
 		 *   Cache this rbio iff the above read reconstruction is
-		 *   excuted without problems.
+		 *   executed without problems.
 		 */
 		if (err == BLK_STS_OK && rbio->failb < 0)
 			cache_rbio_pages(rbio);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index d69fbfb30aa9..c3557c12656b 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -43,7 +43,7 @@ struct ref_entry {
 * back to the delayed ref action. We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root we
 * were called with since it could be different from ref_root. We also store
- * stack traces because thats how I roll.
+ * stack traces because that's how I roll.
 */
 struct ref_action {
 	int action;
@@ -56,7 +56,7 @@ struct ref_action {
 
/*
 * One of these for every block we reference, it holds the roots and references
- * to it as well as all of the ref actions that have occured to it. We never
+ * to it as well as all of the ref actions that have occurred to it. We never
 * free it until we unmount the file system in order to make sure re-allocations
 * are happening properly.
 */
@@ -859,7 +859,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
 			 * This shouldn't happen because we will add our re
 			 * above when we lookup the be with !parent, but just in
 			 * case catch this case so we don't panic because I
-			 * didn't thik of some other corner case.
+			 * didn't think of some other corner case.
 			 */
 			btrfs_err(fs_info, "failed to find root %llu for %llu",
 				  root->root_key.objectid, be->bytenr);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 35bfe014712a..272b287f8cf0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2631,7 +2631,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
 		 * only one thread can access block_rsv at this point,
 		 * so we don't need hold lock to protect block_rsv.
 		 * we expand more reservation size here to allow enough
-		 * space for relocation and we will return eailer in
+		 * space for relocation and we will return earlier in
 		 * enospc case.
 		 */
 		rc->block_rsv->size = tmp + fs_info->nodesize *
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 099eb3c8f86b..6dcd36d7b849 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3554,7 +3554,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		if (!ret && sctx->is_dev_replace) {
 			/*
 			 * If we are doing a device replace wait for any tasks
-			 * that started dellaloc right before we set the block
+			 * that started delalloc right before we set the block
 			 * group to RO mode, as they might have just allocated
 			 * an extent from it or decided they could do a nocow
 			 * write. And if any such tasks did that, wait for their
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9df4c0b0e789..1b15b43905f8 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -2238,7 +2238,7 @@ out:
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
- * When do we have have orphan inodes:
+ * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all it's refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
@@ -3854,7 +3854,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
 		/*
 		 * We may have refs where the parent directory does not exist
 		 * yet. This happens if the parent directories inum is higher
-		 * the the current inum. To handle this case, we create the
+		 * than the current inum. To handle this case, we create the
 		 * parent directory out of order. But we need to check if this
 		 * did already happen before due to other refs in the same dir.
 		 */
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d3c6bbc0aa3a..368a5b9e6c13 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -93,7 +93,7 @@ const char *btrfs_decode_error(int errno)
 
/*
 * __btrfs_handle_fs_error decodes expected errors from the caller and
- * invokes the approciate error response.
+ * invokes the appropriate error response.
 */
 __cold
 void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
@@ -151,7 +151,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
 	 * although there is no way to update the progress. It would add the
 	 * risk of a deadlock, therefore the canceling is omitted. The only
 	 * penalty is that some I/O remains active until the procedure
-	 * completes. The next time when the filesystem is mounted writeable
+	 * completes. The next time when the filesystem is mounted writable
 	 * again, the device replace operation continues.
 	 */
 }
@@ -1848,7 +1848,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 
 		if (!btrfs_check_rw_degradable(fs_info, NULL)) {
 			btrfs_warn(fs_info,
-				"too many missing devices, writeable remount is not allowed");
+				"too many missing devices, writable remount is not allowed");
 			ret = -EACCES;
 			goto restore;
 		}
@@ -2312,7 +2312,7 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 	 * device_list_mutex here as we only read the device data and the list
 	 * is protected by RCU. Even if a device is deleted during the list
 	 * traversals, we'll get valid data, the freeing callback will wait at
-	 * least until until the rcu_read_unlock.
+	 * least until the rcu_read_unlock.
 	 */
 	rcu_read_lock();
 	cur_devices = fs_info->fs_devices;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 39d3b4b14098..127fa1535f58 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -699,7 +699,7 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
- * It is similar to the above function, the differentia is this one
+ * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
@@ -1329,7 +1329,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
 		return 0;
 
 	/*
-	 * Ensure dirty @src will be commited. Or, after comming
+	 * Ensure dirty @src will be committed. Or, after coming
 	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
 	 * recorded root will never be updated again, causing an outdated root
 	 * item.
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 1a4e2b101ef2..a62e1e837a89 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -27,10 +27,10 @@
 *
 * @type:	leaf or node
 * @identifier:	the necessary info to locate the leaf/node.
- * 		It's recommened to decode key.objecitd/offset if it's
+ * 		It's recommended to decode key.objecitd/offset if it's
 *		meaningful.
 * @reason:	describe the error
- * @bad_value:	optional, it's recommened to output bad value and its
+ * @bad_value:	optional, it's recommended to output bad value and its
 *		expected value (range).
 *
 * Since comma is used to separate the components, only space is allowed
@@ -130,7 +130,7 @@ static int check_extent_data_item(struct btrfs_fs_info *fs_info,
 	}
 
 	/*
-	 * Support for new compression/encrption must introduce incompat flag,
+	 * Support for new compression/encryption must introduce incompat flag,
 	 * and must be caught in open_ctree().
 	 */
 	if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 5baad9bebc62..ac232b3d6d7e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1144,7 +1144,7 @@ next:
 	}
 	btrfs_release_path(path);
 
-	/* look for a conflicing name */
+	/* look for a conflicting name */
 	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
 				   name, namelen, 0);
 	if (di && !IS_ERR(di)) {
@@ -3149,7 +3149,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	mutex_unlock(&log_root_tree->log_mutex);
 
 	/*
-	 * nobody else is going to jump in and write the the ctree
+	 * Nobody else is going to jump in and write the ctree
 	 * super here because the log_commit atomic below is protecting
 	 * us. We must be called with a transaction handle pinning
 	 * the running transaction open, so a full commit can't hop
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a735576471a9..2576b1a379c9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -212,7 +212,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
- * seeding, structure cloning, openning/closing devices at mount/umount time
+ * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
@@ -5047,7 +5047,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		BUG_ON(1);
 	}
 
-	/* we don't want a chunk larger than 10% of writeable space */
+	/* We don't want a chunk larger than 10% of writable space */
 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
 			     max_chunk_size);
 
@@ -5355,10 +5355,10 @@ out:
 }
 
/*
- * Chunk allocation falls into two parts. The first part does works
- * that make the new allocated chunk useable, but not do any operation
- * that modifies the chunk tree. The second part does the works that
- * require modifying the chunk tree. This division is important for the
+ * Chunk allocation falls into two parts. The first part does work
+ * that makes the new allocated chunk usable, but does not do any operation
+ * that modifies the chunk tree. The second part does the work that
+ * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
@@ -7256,7 +7256,7 @@ bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
 		if (missing > max_tolerated) {
 			if (!failing_dev)
 				btrfs_warn(fs_info,
-	"chunk %llu missing %d devices, max tolerance is %d for writeable mount",
+	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
 				   em->start, missing, max_tolerated);
 			free_extent_map(em);
 			ret = false;
--
cgit v1.2.3