author    Filipe Manana <fdmanana@suse.com>    2023-03-21 14:13:49 +0300
committer David Sterba <dsterba@suse.com>      2023-04-17 19:01:19 +0300
commit    a8fdc05172d0e52ad7812c07be8f753d63779539 (patch)
tree      60fedef09220bcaa3a73945f005e175cac1169a4 /fs/btrfs/extent-tree.c
parent    4e8313e53c83d11510b9f323861a6f83757529ed (diff)
btrfs: remove obsolete delayed ref throttling logic when truncating items
We have this logic encapsulated in btrfs_should_throttle_delayed_refs() where we try to estimate if running the current amount of delayed references we have will take more than half a second, and if so, the caller of btrfs_should_throttle_delayed_refs() should do something to prevent more and more delayed refs from being accumulated.

This logic was added in commit 0a2b2a844af6 ("Btrfs: throttle delayed refs better") and then further refined in commit a79b7d4b3e81 ("Btrfs: async delayed refs"). The idea back then was that the caller of btrfs_should_throttle_delayed_refs() would release its transaction handle (by calling btrfs_end_transaction()) when that function returned true, then btrfs_end_transaction() would trigger an async job to run delayed references in a workqueue, and later start/join a transaction again and do more work.

However we don't run delayed references asynchronously anymore, that was removed in commit db2462a6ad3d ("btrfs: don't run delayed refs in the end transaction logic"). That makes the logic that tries to estimate how long we will take to run our current delayed references, at btrfs_should_throttle_delayed_refs(), pointless, as we don't take any action to run delayed references anymore.

We do have other types of throttling, which consist of checking the size and reserved space of the delayed and global block reserves, as well as whether flushing delayed references for the current transaction has already started, etc. This is all done by btrfs_should_end_transaction(), and the only user of btrfs_should_throttle_delayed_refs() does periodically call btrfs_should_end_transaction().

So remove btrfs_should_throttle_delayed_refs() and the infrastructure that keeps track of the average time used for running delayed references, and adapt btrfs_truncate_inode_items() to call btrfs_check_space_for_delayed_refs() instead.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
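For context, the heuristic being removed reduces to comparing an estimated runtime for the queued delayed references against half a second. A minimal sketch of that check, reconstructed from the description above (the exact field names and types are assumptions, not quoted from the patch):

	/* Sketch: estimated total runtime = number of queued delayed refs
	 * multiplied by the tracked average runtime per ref, compared
	 * against half a second (NSEC_PER_SEC / 2 nanoseconds). */
	static bool should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
	{
		u64 num_entries = atomic_read(&trans->transaction->delayed_refs.num_entries);
		u64 avg_runtime = trans->fs_info->avg_delayed_ref_runtime;

		return num_entries * avg_runtime >= NSEC_PER_SEC / 2;
	}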
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c | 27 ++-------------------------
1 file changed, 2 insertions(+), 25 deletions(-)
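The companion change that adapts btrfs_truncate_inode_items() lands outside this diffstat (the function lives in a different file). A hedged sketch of what the adapted call site plausibly looks like; the surrounding loop and the back-off handling are assumptions:

	/* Inside the truncation loop: instead of estimating how long the
	 * queued delayed refs would take to run, ask whether the space
	 * reserved for delayed refs is running low, and back off if so. */
	if (btrfs_check_space_for_delayed_refs(trans->fs_info))
		should_throttle = true; /* assumed flag; exact handling may differ */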
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6b6c59e6805c..5cd289de4e92 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1894,8 +1894,7 @@ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
}
static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *locked_ref,
- unsigned long *run_refs)
+ struct btrfs_delayed_ref_head *locked_ref)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -1917,7 +1916,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
return -EAGAIN;
}
- (*run_refs)++;
ref->in_tree = 0;
rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
@@ -1981,10 +1979,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *locked_ref = NULL;
- ktime_t start = ktime_get();
int ret;
unsigned long count = 0;
- unsigned long actual_count = 0;
delayed_refs = &trans->transaction->delayed_refs;
do {
@@ -2014,8 +2010,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
spin_lock(&locked_ref->lock);
btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
- ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
- &actual_count);
+ ret = btrfs_run_delayed_refs_for_head(trans, locked_ref);
if (ret < 0 && ret != -EAGAIN) {
/*
* Error, btrfs_run_delayed_refs_for_head already
@@ -2046,24 +2041,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
cond_resched();
} while ((nr != -1 && count < nr) || locked_ref);
- /*
- * We don't want to include ref heads since we can have empty ref heads
- * and those will drastically skew our runtime down since we just do
- * accounting, no actual extent tree updates.
- */
- if (actual_count > 0) {
- u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
- u64 avg;
-
- /*
- * We weigh the current average higher than our current runtime
- * to avoid large swings in the average.
- */
- spin_lock(&delayed_refs->lock);
- avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
- fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
- spin_unlock(&delayed_refs->lock);
- }
return 0;
}
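An aside on the removed accounting: the avg update above is a fixed-point exponentially weighted moving average that weighs the existing average 3:1 over the new sample. A standalone restatement of that update (hypothetical helper name):

	/* Same arithmetic the removed block performed: keep 3/4 of the old
	 * average and mix in 1/4 of the new sample (EWMA with alpha = 1/4).
	 * Example: avg = 1000ns, runtime = 2000ns -> (3000 + 2000) >> 2 = 1250ns. */
	static u64 avg_ref_runtime_update(u64 avg, u64 runtime)
	{
		return (avg * 3 + runtime) >> 2; /* (3 * avg + runtime) / 4 */
	}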