author    | Josef Bacik <josef@toxicpanda.com>      | 2022-07-15 22:45:24 +0300
committer | David Sterba <dsterba@suse.com>         | 2022-09-26 13:27:54 +0300
commit    | 3349b57fd47b5de10936bd8928db546b20c9fb91 (patch)
tree      | 12dce14e91153e4ae52b8bf7b525bb366c0ec8f8 /fs/btrfs
parent    | 723de71d41f50709eaf2eef1b08aa409687a3c97 (diff)
download  | linux-3349b57fd47b5de10936bd8928db546b20c9fb91.tar.xz
btrfs: convert block group bit field to use bit helpers
We use a bit field in the btrfs_block_group for different flags. However,
this is awkward because we have to hold the block_group->lock for any
modification of any of these fields, and it makes the code clunky for a
few of these flags. Convert these to a proper flags setup so we can
utilize the bit helpers.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
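
For readers unfamiliar with the bit helper pattern, the sketch below contrasts the old one-bit-field-per-flag layout with the single flags word this patch introduces. This is a minimal userspace sketch, not kernel code: the set_bit()/clear_bit()/test_bit()/test_and_set_bit() stubs are non-atomic stand-ins for the kernel helpers declared in <linux/bitops.h>, and the struct and flag names here are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Non-atomic stand-ins for the kernel's bit helpers. */
static void set_bit(int nr, unsigned long *addr)   { *addr |= 1UL << nr; }
static void clear_bit(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static bool test_bit(int nr, const unsigned long *addr) { return (*addr >> nr) & 1UL; }
static bool test_and_set_bit(int nr, unsigned long *addr)
{
	bool old = test_bit(nr, addr);

	set_bit(nr, addr);
	return old;
}

/* Old style: one named bit field per flag, all sharing one word. */
struct bg_old {
	unsigned int removed:1;
	unsigned int iref:1;
};

/* New style: one flags word indexed by enum bit numbers. */
enum bg_runtime_flags { FLAG_REMOVED, FLAG_IREF };

struct bg_new {
	unsigned long runtime_flags;
};

int main(void)
{
	struct bg_new bg = { 0 };

	set_bit(FLAG_REMOVED, &bg.runtime_flags);
	printf("removed: %d\n", test_bit(FLAG_REMOVED, &bg.runtime_flags));

	/* A check-then-set sequence collapses into one call. */
	if (!test_and_set_bit(FLAG_IREF, &bg.runtime_flags))
		printf("took the reference\n");

	clear_bit(FLAG_IREF, &bg.runtime_flags);
	return 0;
}

In the kernel the real helpers are atomic read-modify-write operations on the flags word, so a single flag can be updated without the non-atomic load/store of a whole bit-field word; check-then-set patterns like "if (!iref) { ...; iref = 1; }" collapse into a single test_and_set_bit(), as the free-space-cache.c hunks below show.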
Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/block-group.c      | 25
-rw-r--r-- | fs/btrfs/block-group.h      | 21
-rw-r--r-- | fs/btrfs/dev-replace.c      | 6
-rw-r--r-- | fs/btrfs/extent-tree.c      | 5
-rw-r--r-- | fs/btrfs/free-space-cache.c | 16
-rw-r--r-- | fs/btrfs/scrub.c            | 12
-rw-r--r-- | fs/btrfs/space-info.c       | 2
-rw-r--r-- | fs/btrfs/volumes.c          | 9
-rw-r--r-- | fs/btrfs/zoned.c            | 31
9 files changed, 71 insertions, 56 deletions
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index d83c044d3003..601743f22a9a 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -772,7 +772,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
 	WARN_ON(cache->caching_ctl);
 	cache->caching_ctl = caching_ctl;
 	cache->cached = BTRFS_CACHE_STARTED;
-	cache->has_caching_ctl = 1;
+	set_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &cache->runtime_flags);
 	spin_unlock(&cache->lock);
 
 	write_lock(&fs_info->block_group_cache_lock);
@@ -988,11 +988,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		kobject_put(kobj);
 	}
 
-	if (block_group->has_caching_ctl)
+
+	if (test_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &block_group->runtime_flags))
 		caching_ctl = btrfs_get_caching_control(block_group);
 	if (block_group->cached == BTRFS_CACHE_STARTED)
 		btrfs_wait_block_group_cache_done(block_group);
-	if (block_group->has_caching_ctl) {
+	if (test_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &block_group->runtime_flags)) {
 		write_lock(&fs_info->block_group_cache_lock);
 		if (!caching_ctl) {
 			struct btrfs_caching_control *ctl;
@@ -1034,12 +1035,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 			< block_group->zone_unusable);
 		WARN_ON(block_group->space_info->disk_total
 			< block_group->length * factor);
-		WARN_ON(block_group->zone_is_active &&
+		WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+				 &block_group->runtime_flags) &&
 			block_group->space_info->active_total_bytes
 			< block_group->length);
 	}
 	block_group->space_info->total_bytes -= block_group->length;
-	if (block_group->zone_is_active)
+	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
 		block_group->space_info->active_total_bytes -= block_group->length;
 	block_group->space_info->bytes_readonly -=
 		(block_group->length - block_group->zone_unusable);
@@ -1069,7 +1071,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		goto out;
 
 	spin_lock(&block_group->lock);
-	block_group->removed = 1;
+	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
+
 	/*
 	 * At this point trimming or scrub can't start on this block group,
 	 * because we removed the block group from the rbtree
@@ -2409,7 +2412,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
 		ret = insert_block_group_item(trans, block_group);
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
-		if (!block_group->chunk_item_inserted) {
+		if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+			      &block_group->runtime_flags)) {
 			mutex_lock(&fs_info->chunk_mutex);
 			ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
 			mutex_unlock(&fs_info->chunk_mutex);
@@ -3955,7 +3959,8 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
 		while (block_group) {
 			btrfs_wait_block_group_cache_done(block_group);
 			spin_lock(&block_group->lock);
-			if (block_group->iref)
+			if (test_bit(BLOCK_GROUP_FLAG_IREF,
+				     &block_group->runtime_flags))
 				break;
 			spin_unlock(&block_group->lock);
 			block_group = btrfs_next_block_group(block_group);
@@ -3968,7 +3973,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
 		}
 
 		inode = block_group->inode;
-		block_group->iref = 0;
+		clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags);
 		block_group->inode = NULL;
 		spin_unlock(&block_group->lock);
 		ASSERT(block_group->io_ctl.inode == NULL);
@@ -4110,7 +4115,7 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
 	spin_lock(&block_group->lock);
 	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
-		   block_group->removed);
+		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
 	spin_unlock(&block_group->lock);
 
 	if (cleanup) {
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 6b3cdc4cbc41..2a2cae6038c7 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -46,6 +46,18 @@ enum btrfs_chunk_alloc_enum {
 	CHUNK_ALLOC_FORCE_FOR_EXTENT,
 };
 
+/* Block group flags set at runtime */
+enum btrfs_block_group_flags {
+	BLOCK_GROUP_FLAG_IREF,
+	BLOCK_GROUP_FLAG_HAS_CACHING_CTL,
+	BLOCK_GROUP_FLAG_REMOVED,
+	BLOCK_GROUP_FLAG_TO_COPY,
+	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
+	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+};
+
 struct btrfs_caching_control {
 	struct list_head list;
 	struct mutex mutex;
@@ -95,16 +107,9 @@ struct btrfs_block_group {
 	/* For raid56, this is a full stripe, without parity */
 	unsigned long full_stripe_len;
+	unsigned long runtime_flags;
 
 	unsigned int ro;
-	unsigned int iref:1;
-	unsigned int has_caching_ctl:1;
-	unsigned int removed:1;
-	unsigned int to_copy:1;
-	unsigned int relocating_repair:1;
-	unsigned int chunk_item_inserted:1;
-	unsigned int zone_is_active:1;
-	unsigned int zoned_data_reloc_ongoing:1;
 
 	int disk_cache_state;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 41cddd3ff059..46b608260b33 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -546,7 +546,7 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
 			continue;
 
 		spin_lock(&cache->lock);
-		cache->to_copy = 1;
+		set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
 		spin_unlock(&cache->lock);
 
 		btrfs_put_block_group(cache);
@@ -577,7 +577,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
 		return true;
 
 	spin_lock(&cache->lock);
-	if (cache->removed) {
+	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
 		spin_unlock(&cache->lock);
 		return true;
 	}
@@ -611,7 +611,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
 
 	/* Last stripe on this device */
 	spin_lock(&cache->lock);
-	cache->to_copy = 0;
+	clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
 	spin_unlock(&cache->lock);
 
 	return true;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6914cd8024ba..86ac953c69ac 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3804,7 +3804,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
 	       block_group->start == fs_info->data_reloc_bg ||
 	       fs_info->data_reloc_bg == 0);
 
-	if (block_group->ro || block_group->zoned_data_reloc_ongoing) {
+	if (block_group->ro ||
+	    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
 		ret = 1;
 		goto out;
 	}
@@ -3881,7 +3882,7 @@ out:
 		 * regular extents) at the same time to the same zone, which
 		 * easily break the write pointer.
 		 */
-		block_group->zoned_data_reloc_ongoing = 1;
+		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
 		fs_info->data_reloc_bg = 0;
 	}
 	spin_unlock(&fs_info->relocation_bg_lock);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 835071fa39a9..81d9fe33672f 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -126,10 +126,8 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
 		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 	}
 
-	if (!block_group->iref) {
+	if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
 		block_group->inode = igrab(inode);
-		block_group->iref = 1;
-	}
 	spin_unlock(&block_group->lock);
 
 	return inode;
@@ -241,8 +239,7 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
 	clear_nlink(inode);
 	/* One for the block groups ref */
 	spin_lock(&block_group->lock);
-	if (block_group->iref) {
-		block_group->iref = 0;
+	if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
 		block_group->inode = NULL;
 		spin_unlock(&block_group->lock);
 		iput(inode);
@@ -2876,7 +2873,8 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
 	if (btrfs_is_zoned(fs_info)) {
 		btrfs_info(fs_info, "free space %llu active %d",
 			   block_group->zone_capacity - block_group->alloc_offset,
-			   block_group->zone_is_active);
+			   test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+				    &block_group->runtime_flags));
 		return;
 	}
@@ -4008,7 +4006,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
 	*trimmed = 0;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
+	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
@@ -4038,7 +4036,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
 	*trimmed = 0;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
+	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
@@ -4060,7 +4058,7 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
 	*trimmed = 0;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
+	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 7d9b09e3ca70..534dd680a6aa 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3257,7 +3257,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
 		}
 		/* Block group removed? */
 		spin_lock(&bg->lock);
-		if (bg->removed) {
+		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
 			spin_unlock(&bg->lock);
 			ret = 0;
 			break;
@@ -3597,7 +3597,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 	 * kthread or relocation.
 	 */
 	spin_lock(&bg->lock);
-	if (!bg->removed)
+	if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
 		ret = -EINVAL;
 	spin_unlock(&bg->lock);
 
@@ -3756,7 +3756,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
 		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
 			spin_lock(&cache->lock);
-			if (!cache->to_copy) {
+			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
 				spin_unlock(&cache->lock);
 				btrfs_put_block_group(cache);
 				goto skip;
@@ -3773,7 +3773,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * repair extents.
 		 */
 		spin_lock(&cache->lock);
-		if (cache->removed) {
+		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
 			spin_unlock(&cache->lock);
 			btrfs_put_block_group(cache);
 			goto skip;
@@ -3933,8 +3933,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * balance is triggered or it becomes used and unused again.
 		 */
 		spin_lock(&cache->lock);
-		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
-		    cache->used == 0) {
+		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
+		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
 			spin_unlock(&cache->lock);
 			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
 				btrfs_discard_queue_work(&fs_info->discard_ctl,
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 6260e52d76f4..b74bc31e9a8e 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -305,7 +305,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
 	ASSERT(found);
 	spin_lock(&found->lock);
 	found->total_bytes += block_group->length;
-	if (block_group->zone_is_active)
+	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
 		found->active_total_bytes += block_group->length;
 	found->disk_total += block_group->length * factor;
 	found->bytes_used += block_group->used;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f63ff91e2883..fcde4f085cb6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5595,7 +5595,7 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
 	if (ret)
 		goto out;
 
-	bg->chunk_item_inserted = 1;
+	set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
 
 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
@@ -6154,7 +6154,7 @@ static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
 	cache = btrfs_lookup_block_group(fs_info, logical);
 
 	spin_lock(&cache->lock);
-	ret = cache->to_copy;
+	ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
 	spin_unlock(&cache->lock);
 
 	btrfs_put_block_group(cache);
@@ -8244,7 +8244,7 @@ static int relocating_repair_kthread(void *data)
 	if (!cache)
 		goto out;
 
-	if (!cache->relocating_repair)
+	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
 		goto out;
 
 	ret = btrfs_may_alloc_data_chunk(fs_info, target);
@@ -8282,12 +8282,11 @@ bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
 		return true;
 
 	spin_lock(&cache->lock);
-	if (cache->relocating_repair) {
+	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
 		spin_unlock(&cache->lock);
 		btrfs_put_block_group(cache);
 		return true;
 	}
-	cache->relocating_repair = 1;
 	spin_unlock(&cache->lock);
 
 	kthread_run(relocating_repair_kthread, cache,
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 73c6929f7be6..9f31865e6003 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1436,7 +1436,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			goto out;
 		} else if (map->num_stripes == num_conventional) {
 			cache->alloc_offset = last_alloc;
-			cache->zone_is_active = 1;
+			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
 			goto out;
 		}
 	}
@@ -1452,7 +1452,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		}
 		cache->alloc_offset = alloc_offsets[0];
 		cache->zone_capacity = caps[0];
-		cache->zone_is_active = test_bit(0, active);
+		if (test_bit(0, active))
+			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
 		break;
 	case BTRFS_BLOCK_GROUP_DUP:
 		if (map->type & BTRFS_BLOCK_GROUP_DATA) {
@@ -1486,7 +1487,9 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 				goto out;
 			}
 		} else {
-			cache->zone_is_active = test_bit(0, active);
+			if (test_bit(0, active))
+				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+					&cache->runtime_flags);
 		}
 		cache->alloc_offset = alloc_offsets[0];
 		cache->zone_capacity = min(caps[0], caps[1]);
@@ -1530,7 +1533,7 @@ out:
 	if (!ret) {
 		cache->meta_write_pointer = cache->alloc_offset + cache->start;
 
-		if (cache->zone_is_active) {
+		if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
 			btrfs_get_block_group(cache);
 			spin_lock(&fs_info->zone_active_bgs_lock);
 			list_add_tail(&cache->active_bg_list,
@@ -1871,7 +1874,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
 	spin_lock(&space_info->lock);
 	spin_lock(&block_group->lock);
-	if (block_group->zone_is_active) {
+	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
 		ret = true;
 		goto out_unlock;
 	}
@@ -1897,7 +1900,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 	}
 
 	/* Successfully activated all the zones */
-	block_group->zone_is_active = 1;
+	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
 	space_info->active_total_bytes += block_group->length;
 	spin_unlock(&block_group->lock);
 	btrfs_try_granting_tickets(fs_info, space_info);
@@ -1960,7 +1963,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 	int i;
 
 	spin_lock(&block_group->lock);
-	if (!block_group->zone_is_active) {
+	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
@@ -2001,7 +2004,8 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 		 * Bail out if someone already deactivated the block group, or
 		 * allocated space is left in the block group.
 		 */
-		if (!block_group->zone_is_active) {
+		if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+			      &block_group->runtime_flags)) {
 			spin_unlock(&block_group->lock);
 			btrfs_dec_block_group_ro(block_group);
 			return 0;
@@ -2014,7 +2018,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 		}
 	}
 
-	block_group->zone_is_active = 0;
+	clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
 	block_group->alloc_offset = block_group->zone_capacity;
 	block_group->free_space_ctl->free_space = 0;
 	btrfs_clear_treelog_bg(block_group);
@@ -2222,13 +2226,14 @@ void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
 	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
 
 	spin_lock(&block_group->lock);
-	if (!block_group->zoned_data_reloc_ongoing)
+	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
 		goto out;
 
 	/* All relocation extents are written. */
 	if (block_group->start + block_group->alloc_offset == logical + length) {
 		/* Now, release this block group for further allocations. */
-		block_group->zoned_data_reloc_ongoing = 0;
+		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+			  &block_group->runtime_flags);
 	}
 
 out:
@@ -2300,7 +2305,9 @@ int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
 			    list) {
 			if (!spin_trylock(&bg->lock))
 				continue;
-			if (btrfs_zoned_bg_is_full(bg) || bg->zone_is_active) {
+			if (btrfs_zoned_bg_is_full(bg) ||
+			    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+				     &bg->runtime_flags)) {
 				spin_unlock(&bg->lock);
 				continue;
 			}