author     Naohiro Aota <naohiro.aota@wdc.com>   2021-08-19 15:19:18 +0300
committer  David Sterba <dsterba@suse.com>       2021-10-26 20:07:59 +0300
commit     68a384b5ab4d3925c35f94ab5723c39bf605466c (patch)
tree       ff1dac54d1d978445d14c563b92096ae24f4fd75 /fs/btrfs/zoned.c
parent     afba2bc036b0ed983bef6bd7c00c827e97d1ba31 (diff)
download   linux-68a384b5ab4d3925c35f94ab5723c39bf605466c.tar.xz
btrfs: zoned: load active zone info for block group
Load the active zone status of a block group's underlying zones. When the
underlying zones are active, add the block group to the fs_info->zone_active_bgs list.
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
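
As an aside, the bookkeeping pattern introduced here can be sketched in plain
userspace C: one flag per stripe records whether its zone is considered active,
and for the SINGLE profile the block group simply inherits the state of stripe 0.
The struct stripe_zone type and the bool array below are hypothetical stand-ins
for the kernel's struct blk_zone, struct map_lookup and the bitmap from
bitmap_zalloc(); this is a minimal sketch of the idea, not the btrfs code itself.

/*
 * Hypothetical userspace sketch of the per-stripe activeness tracking.
 * struct stripe_zone and the bool array stand in for the kernel's
 * struct blk_zone and the bitmap allocated with bitmap_zalloc().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct stripe_zone {
	unsigned long long wp;		/* write pointer offset inside the zone */
	unsigned long long capacity;	/* usable bytes in the zone */
	unsigned int max_active_zones;	/* 0: the device imposes no limit */
};

int main(void)
{
	struct stripe_zone stripes[] = {
		{ .wp = 1 << 20, .capacity = 256 << 20, .max_active_zones = 8 },
		{ .wp = 0,       .capacity = 256 << 20, .max_active_zones = 8 },
	};
	const int num_stripes = sizeof(stripes) / sizeof(stripes[0]);

	/* One flag per stripe, mirroring bitmap_zalloc(map->num_stripes). */
	bool *active = calloc(num_stripes, sizeof(*active));
	if (!active)
		return 1;

	for (int i = 0; i < num_stripes; i++) {
		/* A partially written zone is active by definition. */
		if (stripes[i].wp > 0 && stripes[i].wp < stripes[i].capacity)
			active[i] = true;
		/*
		 * A device without an active-zone limit can keep any number
		 * of zones active, so treat its zones as active as well.
		 */
		if (!stripes[i].max_active_zones)
			active[i] = true;
	}

	/* SINGLE profile: the block group follows the state of stripe 0. */
	bool zone_is_active = active[0];
	printf("block group is %s\n", zone_is_active ? "active" : "inactive");

	free(active);
	return 0;
}

Compiling and running the sketch prints whether the sample block group would be
considered active.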
Diffstat (limited to 'fs/btrfs/zoned.c')
-rw-r--r--  fs/btrfs/zoned.c  24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 614499a83e8c..942a34771383 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1170,6 +1170,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	unsigned int nofs_flag;
 	u64 *alloc_offsets = NULL;
 	u64 *caps = NULL;
+	unsigned long *active = NULL;
 	u64 last_alloc = 0;
 	u32 num_sequential = 0, num_conventional = 0;
 
@@ -1214,6 +1215,12 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		goto out;
 	}
 
+	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
+	if (!active) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	for (i = 0; i < map->num_stripes; i++) {
 		bool is_sequential;
 		struct blk_zone zone;
@@ -1297,8 +1304,16 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			/* Partially used zone */
 			alloc_offsets[i] =
 					((zone.wp - zone.start) << SECTOR_SHIFT);
+			__set_bit(i, active);
 			break;
 		}
+
+		/*
+		 * Consider a zone as active if we can allow any number of
+		 * active zones.
+		 */
+		if (!device->zone_info->max_active_zones)
+			__set_bit(i, active);
 	}
 
 	if (num_sequential > 0)
@@ -1346,6 +1361,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		}
 		cache->alloc_offset = alloc_offsets[0];
 		cache->zone_capacity = caps[0];
+		cache->zone_is_active = test_bit(0, active);
 		break;
 	case BTRFS_BLOCK_GROUP_DUP:
 	case BTRFS_BLOCK_GROUP_RAID1:
@@ -1361,6 +1377,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		goto out;
 	}
 
+	if (cache->zone_is_active) {
+		btrfs_get_block_group(cache);
+		spin_lock(&fs_info->zone_active_bgs_lock);
+		list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
+		spin_unlock(&fs_info->zone_active_bgs_lock);
+	}
+
 out:
 	if (cache->alloc_offset > fs_info->zone_size) {
 		btrfs_err(fs_info,
@@ -1392,6 +1415,7 @@ out:
 		kfree(cache->physical_map);
 		cache->physical_map = NULL;
 	}
+	bitmap_free(active);
 	kfree(caps);
 	kfree(alloc_offsets);
 	free_extent_map(em);
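
The last functional hunk links an active block group into the global
fs_info->zone_active_bgs list: a reference is taken first so the block group
cannot go away while it sits on the list, and the dedicated spinlock serializes
the list manipulation. A hypothetical userspace sketch of that
take-a-reference-then-link-under-a-lock pattern, with a pthread mutex and a
plain counter standing in for spin_lock() and btrfs_get_block_group(), could
look like this:

/*
 * Hypothetical sketch of the "take a reference, then link under a lock"
 * pattern from the last hunk. The mutex and refcount stand in for
 * fs_info->zone_active_bgs_lock and btrfs_get_block_group().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct block_group {
	int refs;			/* stand-in for the kernel refcount */
	bool zone_is_active;
	struct block_group *next;	/* simple singly linked active list */
};

static struct block_group *zone_active_bgs;
static pthread_mutex_t zone_active_bgs_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_if_active(struct block_group *bg)
{
	if (!bg->zone_is_active)
		return;

	bg->refs++;			/* keep bg alive while it is on the list */
	pthread_mutex_lock(&zone_active_bgs_lock);
	bg->next = zone_active_bgs;	/* link into the global active list */
	zone_active_bgs = bg;
	pthread_mutex_unlock(&zone_active_bgs_lock);
}

int main(void)
{
	struct block_group bg = { .refs = 1, .zone_is_active = true };

	add_if_active(&bg);
	printf("on active list: %s, refs: %d\n",
	       zone_active_bgs == &bg ? "yes" : "no", bg.refs);
	return 0;
}

Unlike list_add_tail() in the kernel, the sketch simply pushes onto the head of
a singly linked list; the ordering is irrelevant to the pattern being shown.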