author | Christoph Hellwig <hch@lst.de> | 2022-06-08 09:34:09 +0300
---|---|---
committer | Mike Snitzer <snitzer@kernel.org> | 2022-06-29 19:46:06 +0300
commit | e810cb78bc4b4febeac451a0d823ca68622cd86b (patch) |
tree | 42cefa95e782c4157125cbbe136f6c9a011d0ac0 /drivers/md/dm-table.c |
parent | 4ed045d8756a08dd6b823ffc6867dfc344491909 (diff) |
download | linux-e810cb78bc4b4febeac451a0d823ca68622cd86b.tar.xz |
dm: refactor dm_md_mempool allocation
The current split between dm_table_alloc_md_mempools and
dm_alloc_md_mempools is rather arbitrary, so merge the two
into one easy-to-follow function.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r-- | drivers/md/dm-table.c | 57 |
1 file changed, 41 insertions, 16 deletions
```diff
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bd539afbfe88..3f29b1113294 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -6,6 +6,7 @@
  */
 
 #include "dm-core.h"
+#include "dm-rq.h"
 
 #include <linux/module.h>
 #include <linux/vmalloc.h>
@@ -1010,32 +1011,56 @@ static bool dm_table_supports_poll(struct dm_table *t);
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
         enum dm_queue_mode type = dm_table_get_type(t);
-        unsigned per_io_data_size = 0;
-        unsigned min_pool_size = 0;
-        struct dm_target *ti;
-        unsigned i;
-        bool poll_supported = false;
+        unsigned int per_io_data_size = 0, front_pad, io_front_pad;
+        unsigned int min_pool_size = 0, pool_size;
+        struct dm_md_mempools *pools;
 
         if (unlikely(type == DM_TYPE_NONE)) {
                 DMWARN("no table type is set, can't allocate mempools");
                 return -EINVAL;
         }
 
-        if (__table_type_bio_based(type)) {
-                for (i = 0; i < t->num_targets; i++) {
-                        ti = t->targets + i;
-                        per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
-                        min_pool_size = max(min_pool_size, ti->num_flush_bios);
-                }
-                poll_supported = dm_table_supports_poll(t);
+        pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
+        if (!pools)
+                return -ENOMEM;
+
+        if (type == DM_TYPE_REQUEST_BASED) {
+                pool_size = dm_get_reserved_rq_based_ios();
+                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+                goto init_bs;
         }
 
-        t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
-                                           t->integrity_supported, poll_supported);
-        if (!t->mempools)
-                return -ENOMEM;
+        for (unsigned int i = 0; i < t->num_targets; i++) {
+                struct dm_target *ti = t->targets + i;
 
+                per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+                min_pool_size = max(min_pool_size, ti->num_flush_bios);
+        }
+        pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
+        front_pad = roundup(per_io_data_size,
+                __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
+
+        io_front_pad = roundup(per_io_data_size,
+                __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
+        if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
+                        dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
+                goto out_free_pools;
+        if (t->integrity_supported &&
+            bioset_integrity_create(&pools->io_bs, pool_size))
+                goto out_free_pools;
+init_bs:
+        if (bioset_init(&pools->bs, pool_size, front_pad, 0))
+                goto out_free_pools;
+        if (t->integrity_supported &&
+            bioset_integrity_create(&pools->bs, pool_size))
+                goto out_free_pools;
 
+        t->mempools = pools;
         return 0;
+
+out_free_pools:
+        dm_free_md_mempools(pools);
+        return -ENOMEM;
 }
 
 static int setup_indexes(struct dm_table *t)
```
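For readability, here is the merged function as it reads after the patch, reconstructed from the hunks above. The code is unchanged apart from the comments, which are editorial annotations rather than part of the commit:

```c
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
        enum dm_queue_mode type = dm_table_get_type(t);
        unsigned int per_io_data_size = 0, front_pad, io_front_pad;
        unsigned int min_pool_size = 0, pool_size;
        struct dm_md_mempools *pools;

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        /* The pools structure itself is allocated on the device's NUMA node. */
        pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
        if (!pools)
                return -ENOMEM;

        if (type == DM_TYPE_REQUEST_BASED) {
                /*
                 * Request-based tables only need the clone bioset (pools->bs),
                 * front-padded for struct dm_rq_clone_bio_info.
                 */
                pool_size = dm_get_reserved_rq_based_ios();
                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
                goto init_bs;
        }

        /* Bio-based tables: size the pools and padding from the targets. */
        for (unsigned int i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
                min_pool_size = max(min_pool_size, ti->num_flush_bios);
        }
        pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
        front_pad = roundup(per_io_data_size,
                __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;

        /*
         * Bio-based tables additionally need the dm_io bioset (pools->io_bs),
         * with a per-cpu bio cache when the table supports polling.
         */
        io_front_pad = roundup(per_io_data_size,
                __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
        if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
                        dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
                goto out_free_pools;
        if (t->integrity_supported &&
            bioset_integrity_create(&pools->io_bs, pool_size))
                goto out_free_pools;
init_bs:
        /* Shared tail: the clone bioset is set up for both table types. */
        if (bioset_init(&pools->bs, pool_size, front_pad, 0))
                goto out_free_pools;
        if (t->integrity_supported &&
            bioset_integrity_create(&pools->bs, pool_size))
                goto out_free_pools;

        t->mempools = pools;
        return 0;

out_free_pools:
        /* Tears down whichever biosets were initialized before the failure. */
        dm_free_md_mempools(pools);
        return -ENOMEM;
}
```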