author | Mike Snitzer <snitzer@kernel.org> | 2022-03-26 21:14:00 +0300
committer | Mike Snitzer <snitzer@kernel.org> | 2022-05-06 00:31:34 +0300
commit | 442761fd2b297d65d1cb5786249e1e07a19e9122 (patch)
tree | 5f2b1b8f1c25cd74c1e2c0af3ce2e6fed3941c5b /drivers/md
parent | 563a225c9fd207326c2a2af9d59b4097cb31ce70 (diff)
download | linux-442761fd2b297d65d1cb5786249e1e07a19e9122.tar.xz
dm: conditionally enable branching for less used features
Use jump_labels to further reduce the cost of unlikely branches for zoned
block devices, dm-stats and swap_bios throttling.
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
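The patch relies on the kernel's static-branch (jump label) API from <linux/jump_label.h>: a key defined as false compiles each static_branch_unlikely() site down to a no-op (on architectures with jump label support) until the key is flipped, so devices that never use dm-stats, swap_bios throttling or zoned emulation pay essentially nothing for these checks. The sketch below shows the define/test/enable pattern the patch applies; the example_* names are invented for illustration and are not part of the DM code.

```c
#include <linux/jump_label.h>
#include <linux/atomic.h>

/* Starts false: every static_branch_unlikely() below is patched to a NOP. */
DEFINE_STATIC_KEY_FALSE(example_feature_enabled);

static atomic_t example_events = ATOMIC_INIT(0);

/* Hot path: the feature check costs nothing until the key is enabled. */
static inline void example_hot_path(void)
{
        if (static_branch_unlikely(&example_feature_enabled))
                atomic_inc(&example_events);
}

/*
 * Setup path (process context only, since enabling a key can sleep):
 * flip the key at most once, the first time the feature is configured.
 */
static void example_enable_feature(void)
{
        if (!static_key_enabled(&example_feature_enabled.key))
                static_branch_enable(&example_feature_enabled);
}
```

In the patch itself, stats_enabled is flipped in dm_stats_create(), swap_bios_enabled in dm_table_add_target() when a target sets limit_swap_bios, and zoned_enabled in dm_table_set_restrictions() for zoned request queues; none of the keys is ever disabled again.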
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-core.h  |  5
-rw-r--r-- | drivers/md/dm-stats.c |  3
-rw-r--r-- | drivers/md/dm-table.c |  5
-rw-r--r-- | drivers/md/dm.c       | 63
4 files changed, 53 insertions, 23 deletions
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 41d6511dc7cf..8ba99eaa0872 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -13,6 +13,7 @@
 #include <linux/ktime.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-crypto-profile.h>
+#include <linux/jump_label.h>

 #include <trace/events/block.h>

@@ -154,6 +155,10 @@ static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
         return &md->stats;
 }

+DECLARE_STATIC_KEY_FALSE(stats_enabled);
+DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
+DECLARE_STATIC_KEY_FALSE(zoned_enabled);
+
 static inline bool dm_emulate_zone_append(struct mapped_device *md)
 {
         if (blk_queue_is_zoned(md->queue))
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 0e039a8c0bf2..86e0697330e8 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -396,6 +396,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,

         dm_stats_recalc_precise_timestamps(stats);

+        if (!static_key_enabled(&stats_enabled.key))
+                static_branch_enable(&stats_enabled);
+
         mutex_unlock(&stats->mutex);

         resume_callback(md);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 73ed15d8cfc6..a37c7b763643 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -719,6 +719,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
                 DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                        dm_device_name(t->md), type);

+        if (tgt->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
+                static_branch_enable(&swap_bios_enabled);
+
         return 0;

  bad:
@@ -2040,6 +2043,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                 r = dm_set_zones_restrictions(t, q);
                 if (r)
                         return r;
+                if (!static_key_enabled(&zoned_enabled.key))
+                        static_branch_enable(&zoned_enabled);
         }

         dm_update_crypto_profile(q, t);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d7b7154edffd..6304322a48f0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -71,6 +71,10 @@ void dm_issue_global_event(void)
         wake_up(&dm_global_eventq);
 }

+DEFINE_STATIC_KEY_FALSE(stats_enabled);
+DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
+DEFINE_STATIC_KEY_FALSE(zoned_enabled);
+
 /*
  * One of these is allocated (on-stack) per original bio.
  */
@@ -516,7 +520,8 @@ static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
         else
                 bio_end_io_acct(bio, start_time);

-        if (unlikely(dm_stats_used(&md->stats)))
+        if (static_branch_unlikely(&stats_enabled) &&
+            unlikely(dm_stats_used(&md->stats)))
                 dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                     bio->bi_iter.bi_sector, bio_sectors(bio),
                                     end, start_time, stats_aux);
@@ -586,7 +591,8 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
         io->start_time = jiffies;
         io->flags = 0;

-        dm_stats_record_start(&md->stats, &io->stats_aux);
+        if (static_branch_unlikely(&stats_enabled))
+                dm_stats_record_start(&md->stats, &io->stats_aux);

         return io;
 }
@@ -1012,21 +1018,25 @@ static void clone_endio(struct bio *bio)
                         disable_write_zeroes(md);
         }

-        if (unlikely(blk_queue_is_zoned(q)))
+        if (static_branch_unlikely(&zoned_enabled) &&
+            unlikely(blk_queue_is_zoned(q)))
                 dm_zone_endio(io, bio);

         if (endio) {
                 int r = endio(ti, bio, &error);
                 switch (r) {
                 case DM_ENDIO_REQUEUE:
-                        /*
-                         * Requeuing writes to a sequential zone of a zoned
-                         * target will break the sequential write pattern:
-                         * fail such IO.
-                         */
-                        if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
-                                error = BLK_STS_IOERR;
-                        else
+                        if (static_branch_unlikely(&zoned_enabled)) {
+                                /*
+                                 * Requeuing writes to a sequential zone of a zoned
+                                 * target will break the sequential write pattern:
+                                 * fail such IO.
+                                 */
+                                if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
+                                        error = BLK_STS_IOERR;
+                                else
+                                        error = BLK_STS_DM_REQUEUE;
+                        } else
                                 error = BLK_STS_DM_REQUEUE;
                         fallthrough;
                 case DM_ENDIO_DONE:
@@ -1040,7 +1050,8 @@ static void clone_endio(struct bio *bio)
                 }
         }

-        if (unlikely(swap_bios_limit(ti, bio)))
+        if (static_branch_unlikely(&swap_bios_enabled) &&
+            unlikely(swap_bios_limit(ti, bio)))
                 up(&md->swap_bios_semaphore);

         free_tio(bio);
@@ -1295,21 +1306,25 @@ static void __map_bio(struct bio *clone)
         dm_io_inc_pending(io);
         tio->old_sector = clone->bi_iter.bi_sector;

-        if (unlikely(swap_bios_limit(ti, clone))) {
+        if (static_branch_unlikely(&swap_bios_enabled) &&
+            unlikely(swap_bios_limit(ti, clone))) {
                 int latch = get_swap_bios();
                 if (unlikely(latch != md->swap_bios))
                         __set_swap_bios_limit(md, latch);
                 down(&md->swap_bios_semaphore);
         }

-        /*
-         * Check if the IO needs a special mapping due to zone append emulation
-         * on zoned target. In this case, dm_zone_map_bio() calls the target
-         * map operation.
-         */
-        if (unlikely(dm_emulate_zone_append(md)))
-                r = dm_zone_map_bio(tio);
-        else
+        if (static_branch_unlikely(&zoned_enabled)) {
+                /*
+                 * Check if the IO needs a special mapping due to zone append
+                 * emulation on zoned target. In this case, dm_zone_map_bio()
+                 * calls the target map operation.
+                 */
+                if (unlikely(dm_emulate_zone_append(md)))
+                        r = dm_zone_map_bio(tio);
+                else
+                        r = ti->type->map(ti, clone);
+        } else
                 r = ti->type->map(ti, clone);

         switch (r) {
@@ -1329,7 +1344,8 @@ static void __map_bio(struct bio *clone)
                 break;
         case DM_MAPIO_KILL:
         case DM_MAPIO_REQUEUE:
-                if (unlikely(swap_bios_limit(ti, clone)))
+                if (static_branch_unlikely(&swap_bios_enabled) &&
+                    unlikely(swap_bios_limit(ti, clone)))
                         up(&md->swap_bios_semaphore);
                 free_tio(clone);
                 if (r == DM_MAPIO_KILL)
@@ -1565,7 +1581,8 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
         ci->sector_count = bio_sectors(bio);

         /* Shouldn't happen but sector_count was being set to 0 so... */
-        if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
+        if (static_branch_unlikely(&zoned_enabled) &&
+            WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
                 ci->sector_count = 0;
 }
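A note on the header/source split visible above: the keys are declared in drivers/md/dm-core.h with DECLARE_STATIC_KEY_FALSE() so that dm.c, dm-stats.c and dm-table.c can all test or enable them, while the single DEFINE_STATIC_KEY_FALSE() definition lives in dm.c. A minimal sketch of that arrangement, again with invented example_* names rather than the real DM symbols:

```c
/* example_feature.h -- shared header, mirrors the DECLARE in dm-core.h */
#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(example_feature_enabled);

/* example_feature.c -- exactly one translation unit owns the definition */
#include "example_feature.h"

DEFINE_STATIC_KEY_FALSE(example_feature_enabled);
```

Any other file that includes the header can then use static_branch_unlikely() or static_branch_enable() on the same key, which is how the three DM source files share stats_enabled, swap_bios_enabled and zoned_enabled.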