commit     2c275afeb61dab732353aae2c7de01b6a87dcefc
author     Christoph Hellwig <hch@lst.de>      2023-03-27 03:49:53 +0300
committer  David Sterba <dsterba@suse.com>     2023-04-17 19:01:22 +0300
tree       dd6e0f0dbecf7aeceb15226e0f28ee20652af25a  /block
parent     12be09fe18f2fd9f882ca0acbe14cf121250bcbe
download   linux-2c275afeb61dab732353aae2c7de01b6a87dcefc.tar.xz
block: make blkcg_punt_bio_submit optional
Guard all of the code that punts bios to a per-cgroup submission helper
behind a new CONFIG_BLK_CGROUP_PUNT_BIO symbol, which is selected by btrfs.
This way, non-btrfs kernel builds don't need to carry this code.
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
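
The diffstat below is limited to block/, so two pieces of this change are not visible here: the btrfs Kconfig hunk that selects the new symbol, and the caller-facing declaration of blkcg_punt_bio_submit(). Presumably that declaration collapses to a plain submit_bio() when CONFIG_BLK_CGROUP_PUNT_BIO is off, roughly along the lines of the following sketch (the header location and the fallback body are assumptions, not taken from the diff shown here):

    /* Sketch only: assumed caller-facing gating, mirroring the helper in block/blk-cgroup.c. */
    #ifdef CONFIG_BLK_CGROUP_PUNT_BIO
    void blkcg_punt_bio_submit(struct bio *bio);
    #else
    /* No punting support built in: issue the bio directly. */
    static inline void blkcg_punt_bio_submit(struct bio *bio)
    {
            submit_bio(bio);
    }
    #endif

With a fallback of that shape, callers can use blkcg_punt_bio_submit() unconditionally while non-btrfs builds carry none of the per-blkcg workqueue machinery.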
Diffstat (limited to 'block')
 block/Kconfig      |  3
 block/blk-cgroup.c | 77
 block/blk-cgroup.h |  3
 3 files changed, 47 insertions(+), 36 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 941b2dca70db..69ccf7457ae1 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -41,6 +41,9 @@ config BLK_RQ_ALLOC_TIME
 config BLK_CGROUP_RWSTAT
        bool
 
+config BLK_CGROUP_PUNT_BIO
+       bool
+
 config BLK_DEV_BSG_COMMON
        tristate
 
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c524ecab440b..18c922579719 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -56,7 +56,6 @@ static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 static LIST_HEAD(all_blkcgs);          /* protected by blkcg_pol_mutex */
 
 bool blkcg_debug_stats = false;
-static struct workqueue_struct *blkcg_punt_bio_wq;
 
 #define BLKG_DESTROY_BATCH_SIZE  64
 
@@ -166,7 +165,9 @@ static void __blkg_release(struct rcu_head *rcu)
 {
        struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
 
+#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
        WARN_ON(!bio_list_empty(&blkg->async_bios));
+#endif
 
        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
@@ -188,6 +189,9 @@ static void blkg_release(struct percpu_ref *ref)
        call_rcu(&blkg->rcu_head, __blkg_release);
 }
 
+#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
+static struct workqueue_struct *blkcg_punt_bio_wq;
+
 static void blkg_async_bio_workfn(struct work_struct *work)
 {
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
@@ -214,6 +218,40 @@ static void blkg_async_bio_workfn(struct work_struct *work)
        blk_finish_plug(&plug);
 }
 
+/*
+ * When a shared kthread issues a bio for a cgroup, doing so synchronously can
+ * lead to priority inversions as the kthread can be trapped waiting for that
+ * cgroup. Use this helper instead of submit_bio to punt the actual issuing to
+ * a dedicated per-blkcg work item to avoid such priority inversions.
+ */
+void blkcg_punt_bio_submit(struct bio *bio)
+{
+       struct blkcg_gq *blkg = bio->bi_blkg;
+
+       if (blkg->parent) {
+               spin_lock(&blkg->async_bio_lock);
+               bio_list_add(&blkg->async_bios, bio);
+               spin_unlock(&blkg->async_bio_lock);
+               queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
+       } else {
+               /* never bounce for the root cgroup */
+               submit_bio(bio);
+       }
+}
+EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);
+
+static int __init blkcg_punt_bio_init(void)
+{
+       blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
+                                           WQ_MEM_RECLAIM | WQ_FREEZABLE |
+                                           WQ_UNBOUND | WQ_SYSFS, 0);
+       if (!blkcg_punt_bio_wq)
+               return -ENOMEM;
+       return 0;
+}
+subsys_initcall(blkcg_punt_bio_init);
+#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */
+
 /**
  * bio_blkcg_css - return the blkcg CSS associated with a bio
  * @bio: target bio
@@ -269,10 +307,12 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 
        blkg->q = disk->queue;
        INIT_LIST_HEAD(&blkg->q_node);
+       blkg->blkcg = blkcg;
+#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
        spin_lock_init(&blkg->async_bio_lock);
        bio_list_init(&blkg->async_bios);
        INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
-       blkg->blkcg = blkcg;
+#endif
 
        u64_stats_init(&blkg->iostat.sync);
        for_each_possible_cpu(cpu) {
@@ -1689,28 +1729,6 @@ out_unlock:
 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
 
 /*
- * When a shared kthread issues a bio for a cgroup, doing so synchronously can
- * lead to priority inversions as the kthread can be trapped waiting for that
- * cgroup. Use this helper instead of submit_bio to punt the actual issuing to
- * a dedicated per-blkcg work item to avoid such priority inversions.
- */
-void blkcg_punt_bio_submit(struct bio *bio)
-{
-       struct blkcg_gq *blkg = bio->bi_blkg;
-
-       if (blkg->parent) {
-               spin_lock(&blkg->async_bio_lock);
-               bio_list_add(&blkg->async_bios, bio);
-               spin_unlock(&blkg->async_bio_lock);
-               queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
-       } else {
-               /* never bounce for the root cgroup */
-               submit_bio(bio);
-       }
-}
-EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);
-
-/*
  * Scale the accumulated delay based on how long it has been since we updated
  * the delay. We only call this when we are adding delay, in case it's been a
  * while since we added delay, and when we are checking to see if we need to
@@ -2088,16 +2106,5 @@ bool blk_cgroup_congested(void)
        return ret;
 }
 
-static int __init blkcg_init(void)
-{
-       blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
-                                           WQ_MEM_RECLAIM | WQ_FREEZABLE |
-                                           WQ_UNBOUND | WQ_SYSFS, 0);
-       if (!blkcg_punt_bio_wq)
-               return -ENOMEM;
-       return 0;
-}
-subsys_initcall(blkcg_init);
-
 module_param(blkcg_debug_stats, bool, 0644);
 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 64758ab9f1f1..e98d2c1be354 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -72,9 +72,10 @@ struct blkcg_gq {
        struct blkg_iostat_set          iostat;
 
        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];
-
+#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
        spinlock_t                      async_bio_lock;
        struct bio_list                 async_bios;
+#endif
        union {
                struct work_struct      async_bio_work;
                struct work_struct      free_work;
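
The comment that moves along with blkcg_punt_bio_submit() above spells out the motivation: a shared kthread that issues bios synchronously on behalf of an arbitrary cgroup can end up throttled by that cgroup, a classic priority inversion. A minimal caller-side sketch of how the helper is meant to be used (the function name and surrounding context are hypothetical, for illustration only):

    #include <linux/bio.h>

    /*
     * Hypothetical shared-worker path: the bio is assumed to be associated
     * with a blkcg already (e.g. via bio_associate_blkg()), and the header
     * declaring blkcg_punt_bio_submit() is assumed to be visible here.
     * Issuing is punted to the per-blkcg work item instead of happening in
     * this thread's context.
     */
    static void example_worker_issue_bio(struct bio *bio)
    {
            blkcg_punt_bio_submit(bio);
    }

As the helper itself shows, only bios belonging to a non-root blkcg are bounced to the blkcg_punt_bio workqueue; bios for the root cgroup are passed straight to submit_bio(), since there is nothing above the root to invert against.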