author     Zizhi Wo <wozizhi@huawei.com>      2025-05-06 05:09:30 +0300
committer  Jens Axboe <axboe@kernel.dk>       2025-05-13 21:08:27 +0300
commit     a404be5399d762f5737a4a731b42a38f552f2b44
tree       5c40b790a46d5fd6b26f4158adcff50fa7a10981
parent     3660cd4228d9330b618e2700491891f08824011d
blk-throttle: Split throtl_charge_bio() into bps and iops functions
Split throtl_charge_bio() into throtl_charge_bps_bio() and
throtl_charge_iops_bio(), so that subsequent patches can charge bps and
iops separately after queue separation.
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Zizhi Wo <wozizhi@huaweicloud.com>
Link: https://lore.kernel.org/r/20250506020935.655574-4-wozizhi@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
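
[Editor's note: the net change at every dispatch site in the diff below is
mechanical: the single combined charge becomes two explicit charges, one per
budget. A minimal before/after sketch of the call pattern; the comments here
are editorial, not from the patch.]

	/* before: one helper charges both budgets */
	throtl_charge_bio(tg, bio);

	/*
	 * after: each budget has its own helper, so later patches can
	 * charge bps and iops at different points once the bps and iops
	 * queues are separated
	 */
	throtl_charge_bps_bio(tg, bio);   /* skipped for bios flagged BIO_BPS_THROTTLED */
	throtl_charge_iops_bio(tg, bio);  /* always counts one I/O */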
 block/blk-throttle.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 24bc1a850581..fea09a91c20b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -787,6 +787,20 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	return jiffy_wait;
 }
 
+static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio)
+{
+	unsigned int bio_size = throtl_bio_data_size(bio);
+
+	/* Charge the bio to the group */
+	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
+		tg->bytes_disp[bio_data_dir(bio)] += bio_size;
+}
+
+static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
+{
+	tg->io_disp[bio_data_dir(bio)]++;
+}
+
 /*
  * If previous slice expired, start a new one otherwise renew/extend existing
  * slice to make sure it is at least throtl_slice interval long since now. New
@@ -859,18 +873,6 @@ static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
 	return max(bps_wait, iops_wait);
 }
 
-static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
-{
-	bool rw = bio_data_dir(bio);
-	unsigned int bio_size = throtl_bio_data_size(bio);
-
-	/* Charge the bio to the group */
-	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
-		tg->bytes_disp[rw] += bio_size;
-
-	tg->io_disp[rw]++;
-}
-
 /**
  * throtl_add_bio_tg - add a bio to the specified throtl_grp
  * @bio: bio to add
@@ -957,7 +959,8 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
 	sq->nr_queued[rw]--;
 
-	throtl_charge_bio(tg, bio);
+	throtl_charge_bps_bio(tg, bio);
+	throtl_charge_iops_bio(tg, bio);
 
 	/*
 	 * If our parent is another tg, we just need to transfer @bio to
@@ -1684,7 +1687,8 @@ bool __blk_throtl_bio(struct bio *bio)
 	while (true) {
 		if (tg_within_limit(tg, bio, rw)) {
 			/* within limits, let's charge and dispatch directly */
-			throtl_charge_bio(tg, bio);
+			throtl_charge_bps_bio(tg, bio);
+			throtl_charge_iops_bio(tg, bio);
 
 			/*
 			 * We need to trim slice even when bios are not being
@@ -1707,7 +1711,8 @@ bool __blk_throtl_bio(struct bio *bio)
 			 * control algorithm is adaptive, and extra IO bytes
 			 * will be throttled for paying the debt
 			 */
-			throtl_charge_bio(tg, bio);
+			throtl_charge_bps_bio(tg, bio);
+			throtl_charge_iops_bio(tg, bio);
 		} else {
 			/* if above limits, break to queue */
 			break;
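
[Editor's note: the sketch below illustrates the dual-budget accounting the
split enables. It is a self-contained userspace analogue, not kernel code;
struct tg here and the charge_bps()/charge_iops() helpers are hypothetical
stand-ins for the throtl_grp fields and helpers touched by this patch.]

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for a throttle group: two independent budgets. */
	struct tg {
		unsigned long bytes_disp;  /* bytes charged this slice */
		unsigned long io_disp;     /* I/Os charged this slice */
	};

	/* Charge only the byte budget, mirroring throtl_charge_bps_bio(). */
	static void charge_bps(struct tg *tg, unsigned int bio_size, bool bps_throttled)
	{
		/* a bio already throttled for bps has paid its byte debt */
		if (!bps_throttled)
			tg->bytes_disp += bio_size;
	}

	/* Charge only the I/O budget, mirroring throtl_charge_iops_bio(). */
	static void charge_iops(struct tg *tg)
	{
		tg->io_disp++;
	}

	int main(void)
	{
		struct tg tg = { 0, 0 };

		/* A 4 KiB bio that already crossed the bps gate: only iops is charged. */
		charge_bps(&tg, 4096, true);
		charge_iops(&tg);

		/* A fresh 4 KiB bio: both budgets are charged. */
		charge_bps(&tg, 4096, false);
		charge_iops(&tg);

		/* prints bytes_disp=4096 io_disp=2 */
		printf("bytes_disp=%lu io_disp=%lu\n", tg.bytes_disp, tg.io_disp);
		return 0;
	}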