path: root/block/blk-throttle.h
author     Jens Axboe <axboe@kernel.dk>  2022-09-30 16:47:38 +0300
committer  Jens Axboe <axboe@kernel.dk>  2022-09-30 16:47:38 +0300
commit     736feaa3a08124020afe6e51f50bae8598c99f55 (patch)
tree       2bfb7cb73a389229e69cb48a32098f6108bf2dc5 /block/blk-throttle.h
parent     f76349cf41451c5c42a99f18a9163377e4b364ff (diff)
parent     30514bd2dd4e86a3ecfd6a93a3eadf7b9ea164a0 (diff)
Merge branch 'for-6.1/block' into for-6.1/passthrough
* for-6.1/block: (162 commits)
  sbitmap: fix lockup while swapping
  block: add rationale for not using blk_mq_plug() when applicable
  block: adapt blk_mq_plug() to not plug for writes that require a zone lock
  s390/dasd: use blk_mq_alloc_disk
  blk-cgroup: don't update the blkg lookup hint in blkg_conf_prep
  nvmet: don't look at the request_queue in nvmet_bdev_set_limits
  nvmet: don't look at the request_queue in nvmet_bdev_zone_mgmt_emulate_all
  blk-mq: use quiesced elevator switch when reinitializing queues
  block: replace blk_queue_nowait with bdev_nowait
  nvme: remove nvme_ctrl_init_connect_q
  nvme-loop: use the tagset alloc/free helpers
  nvme-loop: store the generic nvme_ctrl in set->driver_data
  nvme-loop: initialize sqsize later
  nvme-fc: use the tagset alloc/free helpers
  nvme-fc: store the generic nvme_ctrl in set->driver_data
  nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  nvme-rdma: use the tagset alloc/free helpers
  nvme-rdma: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: use the tagset alloc/free helpers
  nvme-tcp: store the generic nvme_ctrl in set->driver_data
  ...

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-throttle.h')
 block/blk-throttle.h | 53 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 17 deletions(-)
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index c1b602996127..ef4b7a4de987 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -55,8 +55,7 @@ struct throtl_service_queue {
enum tg_state_flags {
THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
- THROTL_TG_HAS_IOPS_LIMIT = 1 << 2, /* tg has iops limit */
- THROTL_TG_CANCELING = 1 << 3, /* starts to cancel bio */
+ THROTL_TG_CANCELING = 1 << 2, /* starts to cancel bio */
};
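Dropping THROTL_TG_HAS_IOPS_LIMIT lets THROTL_TG_CANCELING slide down to bit 2.
The renumbering is harmless because tg->flags is purely in-kernel state, never
persisted or exported to user space; a minimal sketch of the only kind of use
these bits see (tg_is_canceling() is illustrative, not part of this patch):

/* Illustrative only: flag bits are always tested in-kernel against the
 * same enum, so their numeric values are free to change between versions. */
static inline bool tg_is_canceling(const struct throtl_grp *tg)
{
	return tg->flags & THROTL_TG_CANCELING;
}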
enum {
@@ -99,7 +98,8 @@ struct throtl_grp {
unsigned int flags;
/* are there any throtl rules between this group and td? */
- bool has_rules[2];
+ bool has_rules_bps[2];
+ bool has_rules_iops[2];
/* internally used bytes per second rate limits */
uint64_t bps[2][LIMIT_CNT];
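The single has_rules[2] array is split by limit type so the hot path can
distinguish bps rules from iops rules (the point of blk_should_throtl()
below); the arrays stay per-direction, which is also finer-grained than the
removed THROTL_TG_HAS_IOPS_LIMIT flag. A minimal sketch of how the new
arrays answer the question the flag used to answer (these helpers are
illustrative, not part of this patch; rw is READ or WRITE):

static inline bool tg_has_bps_limit(struct throtl_grp *tg, int rw)
{
	return tg->has_rules_bps[rw];
}

static inline bool tg_has_iops_limit(struct throtl_grp *tg, int rw)
{
	return tg->has_rules_iops[rw];
}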
@@ -121,6 +121,15 @@ struct throtl_grp {
uint64_t last_bytes_disp[2];
unsigned int last_io_disp[2];
+ /*
+ * The following two fields are updated when a new configuration is
+ * submitted while some bios are still throttled. They record how many
+ * bytes/ios already waited under the previous configuration, and they
+ * will be used to calculate the wait time under the new configuration.
+ */
+ uint64_t carryover_bytes[2];
+ unsigned int carryover_ios[2];
+
unsigned long last_check_time;
unsigned long latency_target; /* us */
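The carryover fields exist because a configuration change can land while bios
are still queued; without them, the new limits would start a fresh slice and
discard the wait time already served. A simplified, self-contained model of
the idea in plain C (not the actual blk-throttle.c code, which works in
jiffies and handles slice trimming):

#include <stdint.h>

/* Budget the old limit had granted over the elapsed window. */
static uint64_t bytes_allowed(uint64_t bps, uint64_t elapsed_ms)
{
	return bps * elapsed_ms / 1000;
}

/*
 * On reconfiguration, carry the earned-but-unspent byte budget into the
 * new slice so already-waiting bios are not penalized twice.
 */
static uint64_t carryover_on_reconfig(uint64_t old_bps, uint64_t elapsed_ms,
				      uint64_t bytes_dispatched)
{
	uint64_t allowed = bytes_allowed(old_bps, elapsed_ms);

	/* Clamped at zero here, since the fields above are unsigned. */
	return allowed > bytes_dispatched ? allowed - bytes_dispatched : 0;
}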
@@ -159,27 +168,37 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
* Internal throttling interface
*/
#ifndef CONFIG_BLK_DEV_THROTTLING
-static inline int blk_throtl_init(struct request_queue *q) { return 0; }
-static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_register_queue(struct request_queue *q) { }
+static inline int blk_throtl_init(struct gendisk *disk) { return 0; }
+static inline void blk_throtl_exit(struct gendisk *disk) { }
+static inline void blk_throtl_register(struct gendisk *disk) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
-static inline void blk_throtl_cancel_bios(struct request_queue *q) { }
+static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
#else /* CONFIG_BLK_DEV_THROTTLING */
-int blk_throtl_init(struct request_queue *q);
-void blk_throtl_exit(struct request_queue *q);
-void blk_throtl_register_queue(struct request_queue *q);
+int blk_throtl_init(struct gendisk *disk);
+void blk_throtl_exit(struct gendisk *disk);
+void blk_throtl_register(struct gendisk *disk);
bool __blk_throtl_bio(struct bio *bio);
-void blk_throtl_cancel_bios(struct request_queue *q);
-static inline bool blk_throtl_bio(struct bio *bio)
+void blk_throtl_cancel_bios(struct gendisk *disk);
+
+static inline bool blk_should_throtl(struct bio *bio)
{
struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
+ int rw = bio_data_dir(bio);
- /* no need to throttle bps any more if the bio has been throttled */
- if (bio_flagged(bio, BIO_THROTTLED) &&
- !(tg->flags & THROTL_TG_HAS_IOPS_LIMIT))
- return false;
+ /* iops limit is always counted */
+ if (tg->has_rules_iops[rw])
+ return true;
+
+ if (tg->has_rules_bps[rw] && !bio_flagged(bio, BIO_BPS_THROTTLED))
+ return true;
+
+ return false;
+}
+
+static inline bool blk_throtl_bio(struct bio *bio)
+{
- if (!tg->has_rules[bio_data_dir(bio)])
+ if (!blk_should_throtl(bio))
return false;
return __blk_throtl_bio(bio);
}
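Taken together, the split makes the hot path's rule explicit: an iops limit
is charged on every pass, because a bio split into N parts really is N IOs,
while a bps limit is charged only once per bio's bytes, which is what the
BIO_THROTTLED to BIO_BPS_THROTTLED rename captures. A standalone restatement
of the decision (illustrative, mirroring blk_should_throtl() above):

#include <stdbool.h>

static bool should_throtl(bool has_iops_rule, bool has_bps_rule,
			  bool bps_already_charged)
{
	/* iops is always counted: each (split) bio is a distinct IO. */
	if (has_iops_rule)
		return true;

	/* bps is counted once; BIO_BPS_THROTTLED marks the bytes as charged. */
	return has_bps_rule && !bps_already_charged;
}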