author     Jens Axboe <axboe@kernel.dk>    2021-11-04 21:45:51 +0300
committer  Jens Axboe <axboe@kernel.dk>    2021-11-04 21:54:33 +0300
commit     c98cb5bbdab10d187aff9b4e386210eb2332af96
tree       b1cbcd39a591ee32896da9146abb490fa34d9183 /block
parent     71539717c10521114403d27e171c9cbe35dcd900
download   linux-c98cb5bbdab10d187aff9b4e386210eb2332af96.tar.xz
block: make bio_queue_enter() fast-path available inline
Just a prep patch for shifting the queue enter logic. This moves the
expected fast path inline, and leaves __bio_queue_enter() as an
out-of-line function call. We don't want to inline the latter, as it's
mostly slow path code.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 28 |
-rw-r--r-- | block/blk.h      | 34 |
2 files changed, 35 insertions, 27 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 35a87c06276e..9ca3ddd154d4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
-	rcu_read_lock();
-	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
-		goto fail;
-
-	/*
-	 * The code that increments the pm_only counter must ensure that the
-	 * counter is globally visible before the queue is unfrozen.
-	 */
-	if (blk_queue_pm_only(q) &&
-	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
-		goto fail_put;
-
-	rcu_read_unlock();
-	return true;
-
-fail_put:
-	blk_queue_exit(q);
-fail:
-	rcu_read_unlock();
-	return false;
-}
-
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 	return 0;
 }
 
-static inline int bio_queue_enter(struct bio *bio)
+int __bio_queue_enter(struct request_queue *q, struct bio *bio)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
 	while (!blk_try_enter_queue(q, false)) {
 		struct gendisk *disk = bio->bi_bdev->bd_disk;
 
diff --git a/block/blk.h b/block/blk.h
index 7afffd548daf..814d9632d43e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -55,6 +55,40 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
+int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+
+static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	blk_queue_exit(q);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
+static inline int bio_queue_enter(struct bio *bio)
+{
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+	if (blk_try_enter_queue(q, false))
+		return 0;
+	return __bio_queue_enter(q, bio);
+}
+
 #define BIO_INLINE_VECS 4
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
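
The structure the patch adopts - a small static inline fast path in a header that only calls out of line when the cheap attempt fails - can be illustrated outside the kernel with a minimal, self-contained sketch. All names below (struct queue, queue_try_enter, queue_enter, queue_enter_slow) are hypothetical stand-ins for bio_queue_enter()/__bio_queue_enter(); this is not the kernel API, just the shape of the split under those assumptions.

/* enter.h - hypothetical header modelled on the fast/slow split in this patch */
#ifndef ENTER_H
#define ENTER_H

#include <stdbool.h>

struct queue;                        /* opaque; stands in for struct request_queue */

/* Cheap, usually-successful attempt to take a reference on the queue. */
bool queue_try_enter(struct queue *q);

/*
 * Out-of-line slow path, defined once in enter.c.  It may sleep and retry
 * until the queue is usable again - code we do not want duplicated at
 * every call site.
 */
int queue_enter_slow(struct queue *q);

/* The fast path every caller inlines: one branch in the common case. */
static inline int queue_enter(struct queue *q)
{
	if (queue_try_enter(q))
		return 0;             /* common case: entered immediately   */
	return queue_enter_slow(q);   /* rare case: fall back out of line   */
}

#endif /* ENTER_H */

Keeping queue_enter_slow() (here, __bio_queue_enter() in the real patch) out of line keeps the per-call-site cost to a single predicted-taken branch and avoids bloating every submitter's instruction cache with retry/wait logic that almost never runs.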