author     Ming Lei <ming.lei@redhat.com>  2021-11-18 18:30:41 +0300
committer  Jens Axboe <axboe@kernel.dk>    2021-11-19 16:28:18 +0300
commit     2b504bd4841bccbf3eb83c1fec229b65956ad8ad (patch)
tree       378f430323f71a363397750bd26a821a080ef011
parent     15c30104965101b8e76b24d27035569d6613a7d6 (diff)
blk-mq: don't insert FUA request with data into scheduler queue
Flush requests have never been inserted into the scheduler queue before.
The recent commit d92ca9d8348f ("blk-mq: don't handle non-flush requests
in blk_insert_flush") started handling FUA requests with data as normal
requests. This causes a warning [1] in mq-deadline's dd_exit_sched(), or an
I/O hang with kyber, because RQF_ELVPRIV is not set for flush requests, so
->finish_request() is never called and the scheduler's accounting never
balances out.
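
To illustrate the imbalance described above, here is a toy user-space model
(all names such as sched_owned and insert_into_scheduler are invented for
illustration; this is not kernel code): the block core only calls the
elevator's ->finish_request() for requests carrying RQF_ELVPRIV, so a
flush/FUA request that reaches the scheduler queue is never "finished" from
the scheduler's point of view.

	#include <stdio.h>

	/* Toy model: ->finish_request() only runs for RQF_ELVPRIV
	 * requests, so a flush/FUA request inserted into the scheduler
	 * queue leaves the scheduler's accounting unbalanced.
	 */
	#define RQF_ELVPRIV (1u << 0)

	struct request { unsigned int rq_flags; };

	static int sched_owned; /* requests the scheduler believes it owns */

	static void insert_into_scheduler(struct request *rq)
	{
		(void)rq;
		sched_owned++;	/* dd_insert_request()-style accounting */
	}

	static void finish_request(struct request *rq)
	{
		(void)rq;
		sched_owned--;	/* dd_finish_request()-style accounting */
	}

	static void free_request(struct request *rq)
	{
		/* the core skips ->finish_request() without RQF_ELVPRIV */
		if (rq->rq_flags & RQF_ELVPRIV)
			finish_request(rq);
	}

	int main(void)
	{
		struct request fua = { .rq_flags = 0 }; /* no RQF_ELVPRIV */

		insert_into_scheduler(&fua); /* what d92ca9d8348f ended up doing */
		free_request(&fua);          /* ->finish_request() never runs */

		printf("scheduler-owned requests at exit: %d\n", sched_owned);
		/* prints 1: the imbalance behind the dd_exit_sched() warning */
		return 0;
	}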
Fix the issue by inserting FUA requests with data via
blk_mq_request_bypass_insert() when the device supports FUA, just as was
done before.
[1] https://lore.kernel.org/linux-block/CAHj4cs-_vkTW=dAzbZYGxpEWSpzpcmaNeY1R=vH311+9vMUSdg@mail.gmail.com/
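
For context on the helper the fix relies on, a rough sketch of what
blk_mq_request_bypass_insert() does on kernels of this era (paraphrased
from block/blk-mq.c; simplified, not the verbatim implementation): the
request goes straight onto the hardware context's dispatch list, bypassing
the elevator entirely, which is why RQF_ELVPRIV never matters on this path.

	/* Simplified paraphrase of blk_mq_request_bypass_insert()
	 * (block/blk-mq.c); not the verbatim kernel code. The request is
	 * queued directly on the hctx dispatch list, so the elevator
	 * never sees it.
	 */
	void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
					  bool run_queue)
	{
		struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

		spin_lock(&hctx->lock);
		if (at_head)
			list_add(&rq->queuelist, &hctx->dispatch);
		else
			list_add_tail(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		if (run_queue)
			blk_mq_run_hw_queue(hctx, false);
	}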
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Fixes: d92ca9d8348f ("blk-mq: don't handle non-flush requests in blk_insert_flush")
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20211118153041.2163228-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  block/blk-flush.c | 12 ++++++------
-rw-r--r--  block/blk-mq.c    |  4 +++-
-rw-r--r--  block/blk.h       |  2 +-
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 8e364bda5166..1fce6d16e6d3 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * @rq is being submitted. Analyze what needs to be done and put it on the
  * right queue.
  */
-bool blk_insert_flush(struct request *rq)
+void blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned long fflags = q->queue_flags;	/* may change, cache */
@@ -409,7 +409,7 @@ bool blk_insert_flush(struct request *rq)
 	 */
 	if (!policy) {
 		blk_mq_end_request(rq, 0);
-		return true;
+		return;
 	}
 
 	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,8 +420,10 @@ bool blk_insert_flush(struct request *rq)
 	 * for normal execution.
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
-	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
-		return false;
+	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
+		blk_mq_request_bypass_insert(rq, false, true);
+		return;
+	}
 
 	/*
 	 * @rq should go through flush machinery. Mark it part of flush
@@ -437,8 +439,6 @@ bool blk_insert_flush(struct request *rq)
 	spin_lock_irq(&fq->mq_flush_lock);
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 	spin_unlock_irq(&fq->mq_flush_lock);
-
-	return true;
 }
 
 /**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index eecbd7e6fea2..8799fa73ef34 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2647,8 +2647,10 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
+	if (op_is_flush(bio->bi_opf)) {
+		blk_insert_flush(rq);
 		return;
+	}
 
 	if (plug && (q->nr_hw_queues == 1 ||
 	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
diff --git a/block/blk.h b/block/blk.h
index b4fed2033e48..ccde6e6f1736 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -271,7 +271,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq)	((rq)->rq_flags & RQF_HASHED)
 
-bool blk_insert_flush(struct request *rq);
+void blk_insert_flush(struct request *rq);
 
 int elevator_switch_mq(struct request_queue *q,
 		struct elevator_type *new_e);