| author | Christoph Hellwig <hch@lst.de> | 2022-05-24 15:15:28 +0300 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2022-05-28 15:15:27 +0300 |
| commit | ae948fd6d02930a7e8e7c492d9627dfef18e7d7f (patch) | |
| tree | cd6e1b79b795628f11910b9be02f26f66579e210 /block | |
| parent | bf272460d744112bacd4c4d562592decbf0edf64 (diff) | |
| download | linux-ae948fd6d02930a7e8e7c492d9627dfef18e7d7f.tar.xz | |
blk-mq: remove __blk_execute_rq_nowait
We don't want to plug for synchronous execution where we immediately
wait for the request. Once that is done, not a whole lot of code is
shared, so just remove __blk_execute_rq_nowait.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220524121530.943123-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
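To see why plugging only pays off on the asynchronous path, here is a minimal sketch of batching passthrough requests under a plug. It is illustrative only, not part of the patch: `submit_batch` and its parameters are hypothetical, while `blk_start_plug()`, `blk_finish_plug()`, and `blk_execute_rq_nowait()` are the real interfaces as of this commit.

```c
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical illustration: while current->plug is set by
 * blk_start_plug(), blk_execute_rq_nowait() adds each passthrough
 * request to the task's plug list instead of inserting it into the
 * scheduler; blk_finish_plug() then flushes the whole batch to the
 * driver in one go.
 */
static void submit_batch(struct request *rqs[], int n, rq_end_io_fn *done)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < n; i++)
		blk_execute_rq_nowait(rqs[i], false, done);
	blk_finish_plug(&plug);
}
```

A synchronous caller gains nothing from this: blk_execute_rq() blocks until its single request completes, so a plug would be flushed immediately anyway, which is why the rewrite below inserts directly into the scheduler.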
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 69
1 file changed, 30 insertions(+), 39 deletions(-)
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae116b755648..31a89d1004b8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1203,28 +1203,6 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *done, bool use_plug)
-{
-	WARN_ON(irqs_disabled());
-	WARN_ON(!blk_rq_is_passthrough(rq));
-
-	rq->end_io = done;
-
-	blk_account_io_start(rq);
-
-	if (use_plug && current->plug) {
-		blk_add_rq_to_plug(current->plug, rq);
-		return;
-	}
-	/*
-	 * don't check dying flag for MQ because the request won't
-	 * be reused after dying flag is set
-	 */
-	blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:	request to insert
@@ -1240,8 +1218,16 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  */
 void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 {
-	__blk_execute_rq_nowait(rq, at_head, done, true);
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+	rq->end_io = done;
+
+	blk_account_io_start(rq);
+	if (current->plug)
+		blk_add_rq_to_plug(current->plug, rq);
+	else
+		blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -1277,27 +1263,32 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
-	unsigned long hang_check;
 
-	/*
-	 * iopoll requires request to be submitted to driver, so can't
-	 * use plug
-	 */
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
 	rq->end_io_data = &wait;
-	__blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-			!blk_rq_is_poll(rq));
+	rq->end_io = blk_end_sync_rq;
 
-	/* Prevent hang_check timer from firing at us during very long I/O */
-	hang_check = sysctl_hung_task_timeout_secs;
+	blk_account_io_start(rq);
+	blk_mq_sched_insert_request(rq, at_head, true, false);
 
-	if (blk_rq_is_poll(rq))
+	if (blk_rq_is_poll(rq)) {
 		blk_rq_poll_completion(rq, &wait);
-	else if (hang_check)
-		while (!wait_for_completion_io_timeout(&wait,
-				hang_check * (HZ/2)))
-			;
-	else
-		wait_for_completion_io(&wait);
+	} else {
+		/*
+		 * Prevent hang_check timer from firing at us during very long
+		 * I/O
+		 */
+		unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+		if (hang_check)
+			while (!wait_for_completion_io_timeout(&wait,
+					hang_check * (HZ/2)))
+				;
+		else
+			wait_for_completion_io(&wait);
+	}
 
 	return (blk_status_t)(uintptr_t)rq->end_io_data;
 }
```
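One detail the rewrite keeps: a task sleeping uninterruptibly for longer than `sysctl_hung_task_timeout_secs` trips the hung-task watchdog, so the non-polled wait is sliced into chunks of half that timeout. Below is a standalone sketch of the idiom, assuming only what the hunk above shows (`wait_quietly` is an illustrative name, not a kernel API):

```c
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/sched/sysctl.h>

/*
 * Illustrative only: wake up every hang_check/2 seconds so the
 * hung-task detector never sees a full timeout's worth of
 * uninterruptible sleep, then immediately resume waiting.
 */
static void wait_quietly(struct completion *done)
{
	unsigned long hang_check = sysctl_hung_task_timeout_secs;

	if (hang_check)
		while (!wait_for_completion_io_timeout(done,
				hang_check * (HZ / 2)))
			;	/* timed out: keep waiting */
	else
		wait_for_completion_io(done);
}
```

If the sysctl is 0 the watchdog is disabled and a plain `wait_for_completion_io()` suffices.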