author     Christoph Hellwig <hch@lst.de>     2022-05-24 15:15:29 +0300
committer  Jens Axboe <axboe@kernel.dk>       2022-05-28 15:15:27 +0300
commit     32ac5a9b8bc511edcd81f03c3e21753789475709 (patch)
tree       fd8895fa48e7b7904a04310077538fbffaf1fde0 /block
parent     ae948fd6d02930a7e8e7c492d9627dfef18e7d7f (diff)
blk-mq: avoid a mess of casts for blk_end_sync_rq
Instead of casting a __bitwise 32-bit integer to a larger integer and
then to a pointer, put a struct holding the blk_status_t and the
completion on the stack and point end_io_data at it. Use the
opportunity to move the code to where it belongs and drop the rather
confusing comments.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220524121530.943123-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c   43
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 31a89d1004b8..28b3e6db9849 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1151,24 +1151,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-	struct completion *waiting = rq->end_io_data;
-
-	rq->end_io_data = (void *)(uintptr_t)error;
-
-	/*
-	 * complete last, if this is a stack request the process (and thus
-	 * the rq pointer) could be invalid right after this complete()
-	 */
-	complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -1231,6 +1213,19 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+	struct completion done;
+	blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+	struct blk_rq_wait *wait = rq->end_io_data;
+
+	wait->ret = ret;
+	complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
 	if (!rq->mq_hctx)
@@ -1262,7 +1257,9 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
+	struct blk_rq_wait wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
 
 	WARN_ON(irqs_disabled());
 	WARN_ON(!blk_rq_is_passthrough(rq));
@@ -1274,7 +1271,7 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	blk_mq_sched_insert_request(rq, at_head, true, false);
 
 	if (blk_rq_is_poll(rq)) {
-		blk_rq_poll_completion(rq, &wait);
+		blk_rq_poll_completion(rq, &wait.done);
 	} else {
 		/*
 		 * Prevent hang_check timer from firing at us during very long
@@ -1283,14 +1280,14 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 		unsigned long hang_check = sysctl_hung_task_timeout_secs;
 
 		if (hang_check)
-			while (!wait_for_completion_io_timeout(&wait,
+			while (!wait_for_completion_io_timeout(&wait.done,
 					hang_check * (HZ/2)))
 				;
 		else
-			wait_for_completion_io(&wait);
+			wait_for_completion_io(&wait.done);
 	}
 
-	return (blk_status_t)(uintptr_t)rq->end_io_data;
+	return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
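
For illustration only, here is a minimal userspace sketch (not kernel code and not part of this patch) of the pattern the diff introduces: instead of smuggling the status through end_io_data with integer/pointer casts, the caller keeps a small struct on its stack that holds both the status and the wait primitive, and the completion callback fills it in. A pthread mutex/condvar pair stands in for struct completion, and all demo_* names are hypothetical.

/*
 * Userspace sketch of the on-stack wait-struct pattern.  pthread_cond_t
 * plays the role of struct completion; demo_request, demo_submit and
 * demo_status are made-up stand-ins, not real block layer APIs.
 */
#include <pthread.h>
#include <stdio.h>

typedef int demo_status;		/* stand-in for blk_status_t */

struct demo_request {
	void (*end_io)(struct demo_request *rq, demo_status ret);
	void *end_io_data;
};

struct demo_rq_wait {			/* analogue of struct blk_rq_wait */
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
	demo_status     ret;
};

static void demo_end_sync_rq(struct demo_request *rq, demo_status ret)
{
	struct demo_rq_wait *wait = rq->end_io_data;

	/* Store the result in the caller's typed field, then wake it up. */
	pthread_mutex_lock(&wait->lock);
	wait->ret = ret;
	wait->done = 1;
	pthread_cond_signal(&wait->cond);
	pthread_mutex_unlock(&wait->lock);
}

/* Pretend the request completes immediately; a real driver is asynchronous. */
static void demo_submit(struct demo_request *rq)
{
	rq->end_io(rq, 0);
}

static demo_status demo_execute_rq(struct demo_request *rq)
{
	struct demo_rq_wait wait = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	rq->end_io = demo_end_sync_rq;
	rq->end_io_data = &wait;
	demo_submit(rq);

	pthread_mutex_lock(&wait.lock);
	while (!wait.done)
		pthread_cond_wait(&wait.cond, &wait.lock);
	pthread_mutex_unlock(&wait.lock);

	return wait.ret;		/* no uintptr_t casts needed */
}

int main(void)
{
	struct demo_request rq = { 0 };

	printf("status: %d\n", demo_execute_rq(&rq));
	return 0;
}

The kernel version is shorter because struct completion already bundles the lock, the wait queue and the done flag; the point in both cases is the same, namely that the result travels through a typed struct member instead of being cast into and back out of a void pointer.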