author		Ming Lei <ming.lei@redhat.com>	2022-05-22 15:23:50 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-06-14 19:44:53 +0300
commit		70fdd922c7bf8949f8df109cf2635dff64c90392 (patch)
tree		c7aa1f0d3a715139bc88068b9d9ca9edf12a4197
parent		f067b5286edfd83d2d3903e8578b561599d62539 (diff)
download	linux-70fdd922c7bf8949f8df109cf2635dff64c90392.tar.xz
blk-mq: don't touch ->tagset in blk_mq_get_sq_hctx
[ Upstream commit 5d05426e2d5fd7df8afc866b78c36b37b00188b7 ]

blk_mq_run_hw_queues() can be called when there are no queued requests,
and even after the queue has been cleaned up. By that point the tagset
may already be freed: the tagset's lifetime is owned by the driver, and
it is often freed right after blk_cleanup_queue() returns.

So don't touch ->tagset when figuring out the current default hctx;
use the mapping built into the request queue instead, which avoids the
use-after-free on the tagset. This path should also be faster than
retrieving the mapping from the tagset.

Cc: "yukuai (C)" <yukuai3@huawei.com>
Cc: Jan Kara <jack@suse.cz>
Fixes: b6e68ee82585 ("blk-mq: Improve performance of non-mq IO schedulers with multiple HW queues")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220522122350.743103-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
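To see why the old lookup is unsafe, here is a minimal user-space model of
the two paths. The struct layouts below are simplified stand-ins for the
real definitions in block/blk-mq.h and include/linux/blk-mq.h of that era,
not the actual kernel types:

	/* Simplified stand-ins for the kernel structures (illustrative only). */
	struct blk_mq_hw_ctx { int queue_num; };

	struct blk_mq_queue_map {
		unsigned int *mq_map;             /* cpu -> hw queue index */
	};

	struct blk_mq_tag_set {                   /* driver-owned; driver frees it */
		struct blk_mq_queue_map map;
	};

	struct blk_mq_ctx {
		struct blk_mq_hw_ctx **hctxs;     /* per-type map cached in the queue */
	};

	struct request_queue {
		struct blk_mq_tag_set *tag_set;   /* dangles once the driver frees it */
		struct blk_mq_hw_ctx **queue_hw_ctx;
		struct blk_mq_ctx *ctx;           /* queue-owned; valid while q lives */
	};

	/* Old path: chases q->tag_set, i.e. driver-owned memory that may already
	 * be freed once blk_cleanup_queue() has returned -- the use-after-free. */
	static struct blk_mq_hw_ctx *old_lookup(struct request_queue *q,
						unsigned int cpu)
	{
		return q->queue_hw_ctx[q->tag_set->map.mq_map[cpu]];
	}

	/* New path: only dereferences memory owned by the request queue itself. */
	static struct blk_mq_hw_ctx *new_lookup(struct request_queue *q)
	{
		return q->ctx->hctxs[0];          /* index 0 ~ HCTX_TYPE_DEFAULT */
	}

The fix works because the per-ctx hctx map is populated at queue init and
lives in queue-owned memory, so it stays valid for as long as the request
queue itself does.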
-rw-r--r--	block/blk-mq.c	| 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 84d749511f55..9d33e0032fee 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2123,8 +2123,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q)
  */
 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 {
-	struct blk_mq_hw_ctx *hctx;
-
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 	/*
 	 * If the IO scheduler does not respect hardware queues when
 	 * dispatching, we just don't bother with multiple HW queues and
@@ -2132,8 +2131,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 	 * just causes lock contention inside the scheduler and pointless cache
 	 * bouncing.
 	 */
-	hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
-			raw_smp_processor_id());
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
+
 	if (!blk_mq_hctx_stopped(hctx))
 		return hctx;
 	return NULL;
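For context, a sketch of the teardown ordering the commit message describes.
The block-layer calls are real APIs of that era; "my_remove" and the
surrounding variables are hypothetical placeholders, not any specific
driver's code:

	/* Hypothetical driver teardown ordering (illustrative only). */
	static void my_remove(struct my_dev *dev)
	{
		blk_cleanup_queue(dev->q);      /* queue dying; dev->q still allocated */
		blk_mq_free_tag_set(&dev->set); /* frees what dev->q->tag_set points at */
	}

	/*
	 * A blk_mq_run_hw_queues(dev->q) that races past this point must not
	 * dereference ->tagset (freed above); the per-ctx mapping cached in
	 * the queue stays valid for as long as the queue object itself does.
	 */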