author		Keith Busch <kbusch@kernel.org>	2024-09-06 22:45:40 +0300
committer	Jens Axboe <axboe@kernel.dk>	2024-09-07 16:41:12 +0300
commit		acc8c0a9887558966295e3c0881e633154c239a9 (patch)
tree		f02426d1690d20263391b26ff3391802605c685e
parent		a02e98bebc15f1d973c2a62005be9456a657e2b6 (diff)
download	linux-acc8c0a9887558966295e3c0881e633154c239a9.tar.xz
blk-mq: add missing unplug trace event
The single-queue optimized list flush doesn't have an unplug trace event to
pair with the plug event. Add one.

In the unlikely event an error occurs and we fall back to the less optimized
plug flush path, it's possible a 2nd unplug trace event will be logged, but
it will show the remaining count of requests that weren't previously handled.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20240906194540.3719642-1-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
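To illustrate the pairing the message describes, here is a minimal standalone
userspace C sketch, not kernel code: trace_plug(), trace_unplug(), and
fake_queue_rqs() are illustrative stand-ins for the trace_block_plug() /
trace_block_unplug() tracepoints and the driver's ->queue_rqs() hook, and the
request counts are made up. It only models why, when the optimized flush
handles part of the plugged list and the remainder falls back to the slower
flush path, a second unplug event can be logged carrying the remaining count.

/*
 * Userspace model of the plug/unplug trace pairing -- not kernel code.
 * fake_queue_rqs() and the counts below are illustrative stand-ins.
 */
#include <stdio.h>

/* Stand-in for trace_block_plug(). */
static void trace_plug(void)
{
	printf("block_plug\n");
}

/* Stand-in for trace_block_unplug(q, depth, explicit). */
static void trace_unplug(unsigned int depth, int explicit)
{
	printf("block_unplug: %u requests, explicit=%d\n", depth, explicit);
}

/* Pretend the optimized path only manages to issue 'issued' requests. */
static unsigned int fake_queue_rqs(unsigned int depth, unsigned int issued)
{
	return depth - issued;	/* requests left over for the fallback path */
}

int main(void)
{
	unsigned int depth = 8;	/* models plug->rq_count at flush time */
	unsigned int remaining;

	trace_plug();		/* logged when the plug was started */

	/* Patched fast path: unplug event now pairs with the plug event. */
	trace_unplug(depth, 1);
	remaining = fake_queue_rqs(depth, 5);

	/* Fallback: the slower flush logs its own unplug with what's left. */
	if (remaining)
		trace_unplug(remaining, 1);

	return 0;
}

Compiled and run, the model prints one block_plug line followed by two
block_unplug lines (8 requests, then the remaining 3), mirroring the pairing
the patch establishes and the possible second event on the fallback path.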
-rw-r--r--	block/blk-mq.c	3
1 file changed, 3 insertions, 0 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 36abbaefe387..3f1f7d0b3ff3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2753,6 +2753,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request *rq;
+	unsigned int depth;
 
 	/*
 	 * We may have been called recursively midway through handling
@@ -2763,6 +2764,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 */
 	if (plug->rq_count == 0)
 		return;
+	depth = plug->rq_count;
 	plug->rq_count = 0;
 
 	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
@@ -2770,6 +2772,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		rq = rq_list_peek(&plug->mq_list);
 		q = rq->q;
+		trace_block_unplug(q, depth, true);
 
 		/*
 		 * Peek first request and see if we have a ->queue_rqs() hook.