Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq-sched.c | 24
-rw-r--r-- | block/blk-mq-sched.h | 3
-rw-r--r-- | block/blk-mq.c | 17
-rw-r--r-- | block/blk-mq.h | 2
-rw-r--r-- | block/mq-deadline.c | 2
5 files changed, 14 insertions, 34 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 811a9765b745..9c0d231722d9 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -455,30 +455,6 @@ run:
 	blk_mq_run_hw_queue(hctx, async);
 }
 
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct blk_mq_ctx *ctx,
-				  struct list_head *list, bool run_queue_async)
-{
-	struct elevator_queue *e;
-	struct request_queue *q = hctx->queue;
-
-	/*
-	 * blk_mq_sched_insert_requests() is called from flush plug
-	 * context only, and hold one usage counter to prevent queue
-	 * from being released.
-	 */
-	percpu_ref_get(&q->q_usage_counter);
-
-	e = hctx->queue->elevator;
-	if (e) {
-		e->type->ops.insert_requests(hctx, list, false);
-		blk_mq_run_hw_queue(hctx, run_queue_async);
-	} else {
-		blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
-	}
-	percpu_ref_put(&q->q_usage_counter);
-}
-
 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
 					  struct blk_mq_hw_ctx *hctx,
 					  unsigned int hctx_idx)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 65cab6e475be..1ec01e9934dc 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,9 +18,6 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 				 bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct blk_mq_ctx *ctx,
-				  struct list_head *list, bool run_queue_async);
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 536f001282bb..f1da4f053cc6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2497,9 +2497,9 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 		blk_mq_run_hw_queue(hctx, false);
 }
 
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list, bool run_queue_async)
-
+static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_ctx *ctx, struct list_head *list,
+		bool run_queue_async)
 {
 	struct request *rq;
 	enum hctx_type type = hctx->type;
@@ -2725,7 +2725,16 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 
 	plug->mq_list = requeue_list;
 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
-	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+
+	percpu_ref_get(&this_hctx->queue->q_usage_counter);
+	if (this_hctx->queue->elevator) {
+		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
+				&list, false);
+		blk_mq_run_hw_queue(this_hctx, from_sched);
+	} else {
+		blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
+	}
+	percpu_ref_put(&this_hctx->queue->q_usage_counter);
 }
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5d551f9ef2d6..bd7ae5e67a52 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -69,8 +69,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 				  bool at_head);
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue);
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index af9e79050dcc..d62a3039c8e0 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -820,7 +820,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 }
 
 /*
- * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ * Called from blk_mq_sched_insert_request() or blk_mq_dispatch_plug_list().
 */
 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 			       struct list_head *list, bool at_head)
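For readers who want the new control flow outside diff context: the hunk in blk_mq_dispatch_plug_list() now open-codes what blk_mq_sched_insert_requests() used to do, i.e. pin the queue via q_usage_counter, hand the plugged batch to the elevator's ->insert_requests() hook when an I/O scheduler is attached (and then run the hardware queue), or fall back to blk_mq_insert_requests() otherwise. The sketch below is a minimal, self-contained user-space model of that pattern only, not kernel code; every name in it (model_queue, sched_ops, dispatch_batch, direct_insert, deadline_insert) is made up for illustration.

/*
 * Simplified user-space model of the dispatch pattern the patch inlines
 * into blk_mq_dispatch_plug_list(): take a reference on the queue for
 * the duration of the dispatch, hand the batch either to a scheduler
 * hook (if one is attached) or to the direct insert path, then drop
 * the reference.  None of these names are kernel APIs.
 */
#include <stdio.h>

struct model_queue;

struct sched_ops {
	/* stands in for elevator_type->ops.insert_requests */
	void (*insert_requests)(struct model_queue *q, int nr_requests);
};

struct model_queue {
	int usage_counter;		/* stands in for q_usage_counter */
	const struct sched_ops *sched;	/* NULL means "no elevator" */
};

static void direct_insert(struct model_queue *q, int nr)
{
	(void)q;
	printf("direct insert of %d request(s)\n", nr);
}

static void deadline_insert(struct model_queue *q, int nr)
{
	(void)q;
	printf("scheduler insert of %d request(s)\n", nr);
}

static void dispatch_batch(struct model_queue *q, int nr)
{
	/* hold a reference so the queue cannot go away under us */
	q->usage_counter++;

	if (q->sched)
		q->sched->insert_requests(q, nr);	/* scheduler path */
	else
		direct_insert(q, nr);			/* no-elevator path */

	q->usage_counter--;
}

int main(void)
{
	static const struct sched_ops deadline = {
		.insert_requests = deadline_insert,
	};
	struct model_queue no_elv = { 0, NULL };
	struct model_queue with_elv = { 0, &deadline };

	dispatch_batch(&no_elv, 4);	/* takes the direct path */
	dispatch_batch(&with_elv, 4);	/* goes through the scheduler hook */
	return 0;
}

The only point of the model is that the reference is held across whichever insert path is taken, mirroring the percpu_ref_get()/percpu_ref_put() pair that the patch moves from blk_mq_sched_insert_requests() into blk_mq_dispatch_plug_list().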