author | Christoph Hellwig <hch@lst.de> | 2017-06-16 19:15:22 +0300
committer | Jens Axboe <axboe@kernel.dk> | 2017-06-18 19:08:55 +0300
commit | 6af54051a07041d8d4e36b1b01136a0db4eb7e23 (patch)
tree | 283576b0be587fc2b088d75cd2195525eb9e8460 /block/blk-mq.c
parent | 7b9e93616399638521aafd1f01dfcf474c736393 (diff)
download | linux-6af54051a07041d8d4e36b1b01136a0db4eb7e23.tar.xz
blk-mq: simplify blk_mq_free_request
Merge the three functions that are only tail-called by blk_mq_free_request
into blk_mq_free_request itself.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 50
1 file changed, 15 insertions, 35 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9df7e0394a48..0b17351fccfc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -395,12 +395,24 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			     struct request *rq)
+void blk_mq_free_request(struct request *rq)
 {
-	const int sched_tag = rq->internal_tag;
 	struct request_queue *q = rq->q;
+	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	const int sched_tag = rq->internal_tag;
 
+	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
+		if (e && e->type->ops.mq.finish_request)
+			e->type->ops.mq.finish_request(rq);
+		if (rq->elv.icq) {
+			put_io_context(rq->elv.icq->ioc);
+			rq->elv.icq = NULL;
+		}
+	}
+
+	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
 
@@ -416,38 +428,6 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
-
-static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
-				       struct request *rq)
-{
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-
-	ctx->rq_completed[rq_is_sync(rq)]++;
-	__blk_mq_finish_request(hctx, ctx, rq);
-}
-
-void blk_mq_finish_request(struct request *rq)
-{
-	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_finish_request);
-
-void blk_mq_free_request(struct request *rq)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-
-	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
-		if (e && e->type->ops.mq.finish_request)
-			e->type->ops.mq.finish_request(rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
-
-	blk_mq_finish_request(rq);
-}
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
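For readers outside the kernel tree, the sketch below is a minimal userspace illustration of the pattern the patch applies, not kernel code: helpers that are only ever tail-called from a single free path get folded into that caller, so the teardown reads top to bottom in one function. All names here (struct buf, buf_free, the field names) are hypothetical and chosen only for the example.

```c
/*
 * "After" shape of the refactoring: one entry point, with the work of the
 * former tail-called helpers inlined in order. This mirrors the structure
 * of the merged blk_mq_free_request() above, not its actual contents.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	int   sync;		/* stand-in for rq_is_sync()-style state */
	int   completed[2];	/* stand-in for per-ctx accounting */
	char *data;		/* stand-in for conditionally held state */
};

static void buf_free(struct buf *b)
{
	/* was: a small accounting helper, only called from here */
	b->completed[!!b->sync]++;
	printf("completed[%d] = %d\n", !!b->sync, b->completed[!!b->sync]);

	/* was: a conditional cleanup helper, only called from here */
	if (b->data) {
		free(b->data);
		b->data = NULL;
	}

	/* was: the final release step */
	free(b);
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->sync = 1;
	b->data = malloc(32);
	buf_free(b);
	return 0;
}
```

The payoff is the same as in the patch: no exported intermediate entry points to keep in sync, and the declarations the merged function needs (queue, elevator, ctx, hctx, sched_tag in the kernel case) are gathered once at the top instead of being re-derived in each helper.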