diff options
author | Christoph Hellwig <hch@lst.de> | 2016-09-22 21:38:23 +0300 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-09-22 23:27:39 +0300 |
commit | 63581af3f31e0dbea112b83f77c4fbb6a10e1406 (patch) | |
tree | ca9d5f19448c800daffee3183e197ff361a2c495 /block/blk-mq.c | |
parent | 841bac2c87fc21c3ecf3bc3354855921735aeec1 (diff) | |
download | linux-63581af3f31e0dbea112b83f77c4fbb6a10e1406.tar.xz |
blk-mq: remove non-blocking pass in blk_mq_map_request
bt_get already does a non-blocking pass as well as running the queue
when scheduling internally, no need to duplicate it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 14 |
1 file changed, 1 insertion(+), 13 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c29700010b5c..80d483864247 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1210,20 +1210,8 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		op_flags |= REQ_SYNC;

 	trace_block_getrq(q, bio, op);
-	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
+	blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
-	if (unlikely(!rq)) {
-		blk_mq_run_hw_queue(hctx, false);
-		blk_mq_put_ctx(ctx);
-		trace_block_sleeprq(q, bio, op);
-
-		ctx = blk_mq_get_ctx(q);
-		hctx = q->mq_ops->map_queue(q, ctx->cpu);
-		blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-		rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
-		ctx = alloc_data.ctx;
-		hctx = alloc_data.hctx;
-	}

 	hctx->queued++;
 	data->hctx = hctx;