| author | Tejun Heo <tj@kernel.org> | 2012-04-20 03:29:21 +0400 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2012-04-20 12:06:40 +0400 |
| commit | 29e2b09ab5fa790514d47838f3c05497130908b3 (patch) | |
| tree | aa430587f78d90d3108c1885f8049da484631935 /block/blk-core.c | |
| parent | f9fcc2d3919b8eb575b3cee9274feefafb641bca (diff) | |
| download | linux-29e2b09ab5fa790514d47838f3c05497130908b3.tar.xz | |
block: collapse blk_alloc_request() into get_request()
Allocation failure handling in get_request() is about to be updated.
To ease the update, collapse blk_alloc_request() into get_request().
This patch doesn't introduce any functional change.
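The shape of the change is easier to see in miniature. Below is a minimal userspace C analogue (all names hypothetical, kernel details elided; this is not the kernel code): a helper that reports failure by returning NULL forces the caller to translate that NULL back into its own error path, whereas collapsing the helper into the caller lets every failure site jump directly to one shared label.

```c
#include <stdlib.h>
#include <stdio.h>

struct request { int flags; };

#define REQ_PRIV 0x1               /* stand-in for REQ_ELVPRIV */

/* Simulated elevator hook; returns nonzero on failure. */
static int set_private_data(struct request *rq) { (void)rq; return 0; }

/*
 * After the collapse: allocation, init, and private-data setup are
 * inline, so every failure site shares the caller's fail_alloc label.
 */
static struct request *get_request(int flags)
{
	struct request *rq;

	rq = malloc(sizeof(*rq));  /* stand-in for mempool_alloc() */
	if (!rq)
		goto fail_alloc;

	rq->flags = flags;         /* stand-in for blk_rq_init() */

	if (flags & REQ_PRIV) {
		if (set_private_data(rq)) {
			free(rq);
			goto fail_alloc;  /* was "return NULL" in the helper */
		}
	}
	return rq;

fail_alloc:
	/* a later patch can put all failure handling right here, once */
	return NULL;
}

int main(void)
{
	struct request *rq = get_request(REQ_PRIV);
	if (rq) {
		puts("allocated");
		free(rq);
	}
	return 0;
}
```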
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 46
1 file changed, 17 insertions(+), 29 deletions(-)
```diff
diff --git a/block/blk-core.c b/block/blk-core.c
index 3b02ba351f8c..f6f68b0c8302 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -719,33 +719,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	mempool_free(rq, q->rq.rq_pool);
 }
 
-static struct request *
-blk_alloc_request(struct request_queue *q, struct bio *bio, struct io_cq *icq,
-		  unsigned int flags, gfp_t gfp_mask)
-{
-	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-	if (!rq)
-		return NULL;
-
-	blk_rq_init(q, rq);
-
-	rq->cmd_flags = flags | REQ_ALLOCED;
-
-	if (flags & REQ_ELVPRIV) {
-		rq->elv.icq = icq;
-		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		/* @rq->elv.icq holds on to io_context until @rq is freed */
-		if (icq)
-			get_io_context(icq->ioc);
-	}
-
-	return rq;
-}
-
 /*
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
@@ -968,10 +941,25 @@ retry:
 		goto fail_alloc;
 	}
 
-	rq = blk_alloc_request(q, bio, icq, rw_flags, gfp_mask);
-	if (unlikely(!rq))
+	/* allocate and init request */
+	rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+	if (!rq)
 		goto fail_alloc;
 
+	blk_rq_init(q, rq);
+	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+	if (rw_flags & REQ_ELVPRIV) {
+		rq->elv.icq = icq;
+		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+			mempool_free(rq, q->rq.rq_pool);
+			goto fail_alloc;
+		}
+		/* @rq->elv.icq holds on to io_context until @rq is freed */
+		if (icq)
+			get_io_context(icq->ioc);
+	}
+
 	/*
 	 * ioc may be NULL here, and ioc_batching will be false. That's
 	 * OK, if the queue is under the request limit then requests need
```
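Two details of the collapsed version are worth noting. First, the helper's two `return NULL` failure sites become `goto fail_alloc`, so get_request() now has a single exit path for every allocation failure; keeping one failure label is what makes the allocation-failure rework mentioned in the commit message a local change. Second, the comment explaining that `rq->elv.icq` pins the io_context until the request is freed carries over unchanged, so the icq reference-counting behavior is identical before and after the patch.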