author     Jens Axboe <axboe@fb.com>  2014-11-17 20:43:42 +0300
committer  Jens Axboe <axboe@fb.com>  2014-11-17 20:43:42 +0300
commit     9d135bb8c2a0d2e54b84ebc1b7d41852614fead8 (patch)
tree       75b9f750cd85b461a490631cf10d7b1c70904c8e /drivers/block
parent     8d76d1015d86f2b66c872fbcaf46072228d757a5 (diff)
download   linux-9d135bb8c2a0d2e54b84ebc1b7d41852614fead8.tar.xz
NVMe: replace blk_put_request() with blk_mq_free_request()
No point in using blk_put_request(), since we know we are blk-mq. This
only makes sense in core code where we could be dealing with either
legacy or blk-mq drivers.

Additionally, use blk_mq_free_hctx_request() for the request completion
fast path, where we already know the mapping from request to hardware
queue.

Signed-off-by: Jens Axboe <axboe@fb.com>
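To illustrate the distinction the message draws, here is a minimal sketch,
not part of this patch. The example_* wrapper functions are hypothetical;
blk_put_request(), blk_mq_free_request(), and blk_mq_free_hctx_request()
are the real block-layer helpers (as of the 3.19-era kernel) that the
patch is choosing between.

	/*
	 * Illustrative sketch only -- not from this patch. The example_*
	 * wrappers are hypothetical; the three free helpers are the real
	 * 3.19-era block-layer APIs.
	 */
	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/* Core code: the queue may be legacy or blk-mq, so take the
	 * generic path, which dispatches at runtime. */
	static void example_generic_free(struct request *req)
	{
		blk_put_request(req);
	}

	/* A blk-mq-only driver such as NVMe: free directly, skipping
	 * the legacy-vs-blk-mq check. */
	static void example_mq_free(struct request *req)
	{
		blk_mq_free_request(req);
	}

	/* Completion fast path: the hardware context is already in hand
	 * (e.g. nvmeq->hctx), so skip the request-to-hctx lookup too. */
	static void example_hctx_free(struct blk_mq_hw_ctx *hctx,
				      struct request *req)
	{
		blk_mq_free_hctx_request(hctx, req);
	}

The hctx variant only pays off where the caller already holds the hardware
queue, which is exactly the situation in the completion handlers below.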
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/nvme-core.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8393f91b2721..bbac17f29fe7 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -262,7 +262,7 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
 		dev_warn(nvmeq->q_dmadev,
 			"async event result %08x\n", result);
 
-	blk_put_request(req);
+	blk_mq_free_hctx_request(nvmeq->hctx, req);
 }
 
 static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -273,7 +273,7 @@ static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 	u32 result = le32_to_cpup(&cqe->result);
 
-	blk_put_request(req);
+	blk_mq_free_hctx_request(nvmeq->hctx, req);
 
 	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
 	++nvmeq->dev->abort_limit;
@@ -286,7 +286,7 @@ static void async_completion(struct nvme_queue *nvmeq, void *ctx,
 	cmdinfo->result = le32_to_cpup(&cqe->result);
 	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
 	queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
-	blk_put_request(cmdinfo->req);
+	blk_mq_free_hctx_request(nvmeq->hctx, cmdinfo->req);
 }
 
 static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
@@ -872,7 +872,7 @@ static int __nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cm
 	if (!req)
 		return -ENOMEM;
 
 	res = nvme_submit_sync_cmd(req, cmd, result, timeout);
-	blk_put_request(req);
+	blk_mq_free_request(req);
 	return res;
 }
@@ -893,7 +893,7 @@ int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
 	if (!req)
 		return -ENOMEM;
 
 	res = nvme_submit_sync_cmd(req, cmd, result, NVME_IO_TIMEOUT);
-	blk_put_request(req);
+	blk_mq_free_request(req);
 	return res;
 }
@@ -1047,7 +1047,7 @@ static void nvme_abort_req(struct request *req)
 		dev_warn(nvmeq->q_dmadev,
 			"Could not abort I/O %d QID %d",
 			req->tag, nvmeq->qid);
-		blk_put_request(req);
+		blk_mq_free_request(req);
 	}
 }
@@ -1688,7 +1688,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
 		else {
 			status = nvme_submit_sync_cmd(req, &c, &cmd.result,
 								timeout);
-			blk_put_request(req);
+			blk_mq_free_request(req);
 		}
 	} else
 		status = __nvme_submit_admin_cmd(dev, &c, &cmd.result, timeout);