Diffstat (limited to 'drivers/nvme/target/loop.c')
-rw-r--r--  drivers/nvme/target/loop.c | 178
1 file changed, 102 insertions(+), 76 deletions(-)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d1f06e7768ff..304f1c87c160 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -13,12 +13,10 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
-#include <linux/delay.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
-#include <linux/t10-pi.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"
@@ -93,31 +91,26 @@ static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
static void nvme_loop_complete_rq(struct request *req)
{
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
- int error = 0;
nvme_cleanup_cmd(req);
sg_free_table_chained(&iod->sg_table, true);
+ nvme_complete_rq(req);
+}
- if (unlikely(req->errors)) {
- if (nvme_req_needs_retry(req, req->errors)) {
- nvme_requeue_req(req);
- return;
- }
-
- if (blk_rq_is_passthrough(req))
- error = req->errors;
- else
- error = nvme_error_status(req->errors);
- }
+static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
+{
+ u32 queue_idx = nvme_loop_queue_idx(queue);
- blk_mq_end_request(req, error);
+ if (queue_idx == 0)
+ return queue->ctrl->admin_tag_set.tags[queue_idx];
+ return queue->ctrl->tag_set.tags[queue_idx - 1];
}
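The new nvme_loop_tagset() helper centralizes the queue-to-tag-set mapping: queue 0 is the admin queue with its own tag set, while I/O queue N lives at index N - 1 of the shared I/O tag-set array. A minimal userspace sketch of that indexing invariant, with invented names (fake_ctrl, tagset_for_queue), compilable on its own:

#include <assert.h>

struct tags { int id; };

struct fake_ctrl {
	struct tags admin_tags[1];	/* tag set for the admin queue only */
	struct tags io_tags[4];		/* one tag set per I/O hw queue */
};

static struct tags *tagset_for_queue(struct fake_ctrl *ctrl,
				     unsigned int queue_idx)
{
	if (queue_idx == 0)
		return &ctrl->admin_tags[0];
	return &ctrl->io_tags[queue_idx - 1];	/* I/O queues start at 1 */
}

int main(void)
{
	struct fake_ctrl c = { { { 100 } }, { { 0 }, { 1 }, { 2 }, { 3 } } };

	assert(tagset_for_queue(&c, 0)->id == 100);	/* admin queue */
	assert(tagset_for_queue(&c, 1)->id == 0);	/* first I/O queue */
	assert(tagset_for_queue(&c, 4)->id == 3);	/* last I/O queue */
	return 0;
}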
static void nvme_loop_queue_response(struct nvmet_req *req)
{
- struct nvme_loop_iod *iod =
- container_of(req, struct nvme_loop_iod, req);
- struct nvme_completion *cqe = &iod->rsp;
+ struct nvme_loop_queue *queue =
+ container_of(req->sq, struct nvme_loop_queue, nvme_sq);
+ struct nvme_completion *cqe = req->rsp;
/*
* AEN requests are special as they don't time out and can
@@ -125,15 +118,22 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
* never be aborted. We don't even bother to allocate a struct
* request for them but rather special case them here.
*/
- if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
+ if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
- nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
} else {
- struct request *rq = blk_mq_rq_from_pdu(iod);
+ struct request *rq;
+
+ rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "tag 0x%x on queue %d not found\n",
+ cqe->command_id, nvme_loop_queue_idx(queue));
+ return;
+ }
- iod->nvme_req.result = cqe->result;
- blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
+ nvme_end_request(rq, cqe->status, cqe->result);
}
}
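With the iod no longer reachable from the nvmet_req, the completion handler has to trust the command_id coming back in the CQE, so the blk_mq_tag_to_rq() lookup doubles as validation: an unknown tag is logged and dropped instead of dereferenced. A rough, self-contained model of that defensive lookup (tag_to_rq and complete_cqe are invented stand-ins, not kernel API):

#include <stdio.h>

#define QUEUE_DEPTH 32

struct request { int in_flight; };

/* Stand-in for blk_mq_tag_to_rq(): NULL when no live request owns the tag. */
static struct request *tag_to_rq(struct request *tags, unsigned int tag)
{
	if (tag >= QUEUE_DEPTH || !tags[tag].in_flight)
		return NULL;
	return &tags[tag];
}

static void complete_cqe(struct request *tags, unsigned int command_id)
{
	struct request *rq = tag_to_rq(tags, command_id);

	if (!rq) {
		fprintf(stderr, "tag 0x%x not found\n", command_id);
		return;		/* a garbage ID is dropped, not chased */
	}
	rq->in_flight = 0;	/* complete the request */
}

int main(void)
{
	struct request tags[QUEUE_DEPTH] = { [3] = { .in_flight = 1 } };

	complete_cqe(tags, 3);	/* valid completion */
	complete_cqe(tags, 7);	/* stale ID: logged and ignored */
	return 0;
}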
@@ -154,7 +154,7 @@ nvme_loop_timeout(struct request *rq, bool reserved)
schedule_work(&iod->queue->ctrl->reset_work);
/* fail with DNR on admin cmd timeout */
- rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
return BLK_EH_HANDLED;
}
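The timeout handler now parks the synthetic status in the per-request nvme_request instead of the removed req->errors field; the DNR (Do Not Retry) bit is what stops the core's retry logic from requeueing a command that already timed out. A small check of that composition, using the constant values as defined in include/linux/nvme.h:

#include <assert.h>
#include <stdint.h>

#define NVME_SC_ABORT_REQ	0x007	/* generic status: request aborted */
#define NVME_SC_DNR		0x4000	/* Do Not Retry */

int main(void)
{
	uint16_t status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	assert(status & NVME_SC_DNR);			/* retry suppressed */
	assert((status & 0x7ff) == NVME_SC_ABORT_REQ);	/* code preserved */
	return 0;
}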
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
struct nvme_loop_iod *iod, unsigned int queue_idx)
{
- BUG_ON(queue_idx >= ctrl->queue_count);
-
iod->req.cmd = &iod->cmd;
iod->req.rsp = &iod->rsp;
iod->queue = &ctrl->queues[queue_idx];
@@ -270,7 +268,7 @@ static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
return 0;
}
-static struct blk_mq_ops nvme_loop_mq_ops = {
+static const struct blk_mq_ops nvme_loop_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
.init_request = nvme_loop_init_request,
@@ -278,7 +276,7 @@ static struct blk_mq_ops nvme_loop_mq_ops = {
.timeout = nvme_loop_timeout,
};
-static struct blk_mq_ops nvme_loop_admin_mq_ops = {
+static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
.init_request = nvme_loop_init_admin_request,
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
- nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
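Reordering nvmet_sq_destroy() ahead of the block-layer teardown looks cosmetic but is most plausibly an ordering fix: draining the target-side submission queue can still complete commands into the host's admin queue, so the blk-mq queue and tag set must outlive the drain. A generic, compilable illustration of draining a consumer before freeing what it references (all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct consumer { char *mem; };

/* Stand-in for nvmet_sq_destroy(): may still touch c->mem while draining. */
static void drain(struct consumer *c)
{
	printf("draining, still reading '%s'\n", c->mem);
}

int main(void)
{
	struct consumer c = { .mem = malloc(8) };

	strcpy(c.mem, "tags");
	drain(&c);	/* drain first, while the memory is still valid */
	free(c.mem);	/* only then release what the consumer used */
	return 0;
}

With the old order, the drain would run after the free, i.e. the use-after-free this hunk presumably avoids.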
@@ -314,6 +312,56 @@ free_ctrl:
kfree(ctrl);
}
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
+ int ret, i;
+
+ nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret || !nr_io_queues)
+ return ret;
+
+ dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+ for (i = 1; i <= nr_io_queues; i++) {
+ ctrl->queues[i].ctrl = ctrl;
+ ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+ if (ret)
+ goto out_destroy_queues;
+
+ ctrl->queue_count++;
+ }
+
+ return 0;
+
+out_destroy_queues:
+ nvme_loop_destroy_io_queues(ctrl);
+ return ret;
+}
+
+static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ int i, ret;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
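The three new helpers also pin down the driver's queue-numbering convention: queues[0] is the admin queue, queue_count includes it, every I/O-queue walk runs from 1 to queue_count - 1, and queue_count is only bumped after nvmet_sq_init() succeeds, which keeps the destroy path from touching a half-initialized entry. A toy model of that invariant (names invented, compilable as-is):

#include <assert.h>

#define MAX_QUEUES 8

struct toy_ctrl {
	int queue_count;		/* admin queue + live I/O queues */
	int live[MAX_QUEUES];
};

static void toy_init_io_queues(struct toy_ctrl *ctrl, int nr_io_queues)
{
	for (int i = 1; i <= nr_io_queues; i++) {
		ctrl->live[i] = 1;
		ctrl->queue_count++;	/* counted only once it exists */
	}
}

static void toy_destroy_io_queues(struct toy_ctrl *ctrl)
{
	for (int i = 1; i < ctrl->queue_count; i++)	/* skip admin at 0 */
		ctrl->live[i] = 0;
}

int main(void)
{
	struct toy_ctrl ctrl = { .queue_count = 1 };	/* admin only */

	toy_init_io_queues(&ctrl, 3);
	assert(ctrl.queue_count == 4);
	toy_destroy_io_queues(&ctrl);
	assert(!ctrl.live[1] && !ctrl.live[3]);
	return 0;
}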
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
int error;
@@ -357,7 +405,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (error)
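Dropping the "+ 1" in the sqsize clamp fixes a units mismatch: CAP.MQES is a zero's-based value (entries minus one) and the fabrics sqsize is kept in the same convention, so the old code compared a one's-based limit against a zero's-based request and could exceed the controller's maximum by one entry. In numbers:

#include <assert.h>

int main(void)
{
	/* Zero's-based: actual entries = value + 1. */
	int mqes   = 31;	/* controller allows 32-entry queues */
	int sqsize = 127;	/* host asked for 128 entries */

	/* Old: min(mqes + 1, sqsize) picked 32, i.e. a 33-entry queue,
	 * one more than the device advertised. With both sides in the
	 * same zero's-based convention: */
	int clamped = sqsize < mqes ? sqsize : mqes;

	assert(clamped == 31);	/* 32 entries, exactly the limit */
	return 0;
}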
@@ -385,17 +433,13 @@ out_free_sq:
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
- int i;
-
nvme_stop_keep_alive(&ctrl->ctrl);
if (ctrl->queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
-
- for (i = 1; i < ctrl->queue_count; i++)
- nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ nvme_loop_destroy_io_queues(ctrl);
}
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -459,7 +503,7 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
struct nvme_loop_ctrl *ctrl = container_of(work,
struct nvme_loop_ctrl, reset_work);
bool changed;
- int i, ret;
+ int ret;
nvme_loop_shutdown_ctrl(ctrl);
@@ -467,20 +511,13 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
if (ret)
goto out_disable;
- for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
- ctrl->queues[i].ctrl = ctrl;
- ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
- if (ret)
- goto out_free_queues;
-
- ctrl->queue_count++;
- }
+ ret = nvme_loop_init_io_queues(ctrl);
+ if (ret)
+ goto out_destroy_admin;
- for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
- if (ret)
- goto out_free_queues;
- }
+ ret = nvme_loop_connect_io_queues(ctrl);
+ if (ret)
+ goto out_destroy_io;
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);
@@ -492,9 +529,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
return;
-out_free_queues:
- for (i = 1; i < ctrl->queue_count; i++)
- nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+ nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
nvme_loop_destroy_admin_queue(ctrl);
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
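The reworked reset path replaces the single out_free_queues label with stacked labels so each failure point unwinds exactly what was set up before it; a connect failure tears down the I/O queues and then falls through into the admin-queue teardown. A compact, compilable sketch of that goto-ladder idiom (all functions are placeholders):

#include <stdio.h>

static int setup_admin(void)	{ return 0; }
static int setup_io(void)	{ return 0; }
static int connect_io(void)	{ return -1; }	/* simulate a failure */

static void destroy_io(void)	{ puts("destroy io queues"); }
static void destroy_admin(void)	{ puts("destroy admin queue"); }

static int reset(void)
{
	int ret;

	ret = setup_admin();
	if (ret)
		return ret;
	ret = setup_io();
	if (ret)
		goto out_destroy_admin;
	ret = connect_io();
	if (ret)
		goto out_destroy_io;
	return 0;

out_destroy_io:
	destroy_io();
out_destroy_admin:	/* fallthrough is the point: unwind in reverse */
	destroy_admin();
	return ret;
}

int main(void)
{
	return reset() ? 1 : 0;
}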
@@ -533,25 +570,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
- struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- int ret, i;
+ int ret;
- ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
- if (ret || !opts->nr_io_queues)
+ ret = nvme_loop_init_io_queues(ctrl);
+ if (ret)
return ret;
- dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
- opts->nr_io_queues);
-
- for (i = 1; i <= opts->nr_io_queues; i++) {
- ctrl->queues[i].ctrl = ctrl;
- ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
- if (ret)
- goto out_destroy_queues;
-
- ctrl->queue_count++;
- }
-
memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
ctrl->tag_set.ops = &nvme_loop_mq_ops;
ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,11 +599,9 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
goto out_free_tagset;
}
- for (i = 1; i <= opts->nr_io_queues; i++) {
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
- if (ret)
- goto out_cleanup_connect_q;
- }
+ ret = nvme_loop_connect_io_queues(ctrl);
+ if (ret)
+ goto out_cleanup_connect_q;
return 0;
@@ -588,8 +610,7 @@ out_cleanup_connect_q:
out_free_tagset:
blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
- for (i = 1; i < ctrl->queue_count; i++)
- nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ nvme_loop_destroy_io_queues(ctrl);
return ret;
}
@@ -724,7 +745,12 @@ static int __init nvme_loop_init_module(void)
ret = nvmet_register_transport(&nvme_loop_ops);
if (ret)
return ret;
- return nvmf_register_transport(&nvme_loop_transport);
+
+ ret = nvmf_register_transport(&nvme_loop_transport);
+ if (ret)
+ nvmet_unregister_transport(&nvme_loop_ops);
+
+ return ret;
}
static void __exit nvme_loop_cleanup_module(void)
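The module-init fix closes a leak on the failure path: previously a failed nvmf_register_transport() left the earlier nvmet_register_transport() in effect. The pattern, register in order and roll back the earlier step if a later one fails, in a self-contained form (register/unregister names invented):

#include <stdio.h>

static int  register_target(void)	{ return 0; }
static void unregister_target(void)	{ puts("rolled back target"); }
static int  register_fabrics(void)	{ return -1; }	/* simulate failure */

static int init_both(void)
{
	int ret;

	ret = register_target();
	if (ret)
		return ret;

	ret = register_fabrics();
	if (ret)
		unregister_target();	/* undo step one on failure */

	return ret;
}

int main(void)
{
	return init_both() ? 1 : 0;
}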