Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/core.c      29
-rw-r--r--  drivers/nvme/target/fc.c        41
-rw-r--r--  drivers/nvme/target/fcloop.c     8
-rw-r--r--  drivers/nvme/target/passthru.c   2
-rw-r--r--  drivers/nvme/target/rdma.c       6
5 files changed, 44 insertions, 42 deletions
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 884286f90688..5d7d483bfbe3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -513,9 +513,6 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
return 0;
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct nvmet_ns *ns)
{
@@ -523,6 +520,8 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct pci_dev *p2p_dev;
int ret;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
@@ -1539,15 +1538,14 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
return false;
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
struct device *p2p_client)
{
struct nvmet_ns *ns;
unsigned long idx;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
if (!p2p_client)
return;
@@ -1557,14 +1555,13 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
struct radix_tree_iter iter;
void __rcu **slot;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
pci_dev_put(radix_tree_deref_slot(slot));
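[Note] The hunks above replace the "ctrl->subsys->lock should be held" comments with lockdep_assert_held(), so the locking requirement is verified at runtime whenever lockdep is enabled instead of relying on review. A minimal sketch of the pattern, using a hypothetical structure rather than the nvmet types:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct demo_ctrl {
            struct mutex lock;
            int ns_count;
    };

    /* Caller must hold demo_ctrl->lock; lockdep checks this when enabled. */
    static void demo_add_ns_locked(struct demo_ctrl *ctrl)
    {
            lockdep_assert_held(&ctrl->lock);
            ctrl->ns_count++;
    }

    static void demo_add_ns(struct demo_ctrl *ctrl)
    {
            mutex_lock(&ctrl->lock);
            demo_add_ns_locked(ctrl);
            mutex_unlock(&ctrl->lock);
    }

With CONFIG_LOCKDEP disabled the assert compiles away, so the check costs nothing in production builds.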
@@ -1960,24 +1957,24 @@ static int __init nvmet_init(void)
if (!nvmet_wq)
goto out_free_buffered_work_queue;
- error = nvmet_init_discovery();
+ error = nvmet_init_debugfs();
if (error)
goto out_free_nvmet_work_queue;
- error = nvmet_init_debugfs();
+ error = nvmet_init_discovery();
if (error)
- goto out_exit_discovery;
+ goto out_exit_debugfs;
error = nvmet_init_configfs();
if (error)
- goto out_exit_debugfs;
+ goto out_exit_discovery;
return 0;
-out_exit_debugfs:
- nvmet_exit_debugfs();
out_exit_discovery:
nvmet_exit_discovery();
+out_exit_debugfs:
+ nvmet_exit_debugfs();
out_free_nvmet_work_queue:
destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
@@ -1992,8 +1989,8 @@ out_destroy_bvec_cache:
static void __exit nvmet_exit(void)
{
nvmet_exit_configfs();
- nvmet_exit_debugfs();
nvmet_exit_discovery();
+ nvmet_exit_debugfs();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
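[Note] The init/exit reordering brings debugfs up before discovery and tears it down after, and the error-unwind labels are adjusted so each failure path only undoes what was already set up, in reverse order; nvmet_exit() now mirrors nvmet_init() exactly. A generic sketch of that goto-unwind idiom, with hypothetical step names standing in for the nvmet functions:

    #include <linux/init.h>
    #include <linux/module.h>

    /* Hypothetical setup/teardown steps (e.g. debugfs, discovery, configfs). */
    int step_a_init(void);  void step_a_exit(void);
    int step_b_init(void);  void step_b_exit(void);
    int step_c_init(void);  void step_c_exit(void);

    static int __init demo_init(void)
    {
            int error;

            error = step_a_init();
            if (error)
                    return error;

            error = step_b_init();
            if (error)
                    goto out_exit_a;

            error = step_c_init();
            if (error)
                    goto out_exit_b;

            return 0;

    out_exit_b:
            step_b_exit();
    out_exit_a:
            step_a_exit();
            return error;
    }

    static void __exit demo_exit(void)
    {
            /* Tear down in the reverse order of demo_init(). */
            step_c_exit();
            step_b_exit();
            step_a_exit();
    }

    module_init(demo_init);
    module_exit(demo_exit);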
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 25598a46bf0d..7d84527d5a43 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
int ls_error;
struct list_head lsreq_list; /* tgtport->ls_req_list */
bool req_queued;
+
+ struct work_struct put_work;
};
@@ -111,8 +113,6 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
-
- struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
-static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+static void nvmet_fc_put_lsop_work(struct work_struct *work)
{
- struct nvmet_fc_tgtport *tgtport =
- container_of(work, struct nvmet_fc_tgtport, put_work);
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(work, struct nvmet_fc_ls_req_op, put_work);
- nvmet_fc_tgtport_put(tgtport);
+ nvmet_fc_tgtport_put(lsop->tgtport);
+ kfree(lsop);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
@@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
DMA_BIDIRECTIONAL);
out_putwork:
- queue_work(nvmet_wq, &tgtport->put_work);
+ queue_work(nvmet_wq, &lsop->put_work);
}
static int
@@ -388,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
lsreq->done = done;
lsop->req_queued = false;
INIT_LIST_HEAD(&lsop->lsreq_list);
+ INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);
lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
lsreq->rqstlen + lsreq->rsplen,
@@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
__nvmet_fc_finish_ls_req(lsop);
/* fc-nvme target doesn't care about success or failure of cmd */
-
- kfree(lsop);
}
/*
@@ -459,7 +459,7 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
* down, and the related FC-NVME Association ID and Connection IDs
* become invalid.
*
- * The behavior of the fc-nvme target is such that it's
+ * The behavior of the fc-nvme target is such that its
* understanding of the association and connections will implicitly
* be torn down. The action is implicit as it may be due to a loss of
* connectivity with the fc-nvme host, so the target may never get a
@@ -1075,6 +1075,14 @@ nvmet_fc_delete_assoc_work(struct work_struct *work)
static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
+ int terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+
+ /* if already terminating, do nothing */
+ if (terminating)
+ return;
+
nvmet_fc_tgtport_get(assoc->tgtport);
if (!queue_work(nvmet_wq, &assoc->del_work))
nvmet_fc_tgtport_put(assoc->tgtport);
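[Note] Moving the atomic_xchg() guard from nvmet_fc_delete_target_assoc() into nvmet_fc_schedule_delete_assoc() means the "already terminating" check happens before the delete work is queued, so a second caller neither takes an extra tgtport reference nor requeues the work. The idiom is a single-shot latch; a simplified sketch with a made-up object (the driver additionally holds a tgtport reference across the queued work):

    #include <linux/atomic.h>
    #include <linux/workqueue.h>

    struct demo_assoc {
            atomic_t terminating;
            struct work_struct del_work;
    };

    static void demo_schedule_delete(struct demo_assoc *assoc)
    {
            /* atomic_xchg() returns the old value: only the first caller sees 0. */
            if (atomic_xchg(&assoc->terminating, 1))
                    return;

            schedule_work(&assoc->del_work);
    }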
@@ -1202,13 +1210,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
unsigned long flags;
- int i, terminating;
-
- terminating = atomic_xchg(&assoc->terminating, 1);
-
- /* if already terminating, do nothing */
- if (terminating)
- return;
+ int i;
spin_lock_irqsave(&tgtport->lock, flags);
list_del_rcu(&assoc->a_list);
@@ -1410,7 +1412,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
- INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -2313,7 +2314,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
if (ret) {
/*
- * should be ok to set w/o lock as its in the thread of
+ * should be ok to set w/o lock as it's in the thread of
* execution (not an async timer routine) and doesn't
* contend with any clearing action
*/
@@ -2629,7 +2630,7 @@ transport_error:
* and the api of the FC LLDD which may issue a hw command to send the
* response, but the LLDD may not get the hw completion for that command
* and upcall the nvmet_fc layer before a new command may be
- * asynchronously received - its possible for a command to be received
+ * asynchronously received - it's possible for a command to be received
* before the LLDD and nvmet_fc have recycled the job structure. It gives
* the appearance of more commands received than fits in the sq.
* To alleviate this scenario, a temporary queue is maintained in the
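[Note] The fc.c change moves put_work from the target port into each LS request op: the completion path queues lsop->put_work, and the work handler drops the tgtport reference and frees the lsop itself, instead of kfree()ing it in the completion callback. A generic sketch of deferring a reference drop and free to process context via a per-object work item (hypothetical names, plain kref instead of the driver's own refcounting, and it assumes the port's kref was initialised by its owner):

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_port {
            struct kref ref;
    };

    struct demo_req {
            struct demo_port *port;
            struct work_struct put_work;
    };

    static void demo_port_release(struct kref *ref)
    {
            kfree(container_of(ref, struct demo_port, ref));
    }

    static void demo_req_put_work(struct work_struct *work)
    {
            struct demo_req *req = container_of(work, struct demo_req, put_work);

            /* Runs in process context: safe to drop the reference and free. */
            kref_put(&req->port->ref, demo_port_release);
            kfree(req);
    }

    static struct demo_req *demo_req_alloc(struct demo_port *port)
    {
            struct demo_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

            if (!req)
                    return NULL;

            kref_get(&port->ref);
            req->port = port;
            INIT_WORK(&req->put_work, demo_req_put_work);
            return req;
    }

    static void demo_req_done(struct demo_req *req)
    {
            /* Completion may run in atomic context; defer the cleanup. */
            queue_work(system_wq, &req->put_work);
    }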
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 257b497d515a..5dffcc5becae 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -496,13 +496,15 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
if (!targetport) {
/*
* The target port is gone. The target doesn't expect any
- * response anymore and the ->done call is not valid
- * because the resources have been freed by
- * nvmet_fc_free_pending_reqs.
+ * response anymore and thus lsreq can't be accessed anymore.
*
* We end up here from delete association exchange:
* nvmet_fc_xmt_disconnect_assoc sends an async request.
+ *
+ * Return success because this is what LLDDs do; silently
+ * drop the response.
*/
+ lsrsp->done(lsrsp);
kmem_cache_free(lsreq_cache, tls_req);
return 0;
}
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 3b4b0df8f879..0c361b1e3566 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -533,6 +533,8 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
case NVME_FEAT_HOST_ID:
req->execute = nvmet_execute_get_features;
return NVME_SC_SUCCESS;
+ case NVME_FEAT_FDP:
+ return nvmet_setup_passthru_command(req);
default:
return nvmet_passthru_get_set_features(req);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 67f61c67c167..0485e25ab797 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1731,7 +1731,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
* We registered an ib_client to handle device removal for queues,
* so we only need to handle the listening port cm_ids. In this case
* we nullify the priv to prevent double cm_id destruction and destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * the cm_id implicitly by returning a non-zero rc to the callout.
*/
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue)
@@ -1742,7 +1742,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
/*
* This is a queue cm_id. we have registered
* an ib_client to handle queues removal
- * so don't interfear and just return.
+ * so don't interfere and just return.
*/
return 0;
}
@@ -1760,7 +1760,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
/*
* We need to return 1 so that the core will destroy
- * it's own ID. What a great API design..
+ * its own ID. What a great API design..
*/
return 1;
}