author    James Smart <jsmart2021@gmail.com>    2020-03-31 19:50:10 +0300
committer Jens Axboe <axboe@kernel.dk>          2020-05-10 01:18:34 +0300
commit    4c2805aab519a39e8adf281afcef40174d48fd3f (patch)
tree      59eaf98d3b552d44d2bc550db4550d938f21f8f8 /drivers
parent    9aa09e98b288649544c74d1a7b88223f36e4bffd (diff)
download  linux-4c2805aab519a39e8adf281afcef40174d48fd3f.tar.xz
lpfc: nvmet: Add support for NVME LS request hosthandle
As the nvmet layer does not have the concept of a remoteport object, which
can be used to identify the entity on the other end of the fabric that is to
receive an LS, the hosthandle was introduced. The driver passes the
hosthandle, a value representative of the remote port, with an LS request
receive. The LS request will create the association. The transport will
remember the hosthandle for the association, and if there is a need to
initiate an LS request to the remote port for the association, the
hosthandle will be used. When the driver loses connectivity with the remote
port, it needs to notify the transport that the hosthandle is no longer
valid, allowing the transport to terminate associations related to the
hosthandle.

This patch adds support to the driver for the hosthandle. The driver will
use the ndlp pointer of the remote port for the hosthandle in calls to
nvmet_fc_rcv_ls_req(). The discovery engine is updated to invalidate the
hosthandle whenever connectivity with the remote port is lost.

Signed-off-by: Paul Ely <paul.ely@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
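For orientation, the sketch below shows the shape of the LLDD-side flow this
hosthandle contract implies: pass the handle up with each received LS,
invalidate it on connectivity loss, and recycle it only after the transport's
release callback. The my_rport structure and my_lldd_* function names are
illustrative assumptions, not lpfc code; the nvmet_fc_* calls are the
transport entry points this series defines.

#include <linux/nvme-fc-driver.h>

/* Illustrative remote-port bookkeeping; lpfc uses its ndlp for this role. */
struct my_rport {
	struct nvmet_fc_target_port *tgtport;
};

/* 1. Hand a received LS up with a hosthandle identifying the sender.
 *    Any association the LS creates is tied to that handle.
 */
static int my_lldd_rcv_ls(struct my_rport *rport,
			  struct nvmefc_ls_rsp *lsrsp, void *buf, u32 len)
{
	return nvmet_fc_rcv_ls_req(rport->tgtport, rport, lsrsp, buf, len);
}

/* 2. On loss of connectivity, declare the handle invalid so the transport
 *    can terminate the associations bound to it.
 */
static void my_lldd_rport_gone(struct my_rport *rport)
{
	nvmet_fc_invalidate_host(rport->tgtport, rport);
}

/* 3. The transport calls .host_release once it holds no further reference
 *    to the handle; only then may the LLDD recycle the object behind it.
 */
static void my_lldd_host_release(void *hosthandle)
{
	/* safe to free or reuse the my_rport behind hosthandle */
}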
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   |  6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h      |  3
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c     | 53
5 files changed, 74 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 07b4f0fd3fe2..9ee6b930a655 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -571,6 +571,8 @@ void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
struct rqb_dmabuf *nvmebuf, uint64_t isr_ts,
uint8_t cqflag);
void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
+void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 8dec7b7c06d1..f5952f8cd4b5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -823,6 +823,12 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
if ((phba->sli_rev < LPFC_SLI_REV4) &&
(!remove && ndlp->nlp_type & NLP_FABRIC))
continue;
+
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
lpfc_disc_state_machine(vport, ndlp, NULL,
remove
? NLP_EVT_DEVICE_RM
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 81f4ba1c24b4..d8501bd959e7 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -489,6 +489,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(unsigned long long)
wwn_to_u64(sp->portName.u.wwn));
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
ndlp->nlp_prev_state = ndlp->nlp_state;
/* rport needs to be unregistered first */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -839,6 +844,12 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
if (ndlp->nlp_DID == Fabric_DID) {
if (vport->port_state <= LPFC_FDISC)
goto out;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index f28a6789e252..4a4c3f780e1f 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -98,9 +98,12 @@ struct lpfc_nvme_fcpreq_priv {
#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC)
/* Used for NVME Target */
+#define LPFC_NVMET_INV_HOST_ACTIVE 1
+
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
struct completion *tport_unreg_cmp;
+ atomic_t state; /* tracks nvmet hosthandle invalidation */
/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
atomic_t rcv_ls_req_in;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 6a1365df6c65..5584eaafbfb2 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1283,6 +1283,24 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
}
static void
+lpfc_nvmet_host_release(void *hosthandle)
+{
+ struct lpfc_nodelist *ndlp = hosthandle;
+ struct lpfc_hba *phba = NULL;
+ struct lpfc_nvmet_tgtport *tgtp;
+
+ phba = ndlp->phba;
+ if (!phba->targetport || !phba->targetport->private)
+ return;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6202 NVMET XPT releasing hosthandle x%px\n",
+ hosthandle);
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_set(&tgtp->state, 0);
+}
+
+static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
struct lpfc_nvmet_tgtport *tgtp;
@@ -1306,6 +1324,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
.defer_rcv = lpfc_nvmet_defer_rcv,
.discovery_event = lpfc_nvmet_discovery_event,
+ .host_release = lpfc_nvmet_host_release,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -2044,7 +2063,12 @@ lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
atomic_inc(&tgtp->rcv_ls_req_in);
- rc = nvmet_fc_rcv_ls_req(phba->targetport, NULL, &axchg->ls_rsp,
+ /*
+ * Driver passes the ndlp as the hosthandle argument allowing
+ * the transport to generate LS requests for any associations
+ * that are created.
+ */
+ rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
axchg->payload, axchg->size);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -3476,3 +3500,30 @@ out:
"6056 Failed to Issue ABTS. Status x%x\n", rc);
return 0;
}
+
+/**
+ * lpfc_nvmet_invalidate_host - invalidate connectivity to an NVME host
+ *
+ * @phba: pointer to the driver instance bound to an adapter port.
+ * @ndlp: pointer to an lpfc_nodelist type
+ *
+ * This routine upcalls the nvmet transport to invalidate an NVME
+ * host to which this target instance had active connections.
+ */
+void
+lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
+ "6203 Invalidating hosthandle x%px\n",
+ ndlp);
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
+
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+ /* Need to get the nvmet_fc_target_port pointer here. */
+ nvmet_fc_invalidate_host(phba->targetport, ndlp);
+#endif
+}
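
Read together, lpfc_nvmet_invalidate_host() and lpfc_nvmet_host_release()
form a simple flag handshake on tgtp->state: the flag is raised when the
driver asks the transport to invalidate a host, and lowered only when the
transport confirms it has released the hosthandle. Below is a runnable
userspace restatement of that handshake, using C11 atomics in place of the
kernel's atomic_t; all names here are illustrative stand-ins, not lpfc code.

#include <stdatomic.h>
#include <stdio.h>

#define INV_HOST_ACTIVE 1	/* stands in for LPFC_NVMET_INV_HOST_ACTIVE */

static atomic_int state;

/* Mirrors lpfc_nvmet_invalidate_host(): raise the flag before asking the
 * transport to invalidate the host.
 */
static void invalidate_host(void)
{
	atomic_store(&state, INV_HOST_ACTIVE);
	/* ...upcall, i.e. nvmet_fc_invalidate_host(targetport, ndlp)... */
}

/* Mirrors lpfc_nvmet_host_release(): the transport has dropped its last
 * use of the hosthandle, so the invalidation is no longer in flight.
 */
static void host_release(void)
{
	atomic_store(&state, 0);
}

int main(void)
{
	invalidate_host();
	printf("invalidation active: %d\n", atomic_load(&state));
	host_release();
	printf("invalidation active: %d\n", atomic_load(&state));
	return 0;
}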