author     James Smart <jsmart2021@gmail.com>  2017-05-16 01:20:46 +0300
committer  Martin K. Petersen <martin.petersen@oracle.com>  2017-05-17 04:22:22 +0300
commit     a8cf5dfeb4d84248c0ad12386ae0cb36ee21589a (patch)
tree       fb4580dc29cc56663b5702a688bf3518bd6ff7e6
parent     6c621a2229b084da0d926967f84b059a10c26ede (diff)
scsi: lpfc: Added recovery logic for running out of NVMET IO context resources
Previous logic would just drop the IO. Added logic to queue the IO to wait for an IO context resource from an IO that's already in progress.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
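In outline: when no NVMET IO context is free, the receive path now queues the buffer on a wait list instead of dropping the IO, and the context-release path services that wait list before returning the context to the free pool. A minimal C sketch of the pattern follows; io_buf, io_ctx and process_io are simplified stand-ins, not the lpfc structures in the diff below, and the spinlock protection the driver puts around both lists is omitted.

/*
 * Minimal sketch of the deferral pattern this patch adds.
 * io_buf, io_ctx and process_io are hypothetical stand-ins;
 * the real driver protects both lists with spinlocks.
 */
struct io_buf { struct io_buf *next; };
struct io_ctx { struct io_ctx *next; };

static struct io_ctx *free_ctx_list;   /* free IO contexts */
static struct io_buf *io_wait_list;    /* IOs waiting for a context */
static unsigned int io_wait_cnt;

/* Hand the command to the upper layer (stub for the sketch). */
static void process_io(struct io_ctx *ctx, struct io_buf *buf)
{
	(void)ctx;
	(void)buf;
}

/* Receive path: take a free context, or queue the IO instead of dropping it. */
void receive_io(struct io_buf *buf)
{
	struct io_ctx *ctx = free_ctx_list;

	if (!ctx) {
		buf->next = io_wait_list;   /* previously: drop the IO */
		io_wait_list = buf;
		io_wait_cnt++;
		return;
	}
	free_ctx_list = ctx->next;
	process_io(ctx, buf);
}

/* Release path: a freed context services a waiting IO before rejoining the pool. */
void release_ctx(struct io_ctx *ctx)
{
	if (io_wait_cnt) {
		struct io_buf *buf = io_wait_list;

		io_wait_list = buf->next;
		io_wait_cnt--;
		process_io(ctx, buf);
		return;
	}
	ctx->next = free_ctx_list;
	free_ctx_list = ctx;
}

The diff below implements the same idea with the driver's rqb_dmabuf buffers and lpfc_nvmet_ctxbuf contexts, adding the lpfc_nvmet_io_wait_list, its nvmet_io_wait_lock, and the wait counters reported via sysfs and debugfs.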
-rw-r--r--  drivers/scsi/lpfc/lpfc.h          |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c     |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h     |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c    | 138
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      |   7
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h     |   6
8 files changed, 144 insertions, 24 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 72641b1d3ab8..c47bde6205c9 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -170,6 +170,7 @@ struct rqb_dmabuf {
struct lpfc_dmabuf dbuf;
uint16_t total_size;
uint16_t bytes_recv;
+ uint16_t idx;
struct lpfc_queue *hrq; /* ptr to associated Header RQ */
struct lpfc_queue *drq; /* ptr to associated Data RQ */
};
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 65264582915a..bb2d9e238225 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -245,6 +245,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "IO_CTX: %08x outstanding %08x total %x",
+ phba->sli4_hba.nvmet_ctx_cnt,
+ phba->sli4_hba.nvmet_io_wait_cnt,
+ phba->sli4_hba.nvmet_io_wait_total);
+
len += snprintf(buf+len, PAGE_SIZE-len, "\n");
return len;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index cc95abd130b4..8912767e7bc8 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -77,6 +77,8 @@ void lpfc_retry_pport_discovery(struct lpfc_hba *);
void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
void lpfc_free_iocb_list(struct lpfc_hba *phba);
+int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ struct lpfc_queue *drq, int count, int idx);
void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 7284533f4df2..c7d1c9d37a64 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -842,6 +842,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
}
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
}
+
+ len += snprintf(buf + len, size - len,
+ "IO_CTX: %08x outstanding %08x total %08x\n",
+ phba->sli4_hba.nvmet_ctx_cnt,
+ phba->sli4_hba.nvmet_io_wait_cnt,
+ phba->sli4_hba.nvmet_io_wait_total);
} else {
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return len;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86b0b26dfeea..9f6c7e71814b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5825,6 +5825,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
/* Fast-path XRI aborted CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
@@ -5833,6 +5834,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
/*
* Initialize driver internal slow-path work queues
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fcc77ae0c71c..312f54278bd4 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -158,6 +158,12 @@ void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct fc_frame_header *fc_hdr;
+ struct rqb_dmabuf *nvmebuf;
+ struct lpfc_dmabuf *hbufp;
+ uint32_t *payload;
+ uint32_t size, oxid, sid, rc;
unsigned long iflag;
if (ctxp->txrdy) {
@@ -168,6 +174,87 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
}
ctxp->state = LPFC_NVMET_STE_FREE;
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+ if (phba->sli4_hba.nvmet_io_wait_cnt) {
+ hbufp = &nvmebuf->hbuf;
+ list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
+ nvmebuf, struct rqb_dmabuf,
+ hbuf.list);
+ phba->sli4_hba.nvmet_io_wait_cnt--;
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+ iflag);
+
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ payload = (uint32_t *)(nvmebuf->dbuf.virt);
+ size = nvmebuf->bytes_recv;
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+ ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+ memset(ctxp, 0, sizeof(ctxp->ctx));
+ ctxp->wqeq = NULL;
+ ctxp->txrdy = NULL;
+ ctxp->offset = 0;
+ ctxp->phba = phba;
+ ctxp->size = size;
+ ctxp->oxid = oxid;
+ ctxp->sid = sid;
+ ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->entry_cnt = 1;
+ ctxp->flag = 0;
+ ctxp->ctxbuf = ctx_buf;
+ spin_lock_init(&ctxp->ctxlock);
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on) {
+ ctxp->ts_cmd_nvme = ktime_get_ns();
+ ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
+ ctxp->ts_nvme_data = 0;
+ ctxp->ts_data_wqput = 0;
+ ctxp->ts_isr_data = 0;
+ ctxp->ts_data_nvme = 0;
+ ctxp->ts_nvme_status = 0;
+ ctxp->ts_status_wqput = 0;
+ ctxp->ts_isr_status = 0;
+ ctxp->ts_status_nvme = 0;
+ }
+#endif
+ atomic_inc(&tgtp->rcv_fcp_cmd_in);
+ /*
+ * The calling sequence should be:
+ * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
+ * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+ * When we return from nvmet_fc_rcv_fcp_req, all relevant info
+ * the NVME command / FC header is stored.
+ * A buffer has already been reposted for this IO, so just free
+ * the nvmebuf.
+ */
+ rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+ payload, size);
+
+ /* Process FCP command */
+ if (rc == 0) {
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ return;
+ }
+
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+ ctxp->oxid, rc,
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out),
+ atomic_read(&tgtp->xmt_fcp_release));
+
+ lpfc_nvmet_defer_release(phba, ctxp);
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ return;
+ }
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
list_add_tail(&ctx_buf->list,
&phba->sli4_hba.lpfc_nvmet_ctx_list);
@@ -1232,7 +1319,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_ctxbuf *ctx_buf;
uint32_t *payload;
- uint32_t size, oxid, sid, rc;
+ uint32_t size, oxid, sid, rc, qno;
unsigned long iflag;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t id;
@@ -1257,21 +1344,41 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+ size = nvmebuf->bytes_recv;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
+ id = smp_processor_id();
+ if (id < LPFC_CHECK_CPU_CNT)
+ phba->cpucheck_rcv_io[id]++;
+ }
+#endif
+
+ lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
+ oxid, size, smp_processor_id());
+
if (!ctx_buf) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6408 No NVMET ctx Drop IO\n");
- oxid = 0;
- size = 0;
- sid = 0;
- ctxp = NULL;
- goto dropit;
+ /* Queue this NVME IO to process later */
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+ list_add_tail(&nvmebuf->hbuf.list,
+ &phba->sli4_hba.lpfc_nvmet_io_wait_list);
+ phba->sli4_hba.nvmet_io_wait_cnt++;
+ phba->sli4_hba.nvmet_io_wait_total++;
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+ iflag);
+
+ /* Post a brand new DMA buffer to RQ */
+ qno = nvmebuf->idx;
+ lpfc_post_rq_buffer(
+ phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+ phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+ return;
}
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
- size = nvmebuf->bytes_recv;
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
@@ -1302,17 +1409,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->ts_isr_status = 0;
ctxp->ts_status_nvme = 0;
}
-
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
- id = smp_processor_id();
- if (id < LPFC_CHECK_CPU_CNT)
- phba->cpucheck_rcv_io[id]++;
- }
#endif
- lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
- oxid, size, smp_processor_id());
-
atomic_inc(&tgtp->rcv_fcp_cmd_in);
/*
* The calling sequence should be:
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d68ee3ee299a..3fb4e715bfa2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -6513,9 +6513,9 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
(phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
}
-static int
+int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
- struct lpfc_queue *drq, int count)
+ struct lpfc_queue *drq, int count, int idx)
{
int rc, i;
struct lpfc_rqe hrqe;
@@ -6534,6 +6534,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
break;
rqb_buffer->hrq = hrq;
rqb_buffer->drq = drq;
+ rqb_buffer->idx = idx;
list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
}
while (!list_empty(&rqb_buf_list)) {
@@ -6980,7 +6981,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
lpfc_post_rq_buffer(
phba, phba->sli4_hba.nvmet_mrq_hdr[i],
phba->sli4_hba.nvmet_mrq_data[i],
- LPFC_NVMET_RQE_DEF_COUNT);
+ LPFC_NVMET_RQE_DEF_COUNT, i);
}
}
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 19e2f190ea2e..c1c9a9125266 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -619,13 +619,16 @@ struct lpfc_sli4_hba {
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
uint16_t nvmet_ctx_cnt;
+ uint16_t nvmet_io_wait_cnt;
+ uint16_t nvmet_io_wait_total;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
struct list_head lpfc_nvmet_sgl_list;
struct list_head lpfc_abts_nvmet_ctx_list;
- struct list_head lpfc_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
+ struct list_head lpfc_nvmet_ctx_list;
+ struct list_head lpfc_nvmet_io_wait_list;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
@@ -657,6 +660,7 @@ struct lpfc_sli4_hba {
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t sgl_list_lock; /* list of aborted els IOs */
spinlock_t nvmet_io_lock;
+ spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
uint32_t physical_port;
/* CPU to vector mapping information */