author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-12 01:14:01 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-12 01:14:01 +0300
commit     ba6d10ab8014ac10d25ca513352b6665e73b5785
tree       3b7aaa3f2d76d0c0e9612bc87e1da45577465528 /drivers/scsi/lpfc/lpfc_nvmet.c
parent     64b08df460cfdfc2b010263043a057cdd33500ed
parent     baf23eddbf2a4ba9bf2bdb342686c71a8042e39b
download   linux-ba6d10ab8014ac10d25ca513352b6665e73b5785.tar.xz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This is mostly update of the usual drivers: qla2xxx, hpsa, lpfc, ufs,
mpt3sas, ibmvscsi, megaraid_sas, bnx2fc and hisi_sas as well as the
removal of the osst driver (I heard from Willem privately that he
would like the driver removed because all his test hardware has
failed). Plus a number of minor changes, spelling fixes and other
trivia.
The big merge conflict this time around is the SPDX licence tags.
Following discussion on linux-next, we believe our version to be more
accurate than the one in the tree, so the resolution is to take our
version for all the SPDX conflicts"
Note on the SPDX license tag conversion conflicts: the SCSI tree had
done its own SPDX conversion, which in some cases conflicted with the
treewide ones done by Thomas & co.
In almost all cases, the conflicts were purely syntactic: the SCSI tree
used the old-style SPDX tags ("GPL-2.0" and "GPL-2.0+") while the
treewide conversion had used the new-style ones ("GPL-2.0-only" and
"GPL-2.0-or-later").
In these cases I picked the new-style one.
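To make the two styles concrete: the conflict is just the identifier
string in the tag line at the top of each source file. A minimal
illustration (the identifiers are real SPDX ids; the surrounding
comment lines are hypothetical, not from any of the conflicting files):

    // SPDX-License-Identifier: GPL-2.0
    /* old-style id, as used by the SCSI tree: means GPL v2 only */

    // SPDX-License-Identifier: GPL-2.0-only
    /* new-style id from the treewide conversion: same license,
     * with the "only" choice spelled out explicitly
     */

The same pairing holds for "GPL-2.0+" versus "GPL-2.0-or-later".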
In a few cases, the SPDX conversion was actually different, though. As
explained by James above, and in more detail in a pre-pull-request
thread:
"The other problem is actually substantive: In the libsas code Luben
Tuikov originally specified gpl 2.0 only by dint of stating:
* This file is licensed under GPLv2.
In all the libsas files, but then muddied the water by quoting GPLv2
verbatim (which includes the or later than language). So for these
files Christoph did the conversion to v2 only SPDX tags and Thomas
converted to v2 or later tags"
So in those cases, where the SPDX tag substantially mattered, I took the
SCSI tree conversion of it, but then also took the opportunity to turn
the old-style "GPL-2.0" into a new-style "GPL-2.0-only" tag.
Similarly, when there were whitespace differences or other differences
to the comments around the copyright notices, I took the version from
the SCSI tree as being the more specific conversion.
Finally, in the SPDX conversions that had no conflicts (because the
treewide ones hadn't been done for those files), I just took the SCSI
tree version as-is, even if it was old-style. The old-style conversions
are perfectly valid, even if the "-only" and "-or-later" versions are
perhaps more descriptive.
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (185 commits)
scsi: qla2xxx: move IO flush to the front of NVME rport unregistration
scsi: qla2xxx: Fix NVME cmd and LS cmd timeout race condition
scsi: qla2xxx: on session delete, return nvme cmd
scsi: qla2xxx: Fix kernel crash after disconnecting NVMe devices
scsi: megaraid_sas: Update driver version to 07.710.06.00-rc1
scsi: megaraid_sas: Introduce various Aero performance modes
scsi: megaraid_sas: Use high IOPS queues based on IO workload
scsi: megaraid_sas: Set affinity for high IOPS reply queues
scsi: megaraid_sas: Enable coalescing for high IOPS queues
scsi: megaraid_sas: Add support for High IOPS queues
scsi: megaraid_sas: Add support for MPI toolbox commands
scsi: megaraid_sas: Offload Aero RAID5/6 division calculations to driver
scsi: megaraid_sas: RAID1 PCI bandwidth limit algorithm is applicable for only Ventura
scsi: megaraid_sas: Add check for count returned by HOST_DEVICE_LIST DCMD
scsi: megaraid_sas: Handle sequence JBOD map failure at driver level
scsi: megaraid_sas: Don't send FPIO to RL Bypass queue
scsi: megaraid_sas: In probe context, retry IOC INIT once if firmware is in fault
scsi: megaraid_sas: Release Mutex lock before OCR in case of DCMD timeout
scsi: megaraid_sas: Call disable_irq from process IRQ poll
scsi: megaraid_sas: Remove few debug counters from IO path
...
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 332
1 file changed, 279 insertions(+), 53 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 06170824a69b..d5812719de2b 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -220,19 +220,68 @@ lpfc_nvmet_cmd_template(void)
 	/* Word 12, 13, 14, 15 - is zero */
 }
 
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+static struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	unsigned long iflag;
+	bool found = false;
+
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+			continue;
+
+		found = true;
+		break;
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+	if (found)
+		return ctxp;
+
+	return NULL;
+}
+
+static struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	unsigned long iflag;
+	bool found = false;
+
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+		if (ctxp->oxid != oxid || ctxp->sid != sid)
+			continue;
+
+		found = true;
+		break;
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+	if (found)
+		return ctxp;
+
+	return NULL;
+}
+#endif
+
 static void
 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
 {
 	lockdep_assert_held(&ctxp->ctxlock);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
+			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
 			ctxp->oxid, ctxp->flag);
 
 	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
 		return;
 
 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
+	spin_lock(&phba->sli4_hba.t_active_list_lock);
+	list_del(&ctxp->list);
+	spin_unlock(&phba->sli4_hba.t_active_list_lock);
 	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
@@ -343,16 +392,23 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	}
 
 	if (ctxp->rqb_buffer) {
-		nvmebuf = ctxp->rqb_buffer;
 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
-		ctxp->rqb_buffer = NULL;
-		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
-			ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
-			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
-			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+		nvmebuf = ctxp->rqb_buffer;
+		/* check if freed in another path whilst acquiring lock */
+		if (nvmebuf) {
+			ctxp->rqb_buffer = NULL;
+			if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+				ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
+				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
+								    nvmebuf);
+			} else {
+				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+				/* repost */
+				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+			}
 		} else {
 			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
-			lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 		}
 	}
 	ctxp->state = LPFC_NVMET_STE_FREE;
@@ -388,8 +444,9 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 		spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-		if (ctxp->ts_cmd_nvme) {
-			ctxp->ts_cmd_nvme = ktime_get_ns();
+		/* NOTE: isr time stamp is stale when context is re-assigned*/
+		if (ctxp->ts_isr_cmd) {
+			ctxp->ts_cmd_nvme = 0;
 			ctxp->ts_nvme_data = 0;
 			ctxp->ts_data_wqput = 0;
 			ctxp->ts_isr_data = 0;
@@ -402,9 +459,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 #endif
 		atomic_inc(&tgtp->rcv_fcp_cmd_in);
 
-		/* flag new work queued, replacement buffer has already
-		 * been reposted
-		 */
+		/* Indicate that a replacement buffer has been posted */
 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
 		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
@@ -433,6 +488,9 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	 * Use the CPU context list, from the MRQ the IO was received on
 	 * (ctxp->idx), to save context structure.
 	 */
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_del_init(&ctxp->list);
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 	cpu = raw_smp_processor_id();
 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
@@ -700,8 +758,10 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 		}
 
 		lpfc_printf_log(phba, KERN_INFO, logerr,
-				"6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
-				ctxp->oxid, status, result, ctxp->flag);
+				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
+				"XBUSY:x%x\n",
+				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
+				status, result, ctxp->flag);
 
 	} else {
 		rsp->fcp_error = NVME_SC_SUCCESS;
@@ -849,7 +909,6 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
 	 * before freeing ctxp and iocbq.
 	 */
 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-	ctxp->rqb_buffer = 0;
 	atomic_inc(&nvmep->xmt_ls_rsp);
 	return 0;
 }
@@ -922,7 +981,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6102 IO xri x%x aborted\n",
+				"6102 IO oxid x%x aborted\n",
 				ctxp->oxid);
 		rc = -ENXIO;
 		goto aerr;
@@ -1022,7 +1081,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 		ctxp->hdwq = &phba->sli4_hba.hdwq[0];
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
 			ctxp->oxid, ctxp->flag, ctxp->state);
 
 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
@@ -1035,7 +1094,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 	/* Since iaab/iaar are NOT set, we need to check
 	 * if the firmware is in process of aborting IO
 	 */
-	if (ctxp->flag & LPFC_NVMET_XBUSY) {
+	if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		return;
 	}
@@ -1098,6 +1157,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
 			ctxp->state, aborting);
 
 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
+	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
 
 	if (aborting)
 		return;
@@ -1122,7 +1182,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
 	if (!nvmebuf) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
-				"6425 Defer rcv: no buffer xri x%x: "
+				"6425 Defer rcv: no buffer oxid x%x: "
 				"flg %x ste %x\n",
 				ctxp->oxid, ctxp->flag, ctxp->state);
 		return;
@@ -1514,10 +1574,12 @@ void
 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 			    struct sli4_wcqe_xri_aborted *axri)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
+	struct nvmefc_tgt_fcp_req *req = NULL;
 	struct lpfc_nodelist *ndlp;
 	unsigned long iflag = 0;
 	int rrq_empty = 0;
@@ -1548,7 +1610,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		 */
 		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
 		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
-			list_del(&ctxp->list);
+			list_del_init(&ctxp->list);
 			released = true;
 		}
 		ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -1568,7 +1630,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		}
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-				"6318 XB aborted oxid %x flg x%x (%x)\n",
+				"6318 XB aborted oxid x%x flg x%x (%x)\n",
 				ctxp->oxid, ctxp->flag, released);
 		if (released)
 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1579,6 +1641,33 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 	}
 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
+	if (ctxp) {
+		/*
+		 * Abort already done by FW, so BA_ACC sent.
+		 * However, the transport may be unaware.
+		 */
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
+				"flag x%x oxid x%x rxid x%x\n",
+				xri, ctxp->state, ctxp->flag, ctxp->oxid,
+				rxid);
+
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+		ctxp->state = LPFC_NVMET_STE_ABORT;
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+		lpfc_nvmeio_data(phba,
+				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+				 xri, raw_smp_processor_id(), 0);
+
+		req = &ctxp->ctx.fcp_req;
+		if (req)
+			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
+	}
+#endif
 }
 
 int
@@ -1589,19 +1678,23 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
 	struct nvmefc_tgt_fcp_req *rsp;
-	uint16_t xri;
+	uint32_t sid;
+	uint16_t oxid, xri;
 	unsigned long iflag = 0;
 
-	xri = be16_to_cpu(fc_hdr->fh_ox_id);
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 
 	spin_lock_irqsave(&phba->hbalock, iflag);
 	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				 list) {
-		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+		if (ctxp->oxid != oxid || ctxp->sid != sid)
 			continue;
 
+		xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -1626,11 +1719,93 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 
-	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-			 xri, raw_smp_processor_id(), 1);
+	/* check the wait list */
+	if (phba->sli4_hba.nvmet_io_wait_cnt) {
+		struct rqb_dmabuf *nvmebuf;
+		struct fc_frame_header *fc_hdr_tmp;
+		u32 sid_tmp;
+		u16 oxid_tmp;
+		bool found = false;
+
+		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
+		/* match by oxid and s_id */
+		list_for_each_entry(nvmebuf,
+				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
+				    hbuf.list) {
+			fc_hdr_tmp = (struct fc_frame_header *)
+					(nvmebuf->hbuf.virt);
+			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
+			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
+			if (oxid_tmp != oxid || sid_tmp != sid)
+				continue;
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+					"6321 NVMET Rcv ABTS oxid x%x from x%x "
+					"is waiting for a ctxp\n",
+					oxid, sid);
+
+			list_del_init(&nvmebuf->hbuf.list);
+			phba->sli4_hba.nvmet_io_wait_cnt--;
+			found = true;
+			break;
+		}
+		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+				       iflag);
+
+		/* free buffer since already posted a new DMA buffer to RQ */
+		if (found) {
+			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+			/* Respond with BA_ACC accordingly */
+			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+			return 0;
+		}
+	}
+
+	/* check active list */
+	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
+	if (ctxp) {
+		xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+		lpfc_nvmeio_data(phba,
+				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+				 xri, raw_smp_processor_id(), 0);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
+				"flag x%x state x%x\n",
+				ctxp->oxid, xri, ctxp->flag, ctxp->state);
+
+		if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
+			/* Notify the transport */
+			nvmet_fc_rcv_fcp_abort(phba->targetport,
+					       &ctxp->ctx.fcp_req);
+		} else {
+			cancel_work_sync(&ctxp->ctxbuf->defer_work);
+			spin_lock_irqsave(&ctxp->ctxlock, iflag);
+			lpfc_nvmet_defer_release(phba, ctxp);
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+		}
+		if (ctxp->state == LPFC_NVMET_STE_RCV)
+			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+							 ctxp->oxid);
+		else
+			lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+						       ctxp->oxid);
+
+		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+		return 0;
+	}
+
+	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
+			 oxid, raw_smp_processor_id(), 1);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
+			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
 
 	/* Respond with BA_RJT accordingly */
 	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
@@ -1714,6 +1889,18 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
 			return;
 		}
+		if (rc == WQE_SUCCESS) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+			if (ctxp->ts_cmd_nvme) {
+				if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
+					ctxp->ts_status_wqput = ktime_get_ns();
+				else
+					ctxp->ts_data_wqput = ktime_get_ns();
+			}
+#endif
+		} else {
+			WARN_ON(rc);
+		}
 	}
 	wq->q_flag &= ~HBA_NVMET_WQFULL;
 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -1879,8 +2066,20 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
 		return;
 	}
 
+	if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6324 IO oxid x%x aborted\n",
+				ctxp->oxid);
+		return;
+	}
+
 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	ctxp->flag |= LPFC_NVMET_TNOTIFY;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (ctxp->ts_isr_cmd)
+		ctxp->ts_cmd_nvme = ktime_get_ns();
+#endif
 	/*
 	 * The calling sequence should be:
 	 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
@@ -1930,6 +2129,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
 		return;
 	}
+	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -2019,6 +2219,8 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
  * @phba: pointer to lpfc hba data structure.
  * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ * @isr_timestamp: in jiffies.
+ * @cqflag: cq processing information regarding workload.
  *
  * This routine is used for processing the WQE associated with a unsolicited
  * event. It first determines whether there is an existing ndlp that matches
@@ -2031,7 +2233,8 @@ static void
 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 			    uint32_t idx,
 			    struct rqb_dmabuf *nvmebuf,
-			    uint64_t isr_timestamp)
+			    uint64_t isr_timestamp,
+			    uint8_t cqflag)
 {
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
@@ -2118,6 +2321,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	sid = sli4_sid_from_fc_hdr(fc_hdr);
 
 	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 	if (ctxp->state != LPFC_NVMET_STE_FREE) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6414 NVMET Context corrupt %d %d oxid x%x\n",
@@ -2140,24 +2346,41 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	if (isr_timestamp) {
+	if (isr_timestamp)
 		ctxp->ts_isr_cmd = isr_timestamp;
-		ctxp->ts_cmd_nvme = ktime_get_ns();
-		ctxp->ts_nvme_data = 0;
-		ctxp->ts_data_wqput = 0;
-		ctxp->ts_isr_data = 0;
-		ctxp->ts_data_nvme = 0;
-		ctxp->ts_nvme_status = 0;
-		ctxp->ts_status_wqput = 0;
-		ctxp->ts_isr_status = 0;
-		ctxp->ts_status_nvme = 0;
-	} else {
-		ctxp->ts_cmd_nvme = 0;
-	}
+	ctxp->ts_cmd_nvme = 0;
+	ctxp->ts_nvme_data = 0;
+	ctxp->ts_data_wqput = 0;
+	ctxp->ts_isr_data = 0;
+	ctxp->ts_data_nvme = 0;
+	ctxp->ts_nvme_status = 0;
+	ctxp->ts_status_wqput = 0;
+	ctxp->ts_isr_status = 0;
+	ctxp->ts_status_nvme = 0;
 #endif
 
 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
-	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+	/* check for cq processing load */
+	if (!cqflag) {
+		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+		return;
+	}
+
+	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+				"6325 Unable to queue work for oxid x%x. "
+				"FCP Drop IO [x%x x%x x%x]\n",
+				ctxp->oxid,
+				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->rcv_fcp_cmd_out),
+				atomic_read(&tgtp->xmt_fcp_release));
+
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		lpfc_nvmet_defer_release(phba, ctxp);
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+	}
 }
 
 /**
@@ -2194,6 +2417,8 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
+ * @isr_timestamp: in jiffies.
+ * @cqflag: cq processing information regarding workload.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
@@ -2205,14 +2430,14 @@ void
 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
 			   uint32_t idx,
 			   struct rqb_dmabuf *nvmebuf,
-			   uint64_t isr_timestamp)
+			   uint64_t isr_timestamp,
+			   uint8_t cqflag)
 {
 	if (phba->nvmet_support == 0) {
 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
 		return;
 	}
-	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
-				    isr_timestamp);
+	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
 }
 
 /**
@@ -2750,7 +2975,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-		list_del(&ctxp->list);
+		list_del_init(&ctxp->list);
 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		released = true;
 	}
@@ -2759,7 +2984,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	atomic_inc(&tgtp->xmt_abort_rsp);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
+			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
 			"WCQE: %08x %08x %08x %08x\n",
 			ctxp->oxid, ctxp->flag, released,
 			wcqe->word0, wcqe->total_data_placed,
@@ -2834,7 +3059,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-		list_del(&ctxp->list);
+		list_del_init(&ctxp->list);
 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		released = true;
 	}
@@ -2843,7 +3068,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	atomic_inc(&tgtp->xmt_abort_rsp);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6316 ABTS cmpl xri x%x flg x%x (%x) "
+			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
 			"WCQE: %08x %08x %08x %08x\n",
 			ctxp->oxid, ctxp->flag, released,
 			wcqe->word0, wcqe->total_data_placed,
@@ -3214,7 +3439,7 @@ aerr:
 	spin_lock_irqsave(&ctxp->ctxlock, flags);
 	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
-		list_del(&ctxp->list);
+		list_del_init(&ctxp->list);
 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		released = true;
 	}
@@ -3223,8 +3448,9 @@ aerr:
 	atomic_inc(&tgtp->xmt_abort_rsp_error);
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
-			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
-			ctxp->oxid, rc);
+			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
+			"(%x)\n",
+			ctxp->oxid, rc, released);
 	if (released)
 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 	return 1;
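A side note on a pattern that recurs in the hunks above: several
list_del(&ctxp->list) calls become list_del_init(&ctxp->list). With
plain list_del() the kernel poisons the entry's next/prev pointers, so
a later list_empty() check or a second deletion on that entry is a bug;
list_del_init() re-initializes the entry to an empty list, which is
what ctxp->list needs now that a context can migrate between
t_active_ctx_list and the abort list. A minimal standalone sketch of
the difference (illustrative only; struct item and remove_item() are
made up, the list API is the real <linux/list.h> one):

	#include <linux/list.h>

	struct item {
		struct list_head link;
	};

	static void remove_item(struct item *it, bool will_reuse)
	{
		if (will_reuse) {
			/* unlink and re-init: afterwards
			 * list_empty(&it->link) is true, and the entry can
			 * safely be deleted again or added to another list
			 */
			list_del_init(&it->link);
		} else {
			/* unlink and poison next/prev (LIST_POISON1/2):
			 * the entry must not be used as a list node again
			 */
			list_del(&it->link);
		}
	}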