Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c')
-rw-r--r--   drivers/scsi/lpfc/lpfc_nvmet.c | 470
1 file changed, 354 insertions(+), 116 deletions(-)
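The diff below makes two functional changes to the lpfc NVME target path. First, per-IO WQE initialization for TSEND/TRECEIVE/TRSP is moved into static templates built once by lpfc_nvmet_cmd_template(); lpfc_nvmet_prep_fcp_wqe() then copies the constant word ranges and only patches the per-IO fields (XRI, offsets, lengths). The following is a minimal sketch of that copy-then-patch pattern, using simplified stand-in types and placeholder bit values, not the driver's real union lpfc_wqe128 layout:

/*
 * Sketch only: prebuilt command template plus per-IO patching.
 * struct wqe128 and the word values are illustrative stand-ins.
 */
#include <stdint.h>
#include <string.h>

struct wqe128 {
        uint32_t words[32];
};

static struct wqe128 tsend_template;

/* Built once at init time: only the fields that never change per IO. */
static void build_tsend_template(void)
{
        memset(&tsend_template, 0, sizeof(tsend_template));
        tsend_template.words[7]  = 0x1f;   /* command/class bits: placeholder values */
        tsend_template.words[10] = 0x300;  /* dbde/xc/iod bits: placeholder values */
}

/* Per-IO setup: copy the constant words, then fill in the variable ones. */
static void prep_tsend(struct wqe128 *wqe, uint32_t xri, uint32_t data_len)
{
        memcpy(&wqe->words[7], &tsend_template.words[7], sizeof(uint32_t) * 5);
        wqe->words[6]  = xri;       /* ctxt_tag/xri_tag vary per IO */
        wqe->words[12] = data_len;  /* fcp_data_len varies per IO */
}

This avoids re-deriving a dozen bit fields on every command, which is what the template hunks in the diff accomplish. The second change, WQ-full deferral, is sketched after the diff.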
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 8dbf5c9d51aa..7271c9d885dd 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/fc/fc_fs.h>
 
-#include <../drivers/nvme/host/nvme.h>
+#include <linux/nvme.h>
 #include <linux/nvme-fc-driver.h>
 #include <linux/nvme-fc.h>
 
@@ -71,6 +71,151 @@ static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
+static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
+                                    struct lpfc_nvmet_rcv_ctx *);
+
+static union lpfc_wqe128 lpfc_tsend_cmd_template;
+static union lpfc_wqe128 lpfc_treceive_cmd_template;
+static union lpfc_wqe128 lpfc_trsp_cmd_template;
+
+/* Setup WQE templates for NVME IOs */
+void
+lpfc_nvmet_cmd_template(void)
+{
+        union lpfc_wqe128 *wqe;
+
+        /* TSEND template */
+        wqe = &lpfc_tsend_cmd_template;
+        memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+        /* Word 0, 1, 2 - BDE is variable */
+
+        /* Word 3 - payload_offset_len is zero */
+
+        /* Word 4 - relative_offset is variable */
+
+        /* Word 5 - is zero */
+
+        /* Word 6 - ctxt_tag, xri_tag is variable */
+
+        /* Word 7 - wqe_ar is variable */
+        bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
+        bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
+        bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
+        bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
+        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
+
+        /* Word 8 - abort_tag is variable */
+
+        /* Word 9 - reqtag, rcvoxid is variable */
+
+        /* Word 10 - wqes, xc is variable */
+        bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+        bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
+        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
+        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
+        bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
+        bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
+
+        /* Word 11 - sup, irsp, irsplen is variable */
+        bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
+        bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
+        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
+        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
+        bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
+
+        /* Word 12 - fcp_data_len is variable */
+
+        /* Word 13, 14, 15 - PBDE is zero */
+
+        /* TRECEIVE template */
+        wqe = &lpfc_treceive_cmd_template;
+        memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+        /* Word 0, 1, 2 - BDE is variable */
+
+        /* Word 3 */
+        wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
+
+        /* Word 4 - relative_offset is variable */
+
+        /* Word 5 - is zero */
+
+        /* Word 6 - ctxt_tag, xri_tag is variable */
+
+        /* Word 7 */
+        bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
+        bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
+        bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
+        bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
+        bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
+
+        /* Word 8 - abort_tag is variable */
+
+        /* Word 9 - reqtag, rcvoxid is variable */
+
+        /* Word 10 - xc is variable */
+        bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
+        bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
+        bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
+        bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
+        bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
+        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
+
+        /* Word 11 - pbde is variable */
+        bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
+        bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+        bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
+        bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
+        bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
+        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
+
+        /* Word 12 - fcp_data_len is variable */
+
+        /* Word 13, 14, 15 - PBDE is variable */
+
+        /* TRSP template */
+        wqe = &lpfc_trsp_cmd_template;
+        memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+        /* Word 0, 1, 2 - BDE is variable */
+
+        /* Word 3 - response_len is variable */
+
+        /* Word 4, 5 - is zero */
+
+        /* Word 6 - ctxt_tag, xri_tag is variable */
+
+        /* Word 7 */
+        bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
+        bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
+        bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
+        bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
+        bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
+
+        /* Word 8 - abort_tag is variable */
+
+        /* Word 9 - reqtag is variable */
+
+        /* Word 10 wqes, xc is variable */
+        bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
+        bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
+        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
+        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
+        bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
+        bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
+
+        /* Word 11 irsp, irsplen is variable */
+        bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
+        bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+        bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
+        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
+        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
+        bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
+
+        /* Word 12, 13, 14, 15 - is zero */
+}
 
 void
 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
@@ -130,7 +275,7 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
         if (tgtp) {
                 if (status) {
                         atomic_inc(&tgtp->xmt_ls_rsp_error);
-                        if (status == IOERR_ABORT_REQUESTED)
+                        if (result == IOERR_ABORT_REQUESTED)
                                 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
                         if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
@@ -268,8 +413,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
                                 "NVMET RCV BUSY: xri x%x sz %d "
                                 "from %06x\n",
                                 oxid, size, sid);
-                        /* defer repost rcv buffer till .defer_rcv callback */
-                        ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
                         atomic_inc(&tgtp->rcv_fcp_cmd_out);
                         return;
                 }
@@ -541,7 +684,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                         rsp->transferred_length = 0;
                         if (tgtp) {
                                 atomic_inc(&tgtp->xmt_fcp_rsp_error);
-                                if (status == IOERR_ABORT_REQUESTED)
+                                if (result == IOERR_ABORT_REQUESTED)
                                         atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
                         }
 
@@ -741,7 +884,10 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
         struct lpfc_nvmet_rcv_ctx *ctxp =
                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
         struct lpfc_hba *phba = ctxp->phba;
+        struct lpfc_queue *wq;
         struct lpfc_iocbq *nvmewqeq;
+        struct lpfc_sli_ring *pring;
+        unsigned long iflags;
         int rc;
 
         if (phba->pport->load_flag & FC_UNLOADING) {
@@ -820,6 +966,22 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                 return 0;
         }
 
+        if (rc == -EBUSY) {
+                /*
+                 * WQ was full, so queue nvmewqeq to be sent after
+                 * WQE release CQE
+                 */
+                ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
+                wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
+                pring = wq->pring;
+                spin_lock_irqsave(&pring->ring_lock, iflags);
+                list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
+                wq->q_flag |= HBA_NVMET_WQFULL;
+                spin_unlock_irqrestore(&pring->ring_lock, iflags);
+                atomic_inc(&lpfc_nvmep->defer_wqfull);
+                return 0;
+        }
+
         /* Give back resources */
         atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -851,6 +1013,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
         struct lpfc_nvmet_rcv_ctx *ctxp =
                 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
         struct lpfc_hba *phba = ctxp->phba;
+        struct lpfc_queue *wq;
         unsigned long flags;
 
         if (phba->pport->load_flag & FC_UNLOADING)
@@ -880,6 +1043,15 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
         }
 
         ctxp->flag |= LPFC_NVMET_ABORT_OP;
+        if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
+                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+                                                 ctxp->oxid);
+                wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
+                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+                lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
+                return;
+        }
+
         /* An state of LPFC_NVMET_STE_RCV means we have just received
          * the NVME command and have not started processing it.
          * (by issuing any IO WQEs on this exchange yet)
@@ -946,11 +1118,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
         tgtp = phba->targetport->private;
         atomic_inc(&tgtp->rcv_fcp_cmd_defer);
-        if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
-                lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
-        else
-                nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-        ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+
+        /* Free the nvmebuf since a new buffer already replaced it */
+        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1124,16 +1294,10 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
                 }
                 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
                 nvmewqe = ctx_buf->iocbq;
-                wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+                wqe = &nvmewqe->wqe;
+                /* Initialize WQE */
                 memset(wqe, 0, sizeof(union lpfc_wqe));
-                /* Word 7 */
-                bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
-                bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-                /* Word 10 */
-                bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-                bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
-                bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
 
                 ctx_buf->iocbq->context1 = NULL;
                 spin_lock(&phba->sli4_hba.sgl_list_lock);
@@ -1280,6 +1444,9 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
                 atomic_set(&tgtp->xmt_abort_sol, 0);
                 atomic_set(&tgtp->xmt_abort_rsp, 0);
                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
+                atomic_set(&tgtp->defer_ctx, 0);
+                atomic_set(&tgtp->defer_fod, 0);
+                atomic_set(&tgtp->defer_wqfull, 0);
         }
         return error;
 }
@@ -1435,16 +1602,103 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
         return 0;
 }
 
+static void
+lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
+                        struct lpfc_nvmet_rcv_ctx *ctxp)
+{
+        struct lpfc_sli_ring *pring;
+        struct lpfc_iocbq *nvmewqeq;
+        struct lpfc_iocbq *next_nvmewqeq;
+        unsigned long iflags;
+        struct lpfc_wcqe_complete wcqe;
+        struct lpfc_wcqe_complete *wcqep;
+
+        pring = wq->pring;
+        wcqep = &wcqe;
+
+        /* Fake an ABORT error code back to cmpl routine */
+        memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
+        bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
+        wcqep->parameter = IOERR_ABORT_REQUESTED;
+
+        spin_lock_irqsave(&pring->ring_lock, iflags);
+        list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
+                                 &wq->wqfull_list, list) {
+                if (ctxp) {
+                        /* Checking for a specific IO to flush */
+                        if (nvmewqeq->context2 == ctxp) {
+                                list_del(&nvmewqeq->list);
+                                spin_unlock_irqrestore(&pring->ring_lock,
+                                                       iflags);
+                                lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
+                                                          wcqep);
+                                return;
+                        }
+                        continue;
+                } else {
+                        /* Flush all IOs */
+                        list_del(&nvmewqeq->list);
+                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
+                        lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
+                        spin_lock_irqsave(&pring->ring_lock, iflags);
+                }
+        }
+        if (!ctxp)
+                wq->q_flag &= ~HBA_NVMET_WQFULL;
+        spin_unlock_irqrestore(&pring->ring_lock, iflags);
+}
+
+void
+lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
+                          struct lpfc_queue *wq)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+        struct lpfc_sli_ring *pring;
+        struct lpfc_iocbq *nvmewqeq;
+        unsigned long iflags;
+        int rc;
+
+        /*
+         * Some WQE slots are available, so try to re-issue anything
+         * on the WQ wqfull_list.
+         */
+        pring = wq->pring;
+        spin_lock_irqsave(&pring->ring_lock, iflags);
+        while (!list_empty(&wq->wqfull_list)) {
+                list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
+                                 list);
+                spin_unlock_irqrestore(&pring->ring_lock, iflags);
+                rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
+                spin_lock_irqsave(&pring->ring_lock, iflags);
+                if (rc == -EBUSY) {
+                        /* WQ was full again, so put it back on the list */
+                        list_add(&nvmewqeq->list, &wq->wqfull_list);
+                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
+                        return;
+                }
+        }
+        wq->q_flag &= ~HBA_NVMET_WQFULL;
+        spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+#endif
+}
+
 void
 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 {
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
         struct lpfc_nvmet_tgtport *tgtp;
+        struct lpfc_queue *wq;
+        uint32_t qidx;
 
         if (phba->nvmet_support == 0)
                 return;
         if (phba->targetport) {
                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+                for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+                        wq = phba->sli4_hba.nvme_wq[qidx];
+                        lpfc_nvmet_wqfull_flush(phba, wq, NULL);
+                }
                 init_completion(&tgtp->tport_unreg_done);
                 nvmet_fc_unregister_targetport(phba->targetport);
                 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
@@ -1694,6 +1948,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
         lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
                          oxid, size, smp_processor_id());
 
+        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
         if (!ctx_buf) {
                 /* Queue this NVME IO to process later */
                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
@@ -1709,10 +1965,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                 lpfc_post_rq_buffer(
                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+
+                atomic_inc(&tgtp->defer_ctx);
                 return;
         }
 
-        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
         payload = (uint32_t *)(nvmebuf->dbuf.virt);
         sid = sli4_sid_from_fc_hdr(fc_hdr);
@@ -1776,12 +2033,20 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 
         /* Processing of FCP command is deferred */
         if (rc == -EOVERFLOW) {
+                /*
+                 * Post a brand new DMA buffer to RQ and defer
+                 * freeing rcv buffer till .defer_rcv callback
+                 */
+                qno = nvmebuf->idx;
+                lpfc_post_rq_buffer(
+                        phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+                        phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+
                 lpfc_nvmeio_data(phba,
                                  "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
                                  oxid, size, sid);
-                /* defer reposting rcv buffer till .defer_rcv callback */
-                ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
+                atomic_inc(&tgtp->defer_fod);
                 return;
         }
         ctxp->rqb_buffer = nvmebuf;
@@ -1897,7 +2162,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
 {
         struct lpfc_nodelist *ndlp;
         struct lpfc_iocbq *nvmewqe;
-        union lpfc_wqe *wqe;
+        union lpfc_wqe128 *wqe;
 
         if (!lpfc_is_link_up(phba)) {
                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
@@ -2023,9 +2288,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
         struct lpfc_iocbq *nvmewqe;
         struct scatterlist *sgel;
         union lpfc_wqe128 *wqe;
+        struct ulp_bde64 *bde;
         uint32_t *txrdy;
         dma_addr_t physaddr;
         int i, cnt;
+        int do_pbde;
         int xc = 1;
 
         if (!lpfc_is_link_up(phba)) {
@@ -2078,7 +2345,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
             (ctxp->entry_cnt == 1)) ||
             (ctxp->state == LPFC_NVMET_STE_DATA)) {
-                wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+                wqe = &nvmewqe->wqe;
         } else {
                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                 "6111 Wrong state NVMET FCP: %d cnt %d\n",
@@ -2090,6 +2357,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
         switch (rsp->op) {
         case NVMET_FCOP_READDATA:
         case NVMET_FCOP_READDATA_RSP:
+                /* From the tsend template, initialize words 7 - 11 */
+                memcpy(&wqe->words[7],
+                       &lpfc_tsend_cmd_template.words[7],
+                       sizeof(uint32_t) * 5);
+
                 /* Words 0 - 2 : The first sg segment */
                 sgel = &rsp->sg[0];
                 physaddr = sg_dma_address(sgel);
@@ -2106,6 +2378,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 wqe->fcp_tsend.relative_offset = ctxp->offset;
 
                 /* Word 5 */
+                wqe->fcp_tsend.reserved = 0;
 
                 /* Word 6 */
                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
@@ -2113,9 +2386,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
                        nvmewqe->sli4_xritag);
 
-                /* Word 7 */
-                bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
-                bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
+                /* Word 7 - set ar later */
 
                 /* Word 8 */
                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
@@ -2124,23 +2395,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
 
-                /* Word 10 */
-                bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-                bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
-                bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
-                bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
-                       LPFC_WQE_LENLOC_WORD12);
-                bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
-                bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
-                bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-                if (phba->cfg_nvme_oas)
-                        bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
+                /* Word 10 - set wqes later, in template xc=1 */
+                if (!xc)
+                        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
 
-                /* Word 11 */
-                bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
-                       LPFC_WQE_CQ_ID_DEFAULT);
-                bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
-                       FCP_COMMAND_TSEND);
+                /* Word 11 - set sup, irsp, irsplen later */
+                do_pbde = 0;
 
                 /* Word 12 */
                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
@@ -2162,15 +2422,14 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 sgl++;
                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
-                        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
-                        if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
-                            (rsp->rsplen == 12)) {
-                                bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
-                                bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
-                                bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
-                                bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
+
+                        /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
+
+                        if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
+                                if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
+                                        bf_set(wqe_sup,
+                                               &wqe->fcp_tsend.wqe_com, 1);
                         } else {
-                                bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
@@ -2181,15 +2440,17 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 } else {
                         atomic_inc(&tgtp->xmt_fcp_read);
-                        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
-                        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
-                        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
+                        /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
-                        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
                 }
                 break;
 
         case NVMET_FCOP_WRITEDATA:
+                /* From the treceive template, initialize words 3 - 11 */
+                memcpy(&wqe->words[3],
+                       &lpfc_treceive_cmd_template.words[3],
+                       sizeof(uint32_t) * 9);
+
                 /* Words 0 - 2 : The first sg segment */
                 txrdy = dma_pool_alloc(phba->txrdy_payload_pool, GFP_KERNEL,
                                        &physaddr);
@@ -2208,14 +2469,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 wqe->fcp_treceive.bde.addrHigh =
                         cpu_to_le32(putPaddrHigh(physaddr));
 
-                /* Word 3 */
-                wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
-
                 /* Word 4 */
                 wqe->fcp_treceive.relative_offset = ctxp->offset;
 
-                /* Word 5 */
-
                 /* Word 6 */
                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -2223,10 +2479,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                        nvmewqe->sli4_xritag);
 
                 /* Word 7 */
-                bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
-                bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
-                bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
-                       CMD_FCP_TRECEIVE64_WQE);
 
                 /* Word 8 */
                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
@@ -2235,26 +2487,17 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
 
-                /* Word 10 */
-                bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
-                bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
-                bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
-                bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
-                       LPFC_WQE_LENLOC_WORD12);
-                bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
-                bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
-                bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
-                bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
-                bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
-                if (phba->cfg_nvme_oas)
-                        bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
+                /* Word 10 - in template xc=1 */
+                if (!xc)
+                        bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
 
-                /* Word 11 */
-                bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
-                       LPFC_WQE_CQ_ID_DEFAULT);
-                bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
-                       FCP_COMMAND_TRECEIVE);
-                bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
+                /* Word 11 - set pbde later */
+                if (phba->nvme_embed_pbde) {
+                        do_pbde = 1;
+                } else {
+                        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
+                        do_pbde = 0;
+                }
 
                 /* Word 12 */
                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
@@ -2282,6 +2525,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 break;
 
         case NVMET_FCOP_RSP:
+                /* From the treceive template, initialize words 4 - 11 */
+                memcpy(&wqe->words[4],
+                       &lpfc_trsp_cmd_template.words[4],
+                       sizeof(uint32_t) * 8);
+
                 /* Words 0 - 2 */
                 physaddr = rsp->rspdma;
                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
@@ -2294,12 +2542,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 /* Word 3 */
                 wqe->fcp_trsp.response_len = rsp->rsplen;
 
-                /* Word 4 */
-                wqe->fcp_trsp.rsvd_4_5[0] = 0;
-
-
-                /* Word 5 */
-
                 /* Word 6 */
                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -2307,9 +2549,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                        nvmewqe->sli4_xritag);
 
                 /* Word 7 */
-                bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
-                bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
-                bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
 
                 /* Word 8 */
                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
@@ -2319,35 +2558,23 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
 
                 /* Word 10 */
-                bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
-                bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
-                bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
-                bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
-                       LPFC_WQE_LENLOC_WORD3);
-                bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
-                bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
-                if (phba->cfg_nvme_oas)
-                        bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
+                if (xc)
+                        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
 
                 /* Word 11 */
-                bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
-                       LPFC_WQE_CQ_ID_DEFAULT);
-                bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
-                       FCP_COMMAND_TRSP);
-                bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
-
-                if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
-                        /* Good response - all zero's on wire */
-                        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
-                        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
-                        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
-                } else {
+                /* In template wqes=0 irsp=0 irsplen=0 - good response */
+                if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
+                        /* Bad response - embed it */
                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
                                ((rsp->rsplen >> 2) - 1));
                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
                 }
+                do_pbde = 0;
+
+                /* Word 12 */
+                wqe->fcp_trsp.rsvd_12_15[0] = 0;
 
                 /* Use rspbuf, NOT sg list */
                 rsp->sg_cnt = 0;
@@ -2380,6 +2607,17 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                         bf_set(lpfc_sli4_sge_last, sgl, 1);
                 sgl->word2 = cpu_to_le32(sgl->word2);
                 sgl->sge_len = cpu_to_le32(cnt);
+                if (do_pbde && i == 0) {
+                        bde = (struct ulp_bde64 *)&wqe->words[13];
+                        memset(bde, 0, sizeof(struct ulp_bde64));
+                        /* Words 13-15 (PBDE)*/
+                        bde->addrLow = sgl->addr_lo;
+                        bde->addrHigh = sgl->addr_hi;
+                        bde->tus.f.bdeSize =
+                                le32_to_cpu(sgl->sge_len);
+                        bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+                        bde->tus.w = cpu_to_le32(bde->tus.w);
+                }
                 sgl++;
                 ctxp->offset += cnt;
         }
@@ -2597,7 +2835,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
 {
         struct lpfc_nvmet_tgtport *tgtp;
         struct lpfc_iocbq *abts_wqeq;
-        union lpfc_wqe *wqe_abts;
+        union lpfc_wqe128 *wqe_abts;
         struct lpfc_nodelist *ndlp;
 
         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
@@ -2692,7 +2930,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
 {
         struct lpfc_nvmet_tgtport *tgtp;
         struct lpfc_iocbq *abts_wqeq;
-        union lpfc_wqe *abts_wqe;
+        union lpfc_wqe128 *abts_wqe;
         struct lpfc_nodelist *ndlp;
         unsigned long flags;
         int rc;
@@ -2882,7 +3120,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
 {
         struct lpfc_nvmet_tgtport *tgtp;
         struct lpfc_iocbq *abts_wqeq;
-        union lpfc_wqe *wqe_abts;
+        union lpfc_wqe128 *wqe_abts;
         unsigned long flags;
         int rc;
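The other major change above is WQ-full handling: when lpfc_sli4_issue_wqe() returns -EBUSY, the prepared WQE is parked on the work queue's wqfull_list and HBA_NVMET_WQFULL is set; lpfc_nvmet_wqfull_process() re-issues parked entries once the WQE release CQE signals free slots, and lpfc_nvmet_wqfull_flush() completes parked IOs with a faked IOERR_ABORT_REQUESTED status on abort or targetport teardown. A rough, self-contained sketch of the defer/drain idea follows; the names and the submit() callback are hypothetical stand-ins, and the driver's ring locking and flush path are omitted:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct request {
        struct request *next;
};

struct workqueue {
        struct request *wqfull_head;
        struct request *wqfull_tail;
        bool wqfull;
        int (*submit)(struct request *req);     /* returns 0 or -EBUSY */
};

/* Send one request; on -EBUSY park it until slots are released. */
static int wq_send(struct workqueue *wq, struct request *req)
{
        int rc = wq->submit(req);

        if (rc != -EBUSY)
                return rc;

        req->next = NULL;
        if (wq->wqfull_tail)
                wq->wqfull_tail->next = req;
        else
                wq->wqfull_head = req;
        wq->wqfull_tail = req;
        wq->wqfull = true;
        return 0;
}

/* Called when completions indicate free slots (the WQE release CQE). */
static void wq_drain(struct workqueue *wq)
{
        while (wq->wqfull_head) {
                struct request *req = wq->wqfull_head;

                if (wq->submit(req) == -EBUSY)
                        return;         /* still full; retry on next release */

                wq->wqfull_head = req->next;
                if (!wq->wqfull_head)
                        wq->wqfull_tail = NULL;
        }
        wq->wqfull = false;
}

The driver version does the same work under the ring lock and, in the flush case, walks the list and hands each parked WQE to lpfc_nvmet_xmt_fcp_op_cmp() with an abort status instead of re-issuing it.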