Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli.c | 2759
1 files changed, 1292 insertions, 1467 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 430abebf99f1..20d40957a385 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -70,8 +70,9 @@ static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, uint8_t *, uint32_t *); -static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, - struct lpfc_iocbq *); +static struct lpfc_iocbq * +lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, + struct lpfc_iocbq *rspiocbq); static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, struct hbq_dmabuf *); static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, @@ -89,17 +90,14 @@ static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe); +static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, + struct lpfc_iocbq *pwqeq, + struct lpfc_sglq *sglq); union lpfc_wqe128 lpfc_iread_cmd_template; union lpfc_wqe128 lpfc_iwrite_cmd_template; union lpfc_wqe128 lpfc_icmnd_cmd_template; -static IOCB_t * -lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) -{ - return &iocbq->iocb; -} - /* Setup WQE templates for IOs */ void lpfc_wqe_cmd_template(void) { @@ -1251,24 +1249,19 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) struct lpfc_sglq *start_sglq = NULL; struct lpfc_io_buf *lpfc_cmd; struct lpfc_nodelist *ndlp; - struct lpfc_sli_ring *pring = NULL; int found = 0; + u8 cmnd; - if (piocbq->iocb_flag & LPFC_IO_NVME_LS) - pring = phba->sli4_hba.nvmels_wq->pring; - else - pring = lpfc_phba_elsring(phba); - - lockdep_assert_held(&pring->ring_lock); + cmnd = get_job_cmnd(phba, piocbq); - if (piocbq->iocb_flag & LPFC_IO_FCP) { + if (piocbq->cmd_flag & LPFC_IO_FCP) { lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1; ndlp = lpfc_cmd->rdata->pnode; - } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && - !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { + } else if ((cmnd == CMD_GEN_REQUEST64_CR) && + !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) { ndlp = piocbq->context_un.ndlp; - } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) { - if (piocbq->iocb_flag & LPFC_IO_LOOPBACK) + } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) { + if (piocbq->cmd_flag & LPFC_IO_LOOPBACK) ndlp = NULL; else ndlp = piocbq->context_un.ndlp; @@ -1391,7 +1384,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) if (sglq) { - if (iocbq->iocb_flag & LPFC_IO_NVMET) { + if (iocbq->cmd_flag & LPFC_IO_NVMET) { spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); sglq->state = SGL_FREED; @@ -1403,7 +1396,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) goto out; } - if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && + if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) && (!(unlikely(pci_channel_offline(phba->pcidev)))) && sglq->state != SGL_XRI_ABORTED) { spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, @@ -1440,7 
+1433,7 @@ out: memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); iocbq->sli4_lxritag = NO_XRI; iocbq->sli4_xritag = NO_XRI; - iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | + iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | LPFC_IO_NVME_LS); list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } @@ -1530,17 +1523,21 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, while (!list_empty(iocblist)) { list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); - if (piocb->wqe_cmpl) { - if (piocb->iocb_flag & LPFC_IO_NVME) + if (piocb->cmd_cmpl) { + if (piocb->cmd_flag & LPFC_IO_NVME) { lpfc_nvme_cancel_iocb(phba, piocb, ulpstatus, ulpWord4); - else - lpfc_sli_release_iocbq(phba, piocb); - - } else if (piocb->iocb_cmpl) { - piocb->iocb.ulpStatus = ulpstatus; - piocb->iocb.un.ulpWord[4] = ulpWord4; - (piocb->iocb_cmpl) (phba, piocb, piocb); + } else { + if (phba->sli_rev == LPFC_SLI_REV4) { + bf_set(lpfc_wcqe_c_status, + &piocb->wcqe_cmpl, ulpstatus); + piocb->wcqe_cmpl.parameter = ulpWord4; + } else { + piocb->iocb.ulpStatus = ulpstatus; + piocb->iocb.un.ulpWord[4] = ulpWord4; + } + (piocb->cmd_cmpl) (phba, piocb, piocb); + } } else { lpfc_sli_release_iocbq(phba, piocb); } @@ -1724,20 +1721,18 @@ static int lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { - if (phba->sli_rev == LPFC_SLI_REV4) - lockdep_assert_held(&pring->ring_lock); - else - lockdep_assert_held(&phba->hbalock); + u32 ulp_command = 0; BUG_ON(!piocb); + ulp_command = get_job_cmnd(phba, piocb); list_add_tail(&piocb->list, &pring->txcmplq); - piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; + piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ; pring->txcmplq_cnt++; - if ((unlikely(pring->ringno == LPFC_ELS_RING)) && - (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && - (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { + (ulp_command != CMD_ABORT_XRI_WQE) && + (ulp_command != CMD_ABORT_XRI_CN) && + (ulp_command != CMD_CLOSE_XRI_CN)) { BUG_ON(!piocb->vport); if (!(piocb->vport->load_flag & FC_UNLOADING)) mod_timer(&piocb->vport->els_tmofunc, @@ -1773,7 +1768,7 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl * @phba: Pointer to HBA context object. * @cmdiocb: Pointer to driver command iocb object. - * @cmf_cmpl: Pointer to completed WCQE. + * @rspiocb: Pointer to driver response iocb object. * * This routine will inform the driver of any BW adjustments we need * to make. 
These changes will be picked up during the next CMF @@ -1782,10 +1777,11 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) **/ static void lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_wcqe_complete *cmf_cmpl) + struct lpfc_iocbq *rspiocb) { union lpfc_wqe128 *wqe; uint32_t status, info; + struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl; uint64_t bw, bwdif, slop; uint64_t pcent, bwpcent; int asig, afpin, sigcnt, fpincnt; @@ -1793,22 +1789,22 @@ lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, char *s; /* First check for error */ - status = bf_get(lpfc_wcqe_c_status, cmf_cmpl); + status = bf_get(lpfc_wcqe_c_status, wcqe); if (status) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6211 CMF_SYNC_WQE Error " "req_tag x%x status x%x hwstatus x%x " "tdatap x%x parm x%x\n", - bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl), - bf_get(lpfc_wcqe_c_status, cmf_cmpl), - bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl), - cmf_cmpl->total_data_placed, - cmf_cmpl->parameter); + bf_get(lpfc_wcqe_c_request_tag, wcqe), + bf_get(lpfc_wcqe_c_status, wcqe), + bf_get(lpfc_wcqe_c_hw_status, wcqe), + wcqe->total_data_placed, + wcqe->parameter); goto out; } /* Gather congestion information on a successful cmpl */ - info = cmf_cmpl->parameter; + info = wcqe->parameter; phba->cmf_active_info = info; /* See if firmware info count is valid or has changed */ @@ -1817,15 +1813,15 @@ lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, else phba->cmf_info_per_interval = info; - tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl); - cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl); + tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe); + cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe); /* Get BW requirement from firmware */ bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE; if (!bw) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6212 CMF_SYNC_WQE x%x: NULL bw\n", - bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl)); + bf_get(lpfc_wcqe_c_request_tag, wcqe)); goto out; } @@ -1999,14 +1995,13 @@ initpath: bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT); sync_buf->vport = phba->pport; - sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl; - sync_buf->iocb_cmpl = NULL; + sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl; sync_buf->context1 = NULL; sync_buf->context2 = NULL; sync_buf->context3 = NULL; sync_buf->sli4_xritag = NO_XRI; - sync_buf->iocb_flag |= LPFC_IO_CMF; + sync_buf->cmd_flag |= LPFC_IO_CMF; ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf); if (ret_val) lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, @@ -2173,7 +2168,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* * Set up an iotag */ - nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; + nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0; if (pring->ringno == LPFC_ELS_RING) { @@ -2194,9 +2189,9 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* * If there is no completion routine to call, we can release the * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, - * that have no rsp ring completion, iocb_cmpl MUST be NULL. + * that have no rsp ring completion, cmd_cmpl MUST be NULL. 
*/ - if (nextiocb->iocb_cmpl) + if (nextiocb->cmd_cmpl) lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); else __lpfc_sli_release_iocbq(phba, nextiocb); @@ -3359,6 +3354,56 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return 0; } +static void +lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba, + struct lpfc_iocbq *saveq) +{ + IOCB_t *irsp; + union lpfc_wqe128 *wqe; + u16 i = 0; + + irsp = &saveq->iocb; + wqe = &saveq->wqe; + + /* Fill wcqe with the IOCB status fields */ + bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus); + saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount; + saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4]; + saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len; + + /* Source ID */ + bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo); + + /* rx-id of the response frame */ + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext); + + /* ox-id of the frame */ + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + irsp->unsli3.rcvsli3.ox_id); + + /* DID */ + bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, + irsp->un.rcvels.remoteID); + + /* unsol data len */ + for (i = 0; i < irsp->ulpBdeCount; i++) { + struct lpfc_hbq_entry *hbqe = NULL; + + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + if (i == 0) { + hbqe = (struct lpfc_hbq_entry *) + &irsp->un.ulpWord[0]; + saveq->wqe.gen_req.bde.tus.f.bdeSize = + hbqe->bde.tus.f.bdeSize; + } else if (i == 1) { + hbqe = (struct lpfc_hbq_entry *) + &irsp->unsli3.sli3Words[4]; + saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize; + } + } + } +} + /** * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler * @phba: Pointer to HBA context object. @@ -3379,11 +3424,13 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { IOCB_t * irsp; WORD5 * w5p; + dma_addr_t paddr; uint32_t Rctl, Type; struct lpfc_iocbq *iocbq; struct lpfc_dmabuf *dmzbuf; - irsp = &(saveq->iocb); + irsp = &saveq->iocb; + saveq->vport = phba->pport; if (irsp->ulpCommand == CMD_ASYNC_STATUS) { if (pring->lpfc_sli_rcv_async_status) @@ -3401,22 +3448,22 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && - (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { + (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { if (irsp->ulpBdeCount > 0) { dmzbuf = lpfc_sli_get_buff(phba, pring, - irsp->un.ulpWord[3]); + irsp->un.ulpWord[3]); lpfc_in_buf_free(phba, dmzbuf); } if (irsp->ulpBdeCount > 1) { dmzbuf = lpfc_sli_get_buff(phba, pring, - irsp->unsli3.sli3Words[3]); + irsp->unsli3.sli3Words[3]); lpfc_in_buf_free(phba, dmzbuf); } if (irsp->ulpBdeCount > 2) { dmzbuf = lpfc_sli_get_buff(phba, pring, - irsp->unsli3.sli3Words[7]); + irsp->unsli3.sli3Words[7]); lpfc_in_buf_free(phba, dmzbuf); } @@ -3449,9 +3496,10 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, irsp->unsli3.sli3Words[7]); } list_for_each_entry(iocbq, &saveq->list, list) { - irsp = &(iocbq->iocb); + irsp = &iocbq->iocb; if (irsp->ulpBdeCount != 0) { - iocbq->context2 = lpfc_sli_get_buff(phba, pring, + iocbq->context2 = lpfc_sli_get_buff(phba, + pring, irsp->un.ulpWord[3]); if (!iocbq->context2) lpfc_printf_log(phba, @@ -3463,7 +3511,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, irsp->un.ulpWord[3]); } if (irsp->ulpBdeCount == 2) { - iocbq->context3 = lpfc_sli_get_buff(phba, pring, + iocbq->context3 = lpfc_sli_get_buff(phba, + pring, irsp->unsli3.sli3Words[7]); if 
(!iocbq->context3) lpfc_printf_log(phba, @@ -3476,7 +3525,20 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, irsp->unsli3.sli3Words[7]); } } + } else { + paddr = getPaddr(irsp->un.cont64[0].addrHigh, + irsp->un.cont64[0].addrLow); + saveq->context2 = lpfc_sli_ringpostbuf_get(phba, pring, + paddr); + if (irsp->ulpBdeCount == 2) { + paddr = getPaddr(irsp->un.cont64[1].addrHigh, + irsp->un.cont64[1].addrLow); + saveq->context3 = lpfc_sli_ringpostbuf_get(phba, + pring, + paddr); + } } + if (irsp->ulpBdeCount != 0 && (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { @@ -3494,12 +3556,14 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (!found) list_add_tail(&saveq->clist, &pring->iocb_continue_saveq); + if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { list_del_init(&iocbq->clist); saveq = iocbq; - irsp = &(saveq->iocb); - } else + irsp = &saveq->iocb; + } else { return 0; + } } if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || @@ -3522,6 +3586,19 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX || + irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { + if (irsp->unsli3.rcvsli3.vpi == 0xffff) + saveq->vport = phba->pport; + else + saveq->vport = lpfc_find_vport_by_vpid(phba, + irsp->unsli3.rcvsli3.vpi); + } + + /* Prepare WQE with Unsol frame */ + lpfc_sli_prep_unsol_wqe(phba, saveq); + if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0313 Ring %d handler: unexpected Rctl x%x " @@ -3550,36 +3627,28 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, struct lpfc_iocbq *prspiocb) { struct lpfc_iocbq *cmd_iocb = NULL; - uint16_t iotag; - spinlock_t *temp_lock = NULL; - unsigned long iflag = 0; + u16 iotag; if (phba->sli_rev == LPFC_SLI_REV4) - temp_lock = &pring->ring_lock; + iotag = get_wqe_reqtag(prspiocb); else - temp_lock = &phba->hbalock; - - spin_lock_irqsave(temp_lock, iflag); - iotag = prspiocb->iocb.ulpIoTag; + iotag = prspiocb->iocb.ulpIoTag; if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; - if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { + if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { /* remove from txcmpl queue list */ list_del_init(&cmd_iocb->list); - cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; + cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; pring->txcmplq_cnt--; - spin_unlock_irqrestore(temp_lock, iflag); return cmd_iocb; } } - spin_unlock_irqrestore(temp_lock, iflag); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0317 iotag x%x is out of " - "range: max iotag x%x wd0 x%x\n", - iotag, phba->sli.last_iotag, - *(((uint32_t *) &prspiocb->iocb) + 7)); + "range: max iotag x%x\n", + iotag, phba->sli.last_iotag); return NULL; } @@ -3600,33 +3669,23 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint16_t iotag) { struct lpfc_iocbq *cmd_iocb = NULL; - spinlock_t *temp_lock = NULL; - unsigned long iflag = 0; - - if (phba->sli_rev == LPFC_SLI_REV4) - temp_lock = &pring->ring_lock; - else - temp_lock = &phba->hbalock; - spin_lock_irqsave(temp_lock, iflag); if (iotag != 0 && iotag <= phba->sli.last_iotag) { cmd_iocb = phba->sli.iocbq_lookup[iotag]; - if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { + if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) { /* remove from txcmpl queue 
list */ list_del_init(&cmd_iocb->list); - cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; + cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; pring->txcmplq_cnt--; - spin_unlock_irqrestore(temp_lock, iflag); return cmd_iocb; } } - spin_unlock_irqrestore(temp_lock, iflag); lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0372 iotag x%x lookup error: max iotag (x%x) " - "iocb_flag x%x\n", + "cmd_flag x%x\n", iotag, phba->sli.last_iotag, - cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); + cmd_iocb ? cmd_iocb->cmd_flag : 0xffff); return NULL; } @@ -3654,18 +3713,29 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *cmdiocbp; int rc = 1; unsigned long iflag; + u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag; cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); + + ulp_command = get_job_cmnd(phba, saveq); + ulp_status = get_job_ulpstatus(phba, saveq); + ulp_word4 = get_job_word4(phba, saveq); + ulp_context = get_job_ulpcontext(phba, saveq); + if (phba->sli_rev == LPFC_SLI_REV4) + iotag = get_wqe_reqtag(saveq); + else + iotag = saveq->iocb.ulpIoTag; + if (cmdiocbp) { - if (cmdiocbp->iocb_cmpl) { + ulp_command = get_job_cmnd(phba, cmdiocbp); + if (cmdiocbp->cmd_cmpl) { /* * If an ELS command failed send an event to mgmt * application. */ - if (saveq->iocb.ulpStatus && + if (ulp_status && (pring->ringno == LPFC_ELS_RING) && - (cmdiocbp->iocb.ulpCommand == - CMD_ELS_REQUEST64_CR)) + (ulp_command == CMD_ELS_REQUEST64_CR)) lpfc_send_els_failure_event(phba, cmdiocbp, saveq); @@ -3675,11 +3745,11 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, */ if (pring->ringno == LPFC_ELS_RING) { if ((phba->sli_rev < LPFC_SLI_REV4) && - (cmdiocbp->iocb_flag & + (cmdiocbp->cmd_flag & LPFC_DRIVER_ABORTED)) { spin_lock_irqsave(&phba->hbalock, iflag); - cmdiocbp->iocb_flag &= + cmdiocbp->cmd_flag &= ~LPFC_DRIVER_ABORTED; spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -3694,12 +3764,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, */ spin_lock_irqsave(&phba->hbalock, iflag); - saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; + saveq->cmd_flag |= LPFC_DELAY_MEM_FREE; spin_unlock_irqrestore(&phba->hbalock, iflag); } if (phba->sli_rev == LPFC_SLI_REV4) { - if (saveq->iocb_flag & + if (saveq->cmd_flag & LPFC_EXCHANGE_BUSY) { /* Set cmdiocb flag for the * exchange busy so sgl (xri) @@ -3709,12 +3779,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, */ spin_lock_irqsave( &phba->hbalock, iflag); - cmdiocbp->iocb_flag |= + cmdiocbp->cmd_flag |= LPFC_EXCHANGE_BUSY; spin_unlock_irqrestore( &phba->hbalock, iflag); } - if (cmdiocbp->iocb_flag & + if (cmdiocbp->cmd_flag & LPFC_DRIVER_ABORTED) { /* * Clear LPFC_DRIVER_ABORTED @@ -3723,34 +3793,34 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, */ spin_lock_irqsave( &phba->hbalock, iflag); - cmdiocbp->iocb_flag &= + cmdiocbp->cmd_flag &= ~LPFC_DRIVER_ABORTED; spin_unlock_irqrestore( &phba->hbalock, iflag); - cmdiocbp->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - cmdiocbp->iocb.un.ulpWord[4] = - IOERR_ABORT_REQUESTED; + set_job_ulpstatus(cmdiocbp, + IOSTAT_LOCAL_REJECT); + set_job_ulpword4(cmdiocbp, + IOERR_ABORT_REQUESTED); /* * For SLI4, irsiocb contains * NO_XRI in sli_xritag, it * shall not affect releasing * sgl (xri) process. 
*/ - saveq->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - saveq->iocb.un.ulpWord[4] = - IOERR_SLI_ABORTED; + set_job_ulpstatus(saveq, + IOSTAT_LOCAL_REJECT); + set_job_ulpword4(saveq, + IOERR_SLI_ABORTED); spin_lock_irqsave( &phba->hbalock, iflag); - saveq->iocb_flag |= + saveq->cmd_flag |= LPFC_DELAY_MEM_FREE; spin_unlock_irqrestore( &phba->hbalock, iflag); } } } - (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); + (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq); } else lpfc_sli_release_iocbq(phba, cmdiocbp); } else { @@ -3768,12 +3838,8 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, "0322 Ring %d handler: " "unexpected completion IoTag x%x " "Data: x%x x%x x%x x%x\n", - pring->ringno, - saveq->iocb.ulpIoTag, - saveq->iocb.ulpStatus, - saveq->iocb.un.ulpWord[4], - saveq->iocb.ulpCommand, - saveq->iocb.ulpContext); + pring->ringno, iotag, ulp_status, + ulp_word4, ulp_command, ulp_context); } } @@ -3992,11 +4058,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, spin_lock_irqsave(&phba->hbalock, iflag); if (unlikely(!cmdiocbq)) break; - if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) - cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; - if (cmdiocbq->iocb_cmpl) { + if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) + cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; + if (cmdiocbq->cmd_cmpl) { spin_unlock_irqrestore(&phba->hbalock, iflag); - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, + (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, &rspiocbq); spin_lock_irqsave(&phba->hbalock, iflag); } @@ -4088,155 +4154,159 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *rspiocbp) { struct lpfc_iocbq *saveq; - struct lpfc_iocbq *cmdiocbp; + struct lpfc_iocbq *cmdiocb; struct lpfc_iocbq *next_iocb; - IOCB_t *irsp = NULL; + IOCB_t *irsp; uint32_t free_saveq; - uint8_t iocb_cmd_type; + u8 cmd_type; lpfc_iocb_type type; unsigned long iflag; + u32 ulp_status = get_job_ulpstatus(phba, rspiocbp); + u32 ulp_word4 = get_job_word4(phba, rspiocbp); + u32 ulp_command = get_job_cmnd(phba, rspiocbp); int rc; spin_lock_irqsave(&phba->hbalock, iflag); /* First add the response iocb to the countinueq list */ - list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); + list_add_tail(&rspiocbp->list, &pring->iocb_continueq); pring->iocb_continueq_cnt++; - /* Now, determine whether the list is completed for processing */ - irsp = &rspiocbp->iocb; - if (irsp->ulpLe) { - /* - * By default, the driver expects to free all resources - * associated with this iocb completion. - */ - free_saveq = 1; - saveq = list_get_first(&pring->iocb_continueq, - struct lpfc_iocbq, list); - irsp = &(saveq->iocb); - list_del_init(&pring->iocb_continueq); - pring->iocb_continueq_cnt = 0; + /* + * By default, the driver expects to free all resources + * associated with this iocb completion. + */ + free_saveq = 1; + saveq = list_get_first(&pring->iocb_continueq, + struct lpfc_iocbq, list); + list_del_init(&pring->iocb_continueq); + pring->iocb_continueq_cnt = 0; - pring->stats.iocb_rsp++; + pring->stats.iocb_rsp++; - /* - * If resource errors reported from HBA, reduce - * queuedepths of the SCSI device. - */ - if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == - IOERR_NO_RESOURCES)) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - phba->lpfc_rampdown_queue_depth(phba); - spin_lock_irqsave(&phba->hbalock, iflag); - } + /* + * If resource errors reported from HBA, reduce + * queuedepths of the SCSI device. 
+ */ + if (ulp_status == IOSTAT_LOCAL_REJECT && + ((ulp_word4 & IOERR_PARAM_MASK) == + IOERR_NO_RESOURCES)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + phba->lpfc_rampdown_queue_depth(phba); + spin_lock_irqsave(&phba->hbalock, iflag); + } - if (irsp->ulpStatus) { - /* Rsp ring <ringno> error: IOCB */ + if (ulp_status) { + /* Rsp ring <ringno> error: IOCB */ + if (phba->sli_rev < LPFC_SLI_REV4) { + irsp = &rspiocbp->iocb; + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0328 Rsp Ring %d error: ulp_status x%x " + "IOCB Data: " + "x%08x x%08x x%08x x%08x " + "x%08x x%08x x%08x x%08x " + "x%08x x%08x x%08x x%08x " + "x%08x x%08x x%08x x%08x\n", + pring->ringno, ulp_status, + get_job_ulpword(rspiocbp, 0), + get_job_ulpword(rspiocbp, 1), + get_job_ulpword(rspiocbp, 2), + get_job_ulpword(rspiocbp, 3), + get_job_ulpword(rspiocbp, 4), + get_job_ulpword(rspiocbp, 5), + *(((uint32_t *)irsp) + 6), + *(((uint32_t *)irsp) + 7), + *(((uint32_t *)irsp) + 8), + *(((uint32_t *)irsp) + 9), + *(((uint32_t *)irsp) + 10), + *(((uint32_t *)irsp) + 11), + *(((uint32_t *)irsp) + 12), + *(((uint32_t *)irsp) + 13), + *(((uint32_t *)irsp) + 14), + *(((uint32_t *)irsp) + 15)); + } else { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0328 Rsp Ring %d error: " + "0321 Rsp Ring %d error: " "IOCB Data: " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x " "x%x x%x x%x x%x\n", pring->ringno, - irsp->un.ulpWord[0], - irsp->un.ulpWord[1], - irsp->un.ulpWord[2], - irsp->un.ulpWord[3], - irsp->un.ulpWord[4], - irsp->un.ulpWord[5], - *(((uint32_t *) irsp) + 6), - *(((uint32_t *) irsp) + 7), - *(((uint32_t *) irsp) + 8), - *(((uint32_t *) irsp) + 9), - *(((uint32_t *) irsp) + 10), - *(((uint32_t *) irsp) + 11), - *(((uint32_t *) irsp) + 12), - *(((uint32_t *) irsp) + 13), - *(((uint32_t *) irsp) + 14), - *(((uint32_t *) irsp) + 15)); + rspiocbp->wcqe_cmpl.word0, + rspiocbp->wcqe_cmpl.total_data_placed, + rspiocbp->wcqe_cmpl.parameter, + rspiocbp->wcqe_cmpl.word3); } + } - /* - * Fetch the IOCB command type and call the correct completion - * routine. Solicited and Unsolicited IOCBs on the ELS ring - * get freed back to the lpfc_iocb_list by the discovery - * kernel thread. - */ - iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; - type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); - switch (type) { - case LPFC_SOL_IOCB: - spin_unlock_irqrestore(&phba->hbalock, iflag); - rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - break; - - case LPFC_UNSOL_IOCB: - spin_unlock_irqrestore(&phba->hbalock, iflag); - rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - if (!rc) - free_saveq = 0; - break; - case LPFC_ABORT_IOCB: - cmdiocbp = NULL; - if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) { + /* + * Fetch the iocb command type and call the correct completion + * routine. Solicited and Unsolicited IOCBs on the ELS ring + * get freed back to the lpfc_iocb_list by the discovery + * kernel thread. 
+ */ + cmd_type = ulp_command & CMD_IOCB_MASK; + type = lpfc_sli_iocb_cmd_type(cmd_type); + switch (type) { + case LPFC_SOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + break; + case LPFC_UNSOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + if (!rc) + free_saveq = 0; + break; + case LPFC_ABORT_IOCB: + cmdiocb = NULL; + if (ulp_command != CMD_XRI_ABORTED_CX) + cmdiocb = lpfc_sli_iocbq_lookup(phba, pring, + saveq); + if (cmdiocb) { + /* Call the specified completion routine */ + if (cmdiocb->cmd_cmpl) { spin_unlock_irqrestore(&phba->hbalock, iflag); - cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, - saveq); + cmdiocb->cmd_cmpl(phba, cmdiocb, saveq); spin_lock_irqsave(&phba->hbalock, iflag); - } - if (cmdiocbp) { - /* Call the specified completion routine */ - if (cmdiocbp->iocb_cmpl) { - spin_unlock_irqrestore(&phba->hbalock, - iflag); - (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, - saveq); - spin_lock_irqsave(&phba->hbalock, - iflag); - } else - __lpfc_sli_release_iocbq(phba, - cmdiocbp); - } - break; - - case LPFC_UNKNOWN_IOCB: - if (irsp->ulpCommand == CMD_ADAPTER_MSG) { - char adaptermsg[LPFC_MAX_ADPTMSG]; - memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); - memcpy(&adaptermsg[0], (uint8_t *)irsp, - MAX_MSG_DATA); - dev_warn(&((phba->pcidev)->dev), - "lpfc%d: %s\n", - phba->brd_no, adaptermsg); } else { - /* Unknown IOCB command */ - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "0335 Unknown IOCB " - "command Data: x%x " - "x%x x%x x%x\n", - irsp->ulpCommand, - irsp->ulpStatus, - irsp->ulpIoTag, - irsp->ulpContext); + __lpfc_sli_release_iocbq(phba, cmdiocb); } - break; } + break; + case LPFC_UNKNOWN_IOCB: + if (ulp_command == CMD_ADAPTER_MSG) { + char adaptermsg[LPFC_MAX_ADPTMSG]; + + memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); + memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe, + MAX_MSG_DATA); + dev_warn(&((phba->pcidev)->dev), + "lpfc%d: %s\n", + phba->brd_no, adaptermsg); + } else { + /* Unknown command */ + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0335 Unknown IOCB " + "command Data: x%x " + "x%x x%x x%x\n", + ulp_command, + ulp_status, + get_wqe_reqtag(rspiocbp), + get_job_ulpcontext(phba, rspiocbp)); + } + break; + } - if (free_saveq) { - list_for_each_entry_safe(rspiocbp, next_iocb, - &saveq->list, list) { - list_del_init(&rspiocbp->list); - __lpfc_sli_release_iocbq(phba, rspiocbp); - } - __lpfc_sli_release_iocbq(phba, saveq); + if (free_saveq) { + list_for_each_entry_safe(rspiocbp, next_iocb, + &saveq->list, list) { + list_del_init(&rspiocbp->list); + __lpfc_sli_release_iocbq(phba, rspiocbp); } - rspiocbp = NULL; + __lpfc_sli_release_iocbq(phba, saveq); } + rspiocbp = NULL; spin_unlock_irqrestore(&phba->hbalock, iflag); return rspiocbp; } @@ -4429,8 +4499,8 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, irspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event); /* Translate ELS WCQE to response IOCBQ */ - irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, - irspiocbq); + irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba, + irspiocbq); if (irspiocbq) lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); @@ -4573,7 +4643,7 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba) list_splice_init(&pring->txq, &txq); list_for_each_entry_safe(piocb, next_iocb, &pring->txcmplq, list) - piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; + piocb->cmd_flag &= 
~LPFC_IO_ON_TXCMPLQ; /* Retrieve everything on the txcmplq */ list_splice_init(&pring->txcmplq, &txcmplq); pring->txq_cnt = 0; @@ -4599,7 +4669,7 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba) list_splice_init(&pring->txq, &txq); list_for_each_entry_safe(piocb, next_iocb, &pring->txcmplq, list) - piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; + piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ; /* Retrieve everything on the txcmplq */ list_splice_init(&pring->txcmplq, &txcmplq); pring->txq_cnt = 0; @@ -10098,7 +10168,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, lockdep_assert_held(&phba->hbalock); - if (piocb->iocb_cmpl && (!piocb->vport) && + if (piocb->cmd_cmpl && (!piocb->vport) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -10150,10 +10220,10 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, case CMD_QUE_RING_BUF64_CN: /* * For IOCBs, like QUE_RING_BUF, that have no rsp ring - * completion, iocb_cmpl MUST be 0. + * completion, cmd_cmpl MUST be 0. */ - if (piocb->iocb_cmpl) - piocb->iocb_cmpl = NULL; + if (piocb->cmd_cmpl) + piocb->cmd_cmpl = NULL; fallthrough; case CMD_CREATE_XRI_CR: case CMD_CLOSE_XRI_CN: @@ -10200,715 +10270,6 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, } /** - * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. - * @phba: Pointer to HBA context object. - * @piocbq: Pointer to command iocb. - * @sglq: Pointer to the scatter gather queue object. - * - * This routine converts the bpl or bde that is in the IOCB - * to a sgl list for the sli4 hardware. The physical address - * of the bpl/bde is converted back to a virtual address. - * If the IOCB contains a BPL then the list of BDE's is - * converted to sli4_sge's. If the IOCB contains a single - * BDE then it is converted to a single sli_sge. - * The IOCB is still in cpu endianess so the contents of - * the bpl can be used without byte swapping. - * - * Returns valid XRI = Success, NO_XRI = Failure. -**/ -static uint16_t -lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, - struct lpfc_sglq *sglq) -{ - uint16_t xritag = NO_XRI; - struct ulp_bde64 *bpl = NULL; - struct ulp_bde64 bde; - struct sli4_sge *sgl = NULL; - struct lpfc_dmabuf *dmabuf; - IOCB_t *icmd; - int numBdes = 0; - int i = 0; - uint32_t offset = 0; /* accumulated offset in the sg request list */ - int inbound = 0; /* number of sg reply entries inbound from firmware */ - - if (!piocbq || !sglq) - return xritag; - - sgl = (struct sli4_sge *)sglq->sgl; - icmd = &piocbq->iocb; - if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) - return sglq->sli4_xritag; - if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { - numBdes = icmd->un.genreq64.bdl.bdeSize / - sizeof(struct ulp_bde64); - /* The addrHigh and addrLow fields within the IOCB - * have not been byteswapped yet so there is no - * need to swap them back. - */ - if (piocbq->context3) - dmabuf = (struct lpfc_dmabuf *)piocbq->context3; - else - return xritag; - - bpl = (struct ulp_bde64 *)dmabuf->virt; - if (!bpl) - return xritag; - - for (i = 0; i < numBdes; i++) { - /* Should already be byte swapped. */ - sgl->addr_hi = bpl->addrHigh; - sgl->addr_lo = bpl->addrLow; - - sgl->word2 = le32_to_cpu(sgl->word2); - if ((i+1) == numBdes) - bf_set(lpfc_sli4_sge_last, sgl, 1); - else - bf_set(lpfc_sli4_sge_last, sgl, 0); - /* swap the size field back to the cpu so we - * can assign it to the sgl. 
- */ - bde.tus.w = le32_to_cpu(bpl->tus.w); - sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); - /* The offsets in the sgl need to be accumulated - * separately for the request and reply lists. - * The request is always first, the reply follows. - */ - if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { - /* add up the reply sg entries */ - if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) - inbound++; - /* first inbound? reset the offset */ - if (inbound == 1) - offset = 0; - bf_set(lpfc_sli4_sge_offset, sgl, offset); - bf_set(lpfc_sli4_sge_type, sgl, - LPFC_SGE_TYPE_DATA); - offset += bde.tus.f.bdeSize; - } - sgl->word2 = cpu_to_le32(sgl->word2); - bpl++; - sgl++; - } - } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { - /* The addrHigh and addrLow fields of the BDE have not - * been byteswapped yet so they need to be swapped - * before putting them in the sgl. - */ - sgl->addr_hi = - cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); - sgl->addr_lo = - cpu_to_le32(icmd->un.genreq64.bdl.addrLow); - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 1); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = - cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); - } - return sglq->sli4_xritag; -} - -/** - * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry. - * @phba: Pointer to HBA context object. - * @iocbq: Pointer to command iocb. - * @wqe: Pointer to the work queue entry. - * - * This routine converts the iocb command to its Work Queue Entry - * equivalent. The wqe pointer should not have any fields set when - * this routine is called because it will memcpy over them. - * This routine does not set the CQ_ID or the WQEC bits in the - * wqe. - * - * Returns: 0 = Success, IOCB_ERROR = Failure. - **/ -static int -lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, - union lpfc_wqe128 *wqe) -{ - uint32_t xmit_len = 0, total_len = 0; - uint8_t ct = 0; - uint32_t fip; - uint32_t abort_tag; - uint8_t command_type = ELS_COMMAND_NON_FIP; - uint8_t cmnd; - uint16_t xritag; - uint16_t abrt_iotag; - struct lpfc_iocbq *abrtiocbq; - struct ulp_bde64 *bpl = NULL; - uint32_t els_id = LPFC_ELS_ID_DEFAULT; - int numBdes, i; - struct ulp_bde64 bde; - struct lpfc_nodelist *ndlp; - uint32_t *pcmd; - uint32_t if_type; - - fip = phba->hba_flag & HBA_FIP_SUPPORT; - /* The fcp commands will set command type */ - if (iocbq->iocb_flag & LPFC_IO_FCP) - command_type = FCP_COMMAND; - else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) - command_type = ELS_COMMAND_FIP; - else - command_type = ELS_COMMAND_NON_FIP; - - if (phba->fcp_embed_io) - memset(wqe, 0, sizeof(union lpfc_wqe128)); - /* Some of the fields are in the right position already */ - memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); - /* The ct field has moved so reset */ - wqe->generic.wqe_com.word7 = 0; - wqe->generic.wqe_com.word10 = 0; - - abort_tag = (uint32_t) iocbq->iotag; - xritag = iocbq->sli4_xritag; - /* words0-2 bpl convert bde */ - if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { - numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / - sizeof(struct ulp_bde64); - bpl = (struct ulp_bde64 *) - ((struct lpfc_dmabuf *)iocbq->context3)->virt; - if (!bpl) - return IOCB_ERROR; - - /* Should already be byte swapped. */ - wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); - wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); - /* swap the size field back to the cpu so we - * can assign it to the sgl. 
- */ - wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); - xmit_len = wqe->generic.bde.tus.f.bdeSize; - total_len = 0; - for (i = 0; i < numBdes; i++) { - bde.tus.w = le32_to_cpu(bpl[i].tus.w); - total_len += bde.tus.f.bdeSize; - } - } else - xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; - - iocbq->iocb.ulpIoTag = iocbq->iotag; - cmnd = iocbq->iocb.ulpCommand; - - switch (iocbq->iocb.ulpCommand) { - case CMD_ELS_REQUEST64_CR: - if (iocbq->iocb_flag & LPFC_IO_LIBDFC) - ndlp = iocbq->context_un.ndlp; - else - ndlp = (struct lpfc_nodelist *)iocbq->context1; - if (!iocbq->iocb.ulpLe) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "2007 Only Limited Edition cmd Format" - " supported 0x%x\n", - iocbq->iocb.ulpCommand); - return IOCB_ERROR; - } - - wqe->els_req.payload_len = xmit_len; - /* Els_reguest64 has a TMO */ - bf_set(wqe_tmo, &wqe->els_req.wqe_com, - iocbq->iocb.ulpTimeout); - /* Need a VF for word 4 set the vf bit*/ - bf_set(els_req64_vf, &wqe->els_req, 0); - /* And a VFID for word 12 */ - bf_set(els_req64_vfid, &wqe->els_req, 0); - ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); - bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, - iocbq->iocb.ulpContext); - bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); - bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); - /* CCP CCPE PV PRI in word10 were set in the memcpy */ - if (command_type == ELS_COMMAND_FIP) - els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) - >> LPFC_FIP_ELS_ID_SHIFT); - pcmd = (uint32_t *) (((struct lpfc_dmabuf *) - iocbq->context2)->virt); - if_type = bf_get(lpfc_sli_intf_if_type, - &phba->sli4_hba.sli_intf); - if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { - if (pcmd && (*pcmd == ELS_CMD_FLOGI || - *pcmd == ELS_CMD_SCR || - *pcmd == ELS_CMD_RDF || - *pcmd == ELS_CMD_EDC || - *pcmd == ELS_CMD_RSCN_XMT || - *pcmd == ELS_CMD_FDISC || - *pcmd == ELS_CMD_LOGO || - *pcmd == ELS_CMD_QFPA || - *pcmd == ELS_CMD_UVEM || - *pcmd == ELS_CMD_PLOGI)) { - bf_set(els_req64_sp, &wqe->els_req, 1); - bf_set(els_req64_sid, &wqe->els_req, - iocbq->vport->fc_myDID); - if ((*pcmd == ELS_CMD_FLOGI) && - !(phba->fc_topology == - LPFC_TOPOLOGY_LOOP)) - bf_set(els_req64_sid, &wqe->els_req, 0); - bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); - bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, - phba->vpi_ids[iocbq->vport->vpi]); - } else if (pcmd && iocbq->context1) { - bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); - bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, - phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); - } - } - bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, - phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); - bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); - bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); - bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); - bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); - bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); - bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); - wqe->els_req.max_response_payload_len = total_len - xmit_len; - break; - case CMD_XMIT_SEQUENCE64_CX: - bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, - iocbq->iocb.un.ulpWord[3]); - bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, - iocbq->iocb.unsli3.rcvsli3.ox_id); - /* The entire sequence is transmitted for this IOCB */ - xmit_len = total_len; - cmnd = CMD_XMIT_SEQUENCE64_CR; - if (phba->link_flag & LS_LOOPBACK_MODE) - bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); - fallthrough; - case CMD_XMIT_SEQUENCE64_CR: - /* word3 iocb=io_tag32 wqe=reserved */ - wqe->xmit_sequence.rsvd3 = 0; - /* word4 relative_offset memcpy */ - /* word5 r_ctl/df_ctl memcpy */ 
- bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); - bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); - bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, - LPFC_WQE_IOD_WRITE); - bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, - LPFC_WQE_LENLOC_WORD12); - bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); - wqe->xmit_sequence.xmit_len = xmit_len; - command_type = OTHER_COMMAND; - break; - case CMD_XMIT_BCAST64_CN: - /* word3 iocb=iotag32 wqe=seq_payload_len */ - wqe->xmit_bcast64.seq_payload_len = xmit_len; - /* word4 iocb=rsvd wqe=rsvd */ - /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ - /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ - bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, - ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); - bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); - bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); - bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, - LPFC_WQE_LENLOC_WORD3); - bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); - break; - case CMD_FCP_IWRITE64_CR: - command_type = FCP_COMMAND_DATA_OUT; - /* word3 iocb=iotag wqe=payload_offset_len */ - /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - bf_set(payload_offset_len, &wqe->fcp_iwrite, - xmit_len + sizeof(struct fcp_rsp)); - bf_set(cmd_buff_len, &wqe->fcp_iwrite, - 0); - /* word4 iocb=parameter wqe=total_xfer_length memcpy */ - /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ - bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, - iocbq->iocb.ulpFCP2Rcvy); - bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); - /* Always open the exchange */ - bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); - bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, - LPFC_WQE_LENLOC_WORD4); - bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); - bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); - if (iocbq->iocb_flag & LPFC_IO_OAS) { - bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); - bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); - if (iocbq->priority) { - bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, - (iocbq->priority << 1)); - } else { - bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, - (phba->cfg_XLanePriority << 1)); - } - } - /* Note, word 10 is already initialized to 0 */ - - /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ - if (phba->cfg_enable_pbde) - bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); - else - bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); - - if (phba->fcp_embed_io) { - struct lpfc_io_buf *lpfc_cmd; - struct sli4_sge *sgl; - struct fcp_cmnd *fcp_cmnd; - uint32_t *ptr; - - /* 128 byte wqe support here */ - - lpfc_cmd = iocbq->context1; - sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; - fcp_cmnd = lpfc_cmd->fcp_cmnd; - - /* Word 0-2 - FCP_CMND */ - wqe->generic.bde.tus.f.bdeFlags = - BUFF_TYPE_BDE_IMMED; - wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; - wqe->generic.bde.addrHigh = 0; - wqe->generic.bde.addrLow = 88; /* Word 22 */ - - bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); - bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); - - /* Word 22-29 FCP CMND Payload */ - ptr = &wqe->words[22]; - memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); - } - break; - case CMD_FCP_IREAD64_CR: - /* word3 iocb=iotag wqe=payload_offset_len */ - /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - bf_set(payload_offset_len, &wqe->fcp_iread, - xmit_len + sizeof(struct fcp_rsp)); - bf_set(cmd_buff_len, &wqe->fcp_iread, - 0); - /* word4 iocb=parameter wqe=total_xfer_length memcpy */ - /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ - 
bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, - iocbq->iocb.ulpFCP2Rcvy); - bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); - /* Always open the exchange */ - bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); - bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, - LPFC_WQE_LENLOC_WORD4); - bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); - bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); - if (iocbq->iocb_flag & LPFC_IO_OAS) { - bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); - bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); - if (iocbq->priority) { - bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, - (iocbq->priority << 1)); - } else { - bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, - (phba->cfg_XLanePriority << 1)); - } - } - /* Note, word 10 is already initialized to 0 */ - - /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ - if (phba->cfg_enable_pbde) - bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); - else - bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); - - if (phba->fcp_embed_io) { - struct lpfc_io_buf *lpfc_cmd; - struct sli4_sge *sgl; - struct fcp_cmnd *fcp_cmnd; - uint32_t *ptr; - - /* 128 byte wqe support here */ - - lpfc_cmd = iocbq->context1; - sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; - fcp_cmnd = lpfc_cmd->fcp_cmnd; - - /* Word 0-2 - FCP_CMND */ - wqe->generic.bde.tus.f.bdeFlags = - BUFF_TYPE_BDE_IMMED; - wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; - wqe->generic.bde.addrHigh = 0; - wqe->generic.bde.addrLow = 88; /* Word 22 */ - - bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); - bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); - - /* Word 22-29 FCP CMND Payload */ - ptr = &wqe->words[22]; - memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); - } - break; - case CMD_FCP_ICMND64_CR: - /* word3 iocb=iotag wqe=payload_offset_len */ - /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - bf_set(payload_offset_len, &wqe->fcp_icmd, - xmit_len + sizeof(struct fcp_rsp)); - bf_set(cmd_buff_len, &wqe->fcp_icmd, - 0); - /* word3 iocb=IO_TAG wqe=reserved */ - bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); - /* Always open the exchange */ - bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); - bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); - bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); - bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, - LPFC_WQE_LENLOC_NONE); - bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, - iocbq->iocb.ulpFCP2Rcvy); - if (iocbq->iocb_flag & LPFC_IO_OAS) { - bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); - bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); - if (iocbq->priority) { - bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, - (iocbq->priority << 1)); - } else { - bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, - (phba->cfg_XLanePriority << 1)); - } - } - /* Note, word 10 is already initialized to 0 */ - - if (phba->fcp_embed_io) { - struct lpfc_io_buf *lpfc_cmd; - struct sli4_sge *sgl; - struct fcp_cmnd *fcp_cmnd; - uint32_t *ptr; - - /* 128 byte wqe support here */ - - lpfc_cmd = iocbq->context1; - sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; - fcp_cmnd = lpfc_cmd->fcp_cmnd; - - /* Word 0-2 - FCP_CMND */ - wqe->generic.bde.tus.f.bdeFlags = - BUFF_TYPE_BDE_IMMED; - wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; - wqe->generic.bde.addrHigh = 0; - wqe->generic.bde.addrLow = 88; /* Word 22 */ - - bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); - bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); - - /* Word 22-29 FCP CMND Payload */ - ptr = &wqe->words[22]; - memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); - } - break; - case CMD_GEN_REQUEST64_CR: - /* For this command calculate the xmit length of 
the - * request bde. - */ - xmit_len = 0; - numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / - sizeof(struct ulp_bde64); - for (i = 0; i < numBdes; i++) { - bde.tus.w = le32_to_cpu(bpl[i].tus.w); - if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) - break; - xmit_len += bde.tus.f.bdeSize; - } - /* word3 iocb=IO_TAG wqe=request_payload_len */ - wqe->gen_req.request_payload_len = xmit_len; - /* word4 iocb=parameter wqe=relative_offset memcpy */ - /* word5 [rctl, type, df_ctl, la] copied in memcpy */ - /* word6 context tag copied in memcpy */ - if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { - ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "2015 Invalid CT %x command 0x%x\n", - ct, iocbq->iocb.ulpCommand); - return IOCB_ERROR; - } - bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); - bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); - bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); - bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); - bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); - bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); - bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); - bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); - wqe->gen_req.max_response_payload_len = total_len - xmit_len; - command_type = OTHER_COMMAND; - break; - case CMD_XMIT_ELS_RSP64_CX: - ndlp = (struct lpfc_nodelist *)iocbq->context1; - /* words0-2 BDE memcpy */ - /* word3 iocb=iotag32 wqe=response_payload_len */ - wqe->xmit_els_rsp.response_payload_len = xmit_len; - /* word4 */ - wqe->xmit_els_rsp.word4 = 0; - /* word5 iocb=rsvd wge=did */ - bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, - iocbq->iocb.un.xseq64.xmit_els_remoteID); - - if_type = bf_get(lpfc_sli_intf_if_type, - &phba->sli4_hba.sli_intf); - if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { - if (iocbq->vport->fc_flag & FC_PT2PT) { - bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); - bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, - iocbq->vport->fc_myDID); - if (iocbq->vport->fc_myDID == Fabric_DID) { - bf_set(wqe_els_did, - &wqe->xmit_els_rsp.wqe_dest, 0); - } - } - } - bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, - ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); - bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); - bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, - iocbq->iocb.unsli3.rcvsli3.ox_id); - if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) - bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, - phba->vpi_ids[iocbq->vport->vpi]); - bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); - bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); - bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); - bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, - LPFC_WQE_LENLOC_WORD3); - bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); - bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, - phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); - if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { - bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); - bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, - iocbq->vport->fc_myDID); - bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); - bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, - phba->vpi_ids[phba->pport->vpi]); - } - command_type = OTHER_COMMAND; - break; - case CMD_CLOSE_XRI_CN: - case CMD_ABORT_XRI_CN: - case CMD_ABORT_XRI_CX: - /* words 0-2 memcpy should be 0 rserved */ - /* port will send abts */ - abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; - if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { - abrtiocbq = 
phba->sli.iocbq_lookup[abrt_iotag]; - fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; - } else - fip = 0; - - if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) - /* - * The link is down, or the command was ELS_FIP - * so the fw does not need to send abts - * on the wire. - */ - bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); - else - bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); - bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); - /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ - wqe->abort_cmd.rsrvd5 = 0; - bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, - ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); - abort_tag = iocbq->iocb.un.acxri.abortIoTag; - /* - * The abort handler will send us CMD_ABORT_XRI_CN or - * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX - */ - bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); - bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, - LPFC_WQE_LENLOC_NONE); - cmnd = CMD_ABORT_XRI_CX; - command_type = OTHER_COMMAND; - xritag = 0; - break; - case CMD_XMIT_BLS_RSP64_CX: - ndlp = (struct lpfc_nodelist *)iocbq->context1; - /* As BLS ABTS RSP WQE is very different from other WQEs, - * we re-construct this WQE here based on information in - * iocbq from scratch. - */ - memset(wqe, 0, sizeof(*wqe)); - /* OX_ID is invariable to who sent ABTS to CT exchange */ - bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, - bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); - if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == - LPFC_ABTS_UNSOL_INT) { - /* ABTS sent by initiator to CT exchange, the - * RX_ID field will be filled with the newly - * allocated responder XRI. - */ - bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, - iocbq->sli4_xritag); - } else { - /* ABTS sent by responder to CT exchange, the - * RX_ID field will be filled with the responder - * RX_ID from ABTS. 
- */ - bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, - bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); - } - bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); - bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); - - /* Use CT=VPI */ - bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, - ndlp->nlp_DID); - bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, - iocbq->iocb.ulpContext); - bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); - bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, - phba->vpi_ids[phba->pport->vpi]); - bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); - bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, - LPFC_WQE_LENLOC_NONE); - /* Overwrite the pre-set comnd type with OTHER_COMMAND */ - command_type = OTHER_COMMAND; - if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { - bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, - bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); - bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, - bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); - bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, - bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); - } - - break; - case CMD_SEND_FRAME: - bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME); - bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */ - bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */ - bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1); - bf_set(wqe_xbl, &wqe->generic.wqe_com, 1); - bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); - bf_set(wqe_xc, &wqe->generic.wqe_com, 1); - bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA); - bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); - bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); - bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); - return 0; - case CMD_XRI_ABORTED_CX: - case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ - case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ - case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ - case CMD_FCP_TRSP64_CX: /* Target mode rcv */ - case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ - default: - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "2014 Invalid command 0x%x\n", - iocbq->iocb.ulpCommand); - return IOCB_ERROR; - } - - if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) - bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); - else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) - bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); - else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) - bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); - iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | - LPFC_IO_DIF_INSERT); - bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); - bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); - wqe->generic.wqe_com.abort_tag = abort_tag; - bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); - bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); - bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); - bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); - return 0; -} - -/** * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb * @phba: Pointer to HBA context object. * @ring_number: SLI ring number to issue wqe on. 
@@ -10957,7 +10318,17 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number, int rc; struct lpfc_io_buf *lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; - union lpfc_wqe128 *wqe = &piocb->wqe; + + lpfc_prep_embed_io(phba, lpfc_cmd); + rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb); + return rc; +} + +void +lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +{ + struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq; + union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe; struct sli4_sge *sgl; /* 128 byte wqe support here */ @@ -10995,7 +10366,7 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number, } /* add the VMID tags as per switch response */ - if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) { + if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) { if (phba->pport->vmid_priority_tagging) { bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, @@ -11006,8 +10377,6 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number, wqe->words[31] = piocb->vmid_tag.app_id; } } - rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb); - return rc; } /** @@ -11029,13 +10398,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_sglq *sglq; - union lpfc_wqe128 wqe; + union lpfc_wqe128 *wqe; struct lpfc_queue *wq; struct lpfc_sli_ring *pring; + u32 ulp_command = get_job_cmnd(phba, piocb); /* Get the WQ */ - if ((piocb->iocb_flag & LPFC_IO_FCP) || - (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if ((piocb->cmd_flag & LPFC_IO_FCP) || + (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) { wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq; } else { wq = phba->sli4_hba.els_wq; @@ -11049,34 +10419,24 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, */ lockdep_assert_held(&pring->ring_lock); - + wqe = &piocb->wqe; if (piocb->sli4_xritag == NO_XRI) { - if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || - piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) + if (ulp_command == CMD_ABORT_XRI_CX) sglq = NULL; else { - if (!list_empty(&pring->txq)) { + sglq = __lpfc_sli_get_els_sglq(phba, piocb); + if (!sglq) { if (!(flag & SLI_IOCB_RET_IOCB)) { __lpfc_sli_ringtx_put(phba, - pring, piocb); + pring, + piocb); return IOCB_SUCCESS; } else { return IOCB_BUSY; } - } else { - sglq = __lpfc_sli_get_els_sglq(phba, piocb); - if (!sglq) { - if (!(flag & SLI_IOCB_RET_IOCB)) { - __lpfc_sli_ringtx_put(phba, - pring, - piocb); - return IOCB_SUCCESS; - } else - return IOCB_BUSY; - } } } - } else if (piocb->iocb_flag & LPFC_IO_FCP) { + } else if (piocb->cmd_flag & LPFC_IO_FCP) { /* These IO's already have an XRI and a mapped sgl. */ sglq = NULL; } @@ -11093,15 +10453,26 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if (sglq) { piocb->sli4_lxritag = sglq->sli4_lxritag; piocb->sli4_xritag = sglq->sli4_xritag; - if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) + + /* ABTS sent by initiator to CT exchange, the + * RX_ID field will be filled with the newly + * allocated responder XRI. 
+ */ + if (ulp_command == CMD_XMIT_BLS_RSP64_CX && + piocb->abort_bls == LPFC_ABTS_UNSOL_INT) + bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, + piocb->sli4_xritag); + + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, + piocb->sli4_xritag); + + if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI) return IOCB_ERROR; } - if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) + if (lpfc_sli4_wq_put(wq, wqe)) return IOCB_ERROR; - if (lpfc_sli4_wq_put(wq, &wqe)) - return IOCB_ERROR; lpfc_sli_ringtxcmpl_put(phba, pring, piocb); return 0; @@ -11144,6 +10515,390 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); } +static void +__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq, + struct lpfc_vport *vport, + struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, + u32 elscmd, u8 tmo, u8 expect_rsp) +{ + struct lpfc_hba *phba = vport->phba; + IOCB_t *cmd; + + cmd = &cmdiocbq->iocb; + memset(cmd, 0, sizeof(*cmd)); + + cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys); + cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + + if (expect_rsp) { + cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); + cmd->un.elsreq64.remoteID = did; /* DID */ + cmd->ulpCommand = CMD_ELS_REQUEST64_CR; + cmd->ulpTimeout = tmo; + } else { + cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64); + cmd->un.genreq64.xmit_els_remoteID = did; /* DID */ + cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; + } + cmd->ulpBdeCount = 1; + cmd->ulpLe = 1; + cmd->ulpClass = CLASS3; + + /* If we have NPIV enabled, we want to send ELS traffic by VPI. */ + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + if (expect_rsp) { + cmd->un.elsreq64.myID = vport->fc_myDID; + + /* For ELS_REQUEST64_CR, use the VPI by default */ + cmd->ulpContext = phba->vpi_ids[vport->vpi]; + } + + cmd->ulpCt_h = 0; + /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ + if (elscmd == ELS_CMD_ECHO) + cmd->ulpCt_l = 0; /* context = invalid RPI */ + else + cmd->ulpCt_l = 1; /* context = VPI */ + } +} + +static void +__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq, + struct lpfc_vport *vport, + struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did, + u32 elscmd, u8 tmo, u8 expect_rsp) +{ + struct lpfc_hba *phba = vport->phba; + union lpfc_wqe128 *wqe; + struct ulp_bde64_le *bde; + + wqe = &cmdiocbq->wqe; + memset(wqe, 0, sizeof(*wqe)); + + /* Word 0 - 2 BDE */ + bde = (struct ulp_bde64_le *)&wqe->generic.bde; + bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys)); + bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys)); + bde->type_size = cpu_to_le32(cmd_size); + bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); + + if (expect_rsp) { + bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_CR); + + /* Transfer length */ + wqe->els_req.payload_len = cmd_size; + wqe->els_req.max_response_payload_len = FCELSSIZE; + + /* DID */ + bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did); + } else { + /* DID */ + bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did); + + /* Transfer length */ + wqe->xmit_els_rsp.response_payload_len = cmd_size; + + bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com, + CMD_XMIT_ELS_RSP64_CX); + } + + bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo); + bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag); + bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); + + /* If we have NPIV enabled, we want to send ELS traffic by VPI. 
+ * For SLI4, since the driver controls VPIs we also want to include + * all ELS pt2pt protocol traffic as well. + */ + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || + (vport->fc_flag & FC_PT2PT)) { + if (expect_rsp) { + bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID); + + /* For ELS_REQUEST64_CR, use the VPI by default */ + bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, + phba->vpi_ids[vport->vpi]); + } + + /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ + if (elscmd == ELS_CMD_ECHO) + bf_set(wqe_ct, &wqe->generic.wqe_com, 0); + else + bf_set(wqe_ct, &wqe->generic.wqe_com, 1); + } +} + +void +lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, + u16 cmd_size, u32 did, u32 elscmd, u8 tmo, + u8 expect_rsp) +{ + phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did, + elscmd, tmo, expect_rsp); +} + +static void +__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, + u16 rpi, u32 num_entry, u8 tmo) +{ + IOCB_t *cmd; + + cmd = &cmdiocbq->iocb; + memset(cmd, 0, sizeof(*cmd)); + + cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); + cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64); + + cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; + cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; + cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); + + cmd->ulpContext = rpi; + cmd->ulpClass = CLASS3; + cmd->ulpCommand = CMD_GEN_REQUEST64_CR; + cmd->ulpBdeCount = 1; + cmd->ulpLe = 1; + cmd->ulpOwner = OWN_CHIP; + cmd->ulpTimeout = tmo; +} + +static void +__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp, + u16 rpi, u32 num_entry, u8 tmo) +{ + union lpfc_wqe128 *cmdwqe; + struct ulp_bde64_le *bde, *bpl; + u32 xmit_len = 0, total_len = 0, size, type, i; + + cmdwqe = &cmdiocbq->wqe; + memset(cmdwqe, 0, sizeof(*cmdwqe)); + + /* Calculate total_len and xmit_len */ + bpl = (struct ulp_bde64_le *)bmp->virt; + for (i = 0; i < num_entry; i++) { + size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; + total_len += size; + } + for (i = 0; i < num_entry; i++) { + size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK; + type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK; + if (type != ULP_BDE64_TYPE_BDE_64) + break; + xmit_len += size; + } + + /* Words 0 - 2 */ + bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde; + bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys)); + bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys)); + bde->type_size = cpu_to_le32(xmit_len); + bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64); + + /* Word 3 */ + cmdwqe->gen_req.request_payload_len = xmit_len; + + /* Word 5 */ + bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT); + bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); + bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1); + bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi); + + /* Word 7 */ + bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo); + bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3); + bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR); + bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI); + + /* Word 12 */ + cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len; +} + +void +lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + struct 
lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo) +{ + phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo); +} + +static void +__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, + u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) +{ + IOCB_t *icmd; + + icmd = &cmdiocbq->iocb; + memset(icmd, 0, sizeof(*icmd)); + + icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); + icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); + icmd->un.xseq64.w5.hcsw.Fctl = LA; + if (last_seq) + icmd->un.xseq64.w5.hcsw.Fctl |= LS; + icmd->un.xseq64.w5.hcsw.Dfctl = 0; + icmd->un.xseq64.w5.hcsw.Rctl = rctl; + icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; + + icmd->ulpBdeCount = 1; + icmd->ulpLe = 1; + icmd->ulpClass = CLASS3; + + switch (cr_cx_cmd) { + case CMD_XMIT_SEQUENCE64_CR: + icmd->ulpContext = rpi; + icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; + break; + case CMD_XMIT_SEQUENCE64_CX: + icmd->ulpContext = ox_id; + icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; + break; + default: + break; + } +} + +static void +__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, + u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd) +{ + union lpfc_wqe128 *wqe; + struct ulp_bde64 *bpl; + struct ulp_bde64_le *bde; + + wqe = &cmdiocbq->wqe; + memset(wqe, 0, sizeof(*wqe)); + + /* Words 0 - 2 */ + bpl = (struct ulp_bde64 *)bmp->virt; + if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) { + wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh; + wqe->xmit_sequence.bde.addrLow = bpl->addrLow; + wqe->xmit_sequence.bde.tus.w = bpl->tus.w; + } else { + bde = (struct ulp_bde64_le *)&wqe->xmit_sequence.bde; + bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys)); + bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys)); + bde->type_size = cpu_to_le32(bpl->tus.f.bdeSize); + bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64); + } + + /* Word 5 */ + bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq); + bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1); + bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); + bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl); + bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT); + + /* Word 6 */ + bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi); + + bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, + CMD_XMIT_SEQUENCE64_WQE); + + /* Word 7 */ + bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); + + /* Word 9 */ + bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id); + + /* Word 12 */ + if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) + wqe->xmit_sequence.xmit_len = full_size; + else + wqe->xmit_sequence.xmit_len = + wqe->xmit_sequence.bde.tus.f.bdeSize; +} + +void +lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id, + u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd) +{ + phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry, + rctl, last_seq, cr_cx_cmd); +} + +static void +__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, + u16 iotag, u8 ulp_class, u16 cqid, bool ia) +{ + IOCB_t *icmd = NULL; + + icmd = &cmdiocbq->iocb; + memset(icmd, 0, sizeof(*icmd)); + + /* Word 5 */ + icmd->un.acxri.abortContextTag = ulp_context; + icmd->un.acxri.abortIoTag = iotag; + + if (ia) { + /* Word 7 */ + icmd->ulpCommand = CMD_CLOSE_XRI_CN; + 
} else { + /* Word 3 */ + icmd->un.acxri.abortType = ABORT_TYPE_ABTS; + + /* Word 7 */ + icmd->ulpClass = ulp_class; + icmd->ulpCommand = CMD_ABORT_XRI_CN; + } + + /* Word 7 */ + icmd->ulpLe = 1; +} + +static void +__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context, + u16 iotag, u8 ulp_class, u16 cqid, bool ia) +{ + union lpfc_wqe128 *wqe; + + wqe = &cmdiocbq->wqe; + memset(wqe, 0, sizeof(*wqe)); + + /* Word 3 */ + bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); + if (ia) + bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); + else + bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); + + /* Word 7 */ + bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE); + + /* Word 8 */ + wqe->abort_cmd.wqe_com.abort_tag = ulp_context; + + /* Word 9 */ + bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag); + + /* Word 10 */ + bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); + + /* Word 11 */ + bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid); + bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); +} + +void +lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, + u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid, + bool ia) +{ + phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class, + cqid, ia); +} + /** * lpfc_sli_api_table_setup - Set up sli api function jump table * @phba: The hba struct for which this call is being executed. @@ -11162,11 +10917,19 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3; + phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3; + phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3; + phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3; + phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3; break; case LPFC_PCI_DEV_OC: phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4; + phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4; + phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4; + phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4; + phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4; break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -11174,7 +10937,6 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) dev_grp); return -ENODEV; } - phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; return 0; } @@ -11193,14 +10955,14 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) { struct lpfc_io_buf *lpfc_cmd; - if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { + if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { if (unlikely(!phba->sli4_hba.hdwq)) return NULL; /* * for abort iocb hba_wqidx should already * be setup based on what work queue we used. 
*/ - if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) { lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; piocb->hba_wqidx = lpfc_cmd->hdwq_no; } @@ -11236,6 +10998,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, int rc; if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli_prep_wqe(phba, piocb); + eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq; pring = lpfc_sli4_calc_ring(phba, piocb); @@ -11372,8 +11136,8 @@ lpfc_sli_abts_err_handler(struct lpfc_hba *phba, lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3095 Event Context not found, no " "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", - iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, - vpi, rpi); + vpi, rpi, iocbq->iocb.ulpStatus, + iocbq->iocb.ulpContext); } /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. @@ -12221,47 +11985,33 @@ static void lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { - IOCB_t *irsp = &rspiocb->iocb; - uint16_t abort_iotag, abort_context; - struct lpfc_iocbq *abort_iocb = NULL; - - if (irsp->ulpStatus) { + u32 ulp_status = get_job_ulpstatus(phba, rspiocb); + u32 ulp_word4 = get_job_word4(phba, rspiocb); + u8 cmnd = get_job_cmnd(phba, cmdiocb); + if (ulp_status) { /* * Assume that the port already completed and returned, or * will return the iocb. Just Log the message. */ - abort_context = cmdiocb->iocb.un.acxri.abortContextTag; - abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; - - spin_lock_irq(&phba->hbalock); if (phba->sli_rev < LPFC_SLI_REV4) { - if (irsp->ulpCommand == CMD_ABORT_XRI_CX && - irsp->ulpStatus == IOSTAT_LOCAL_REJECT && - irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { - spin_unlock_irq(&phba->hbalock); + if (cmnd == CMD_ABORT_XRI_CX && + ulp_status == IOSTAT_LOCAL_REJECT && + ulp_word4 == IOERR_ABORT_REQUESTED) { goto release_iocb; } - if (abort_iotag != 0 && - abort_iotag <= phba->sli.last_iotag) - abort_iocb = - phba->sli.iocbq_lookup[abort_iotag]; - } else - /* For sli4 the abort_tag is the XRI, - * so the abort routine puts the iotag of the iocb - * being aborted in the context field of the abort - * IOCB. - */ - abort_iocb = phba->sli.iocbq_lookup[abort_context]; + } lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, "0327 Cannot abort els iocb x%px " - "with tag %x context %x, abort status %x, " - "abort code %x\n", - abort_iocb, abort_iotag, abort_context, - irsp->ulpStatus, irsp->un.ulpWord[4]); + "with io cmd xri %x abort tag : x%x, " + "abort status %x abort code %x\n", + cmdiocb, get_job_abtsiotag(phba, cmdiocb), + (phba->sli_rev == LPFC_SLI_REV4) ? 
+ get_wqe_reqtag(cmdiocb) : + cmdiocb->iocb.un.acxri.abortContextTag, + ulp_status, ulp_word4); - spin_unlock_irq(&phba->hbalock); } release_iocb: lpfc_sli_release_iocbq(phba, cmdiocb); @@ -12284,19 +12034,31 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_nodelist *ndlp = NULL; - IOCB_t *irsp = &rspiocb->iocb; + IOCB_t *irsp; + u32 ulp_command, ulp_status, ulp_word4, iotag; + + ulp_command = get_job_cmnd(phba, cmdiocb); + ulp_status = get_job_ulpstatus(phba, rspiocb); + ulp_word4 = get_job_word4(phba, rspiocb); + + if (phba->sli_rev == LPFC_SLI_REV4) { + iotag = get_wqe_reqtag(cmdiocb); + } else { + irsp = &rspiocb->iocb; + iotag = irsp->ulpIoTag; + } /* ELS cmd tag <ulpIoTag> completes */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "0139 Ignoring ELS cmd code x%x completion Data: " "x%x x%x x%x\n", - irsp->ulpIoTag, irsp->ulpStatus, - irsp->un.ulpWord[4], irsp->ulpTimeout); + ulp_command, ulp_status, ulp_word4, iotag); + /* * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp * if exchange is busy. */ - if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { + if (ulp_command == CMD_GEN_REQUEST64_CR) { ndlp = cmdiocb->context_un.ndlp; lpfc_ct_free_iocb(phba, cmdiocb); } else { @@ -12328,28 +12090,29 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_iocbq *abtsiocbp; - IOCB_t *icmd = NULL; - IOCB_t *iabt = NULL; int retval = IOCB_ERROR; unsigned long iflags; - struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *ndlp = NULL; + u32 ulp_command = get_job_cmnd(phba, cmdiocb); + u16 ulp_context, iotag; + bool ia; /* * There are certain command types we don't want to abort. And we * don't want to abort commands that are already in the process of * being aborted. 
*/ - icmd = &cmdiocb->iocb; - if (icmd->ulpCommand == CMD_ABORT_XRI_CN || - icmd->ulpCommand == CMD_CLOSE_XRI_CN || - cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) + if (ulp_command == CMD_ABORT_XRI_WQE || + ulp_command == CMD_ABORT_XRI_CN || + ulp_command == CMD_CLOSE_XRI_CN || + cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED) return IOCB_ABORTING; if (!pring) { - if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) - cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; + if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; else - cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; + cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; return retval; } @@ -12359,10 +12122,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, */ if ((vport->load_flag & FC_UNLOADING) && pring->ringno == LPFC_ELS_RING) { - if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) - cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; + if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl; else - cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; + cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl; return retval; } @@ -12374,43 +12137,46 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* This signals the response to set the correct status * before calling the completion handler */ - cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; + cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED; - iabt = &abtsiocbp->iocb; - iabt->un.acxri.abortType = ABORT_TYPE_ABTS; - iabt->un.acxri.abortContextTag = icmd->ulpContext; if (phba->sli_rev == LPFC_SLI_REV4) { - iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; - if (pring->ringno == LPFC_ELS_RING) - iabt->un.acxri.abortContextTag = cmdiocb->iotag; + ulp_context = cmdiocb->sli4_xritag; + iotag = abtsiocbp->iotag; } else { - iabt->un.acxri.abortIoTag = icmd->ulpIoTag; + iotag = cmdiocb->iocb.ulpIoTag; if (pring->ringno == LPFC_ELS_RING) { ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); - iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; + ulp_context = ndlp->nlp_rpi; + } else { + ulp_context = cmdiocb->iocb.ulpContext; } } - iabt->ulpLe = 1; - iabt->ulpClass = icmd->ulpClass; - - /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; - if (cmdiocb->iocb_flag & LPFC_IO_FCP) - abtsiocbp->iocb_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX); - if (cmdiocb->iocb_flag & LPFC_IO_FOF) - abtsiocbp->iocb_flag |= LPFC_IO_FOF; if (phba->link_state < LPFC_LINK_UP || (phba->sli_rev == LPFC_SLI_REV4 && phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN)) - iabt->ulpCommand = CMD_CLOSE_XRI_CN; + ia = true; else - iabt->ulpCommand = CMD_ABORT_XRI_CN; + ia = false; + + lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag, + cmdiocb->iocb.ulpClass, + LPFC_WQE_CQ_ID_DEFAULT, ia); + + abtsiocbp->vport = vport; + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; + if (cmdiocb->cmd_flag & LPFC_IO_FCP) + abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX); + + if (cmdiocb->cmd_flag & LPFC_IO_FOF) + abtsiocbp->cmd_flag |= LPFC_IO_FOF; if (cmpl) - abtsiocbp->iocb_cmpl = cmpl; + abtsiocbp->cmd_cmpl = cmpl; else - abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; + abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl; abtsiocbp->vport = vport; if (phba->sli_rev == LPFC_SLI_REV4) { @@ -12430,14 +12196,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abort_iotag_exit: lpfc_printf_vlog(vport, KERN_INFO, 
LOG_SLI, - "0339 Abort xri x%x, original iotag x%x, " - "abort cmd iotag x%x retval x%x\n", - iabt->un.acxri.abortIoTag, - iabt->un.acxri.abortContextTag, - abtsiocbp->iotag, retval); - + "0339 Abort IO XRI x%x, Original iotag x%x, " + "abort tag x%x Cmdjob : x%px Abortjob : x%px " + "retval x%x\n", + ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ? + cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp, + retval); if (retval) { - cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; + cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED; __lpfc_sli_release_iocbq(phba, abtsiocbp); } @@ -12495,7 +12261,7 @@ static int lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport) { - IOCB_t *icmd = NULL; + u8 ulp_command; /* No null ptr vports */ if (!iocbq || iocbq->vport != vport) @@ -12504,12 +12270,13 @@ lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq, /* iocb must be for FCP IO, already exists on the TX cmpl queue, * can't be premarked as driver aborted, nor be an ABORT iocb itself */ - icmd = &iocbq->iocb; - if (!(iocbq->iocb_flag & LPFC_IO_FCP) || - !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) || - (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || - (icmd->ulpCommand == CMD_ABORT_XRI_CN || - icmd->ulpCommand == CMD_CLOSE_XRI_CN)) + ulp_command = get_job_cmnd(vport->phba, iocbq); + if (!(iocbq->cmd_flag & LPFC_IO_FCP) || + !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) || + (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || + (ulp_command == CMD_ABORT_XRI_CN || + ulp_command == CMD_CLOSE_XRI_CN || + ulp_command == CMD_ABORT_XRI_WQE)) return -EINVAL; return 0; @@ -12601,9 +12368,9 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, { struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *iocbq; - IOCB_t *icmd = NULL; int sum, i; unsigned long iflags; + u8 ulp_command; spin_lock_irqsave(&phba->hbalock, iflags); for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { @@ -12611,14 +12378,15 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, if (!iocbq || iocbq->vport != vport) continue; - if (!(iocbq->iocb_flag & LPFC_IO_FCP) || - !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) + if (!(iocbq->cmd_flag & LPFC_IO_FCP) || + !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) continue; /* Include counting outstanding aborts */ - icmd = &iocbq->iocb; - if (icmd->ulpCommand == CMD_ABORT_XRI_CN || - icmd->ulpCommand == CMD_CLOSE_XRI_CN) { + ulp_command = get_job_cmnd(phba, iocbq); + if (ulp_command == CMD_ABORT_XRI_CN || + ulp_command == CMD_CLOSE_XRI_CN || + ulp_command == CMD_ABORT_XRI_WQE) { sum++; continue; } @@ -12633,33 +12401,6 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, } /** - * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs - * @phba: Pointer to HBA context object - * @cmdiocb: Pointer to command iocb object. - * @wcqe: pointer to the complete wcqe - * - * This function is called when an aborted FCP iocb completes. This - * function is called by the ring event handler with no lock held. - * This function frees the iocb. It is called for sli-4 adapters. 
- **/ -void -lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_wcqe_complete *wcqe) -{ - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3017 ABORT_XRI_CN completing on rpi x%x " - "original iotag x%x, abort cmd iotag x%x " - "status 0x%x, reason 0x%x\n", - cmdiocb->iocb.un.acxri.abortContextTag, - cmdiocb->iocb.un.acxri.abortIoTag, - cmdiocb->iotag, - (bf_get(lpfc_wcqe_c_status, wcqe) - & LPFC_IOCB_STATUS_MASK), - wcqe->parameter); - lpfc_sli_release_iocbq(phba, cmdiocb); -} - -/** * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs * @phba: Pointer to HBA context object * @cmdiocb: Pointer to command iocb object. @@ -12674,13 +12415,15 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3096 ABORT_XRI_CN completing on rpi x%x " + "3096 ABORT_XRI_CX completing on rpi x%x " "original iotag x%x, abort cmd iotag x%x " "status 0x%x, reason 0x%x\n", + (phba->sli_rev == LPFC_SLI_REV4) ? + cmdiocb->sli4_xritag : cmdiocb->iocb.un.acxri.abortContextTag, - cmdiocb->iocb.un.acxri.abortIoTag, - cmdiocb->iotag, rspiocb->iocb.ulpStatus, - rspiocb->iocb.un.ulpWord[4]); + get_job_abtsiotag(phba, cmdiocb), + cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb), + get_job_word4(phba, rspiocb)); lpfc_sli_release_iocbq(phba, cmdiocb); return; } @@ -12721,7 +12464,6 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id, int errcnt = 0, ret_val = 0; unsigned long iflags; int i; - void *fcp_cmpl = NULL; /* all I/Os are in process of being flushed */ if (phba->hba_flag & HBA_IOQ_FLUSH) @@ -12740,13 +12482,11 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id, spin_lock_irqsave(&phba->hbalock, iflags); if (phba->sli_rev == LPFC_SLI_REV3) { pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; - fcp_cmpl = lpfc_sli_abort_fcp_cmpl; } else if (phba->sli_rev == LPFC_SLI_REV4) { pring = lpfc_sli4_calc_ring(phba, iocbq); - fcp_cmpl = lpfc_sli4_abort_fcp_cmpl; } ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq, - fcp_cmpl); + lpfc_sli_abort_fcp_cmpl); spin_unlock_irqrestore(&phba->hbalock, iflags); if (ret_val != IOCB_SUCCESS) errcnt++; @@ -12788,12 +12528,13 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, struct lpfc_hba *phba = vport->phba; struct lpfc_io_buf *lpfc_cmd; struct lpfc_iocbq *abtsiocbq; - struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *ndlp = NULL; struct lpfc_iocbq *iocbq; - IOCB_t *icmd; int sum, i, ret_val; unsigned long iflags; struct lpfc_sli_ring *pring_s4 = NULL; + u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT; + bool ia; spin_lock_irqsave(&phba->hbalock, iflags); @@ -12838,8 +12579,8 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, * If the iocbq is already being aborted, don't take a second * action, but do count it. 
*/ - if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || - !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { + if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) || + !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring_s4->ring_lock); spin_unlock(&lpfc_cmd->buf_lock); @@ -12855,41 +12596,49 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, continue; } - icmd = &iocbq->iocb; - abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; - abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; - if (phba->sli_rev == LPFC_SLI_REV4) - abtsiocbq->iocb.un.acxri.abortIoTag = - iocbq->sli4_xritag; - else - abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; - abtsiocbq->iocb.ulpLe = 1; - abtsiocbq->iocb.ulpClass = icmd->ulpClass; - abtsiocbq->vport = vport; - - /* ABTS WQE must go to the same WQ as the WQE to be aborted */ - abtsiocbq->hba_wqidx = iocbq->hba_wqidx; - if (iocbq->iocb_flag & LPFC_IO_FCP) - abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; - if (iocbq->iocb_flag & LPFC_IO_FOF) - abtsiocbq->iocb_flag |= LPFC_IO_FOF; + if (phba->sli_rev == LPFC_SLI_REV4) { + iotag = abtsiocbq->iotag; + ulp_context = iocbq->sli4_xritag; + cqid = lpfc_cmd->hdwq->io_cq_map; + } else { + iotag = iocbq->iocb.ulpIoTag; + if (pring->ringno == LPFC_ELS_RING) { + ndlp = (struct lpfc_nodelist *)(iocbq->context1); + ulp_context = ndlp->nlp_rpi; + } else { + ulp_context = iocbq->iocb.ulpContext; + } + } ndlp = lpfc_cmd->rdata->pnode; if (lpfc_is_link_up(phba) && (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) - abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; + ia = false; else - abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; + ia = true; + + lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag, + iocbq->iocb.ulpClass, cqid, + ia); + + abtsiocbq->vport = vport; + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbq->hba_wqidx = iocbq->hba_wqidx; + if (iocbq->cmd_flag & LPFC_IO_FCP) + abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX; + if (iocbq->cmd_flag & LPFC_IO_FOF) + abtsiocbq->cmd_flag |= LPFC_IO_FOF; /* Setup callback routine and issue the command. */ - abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; + abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl; /* * Indicate the IO is being aborted by the driver and set * the caller's flag into the aborted IO. */ - iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; + iocbq->cmd_flag |= LPFC_DRIVER_ABORTED; if (phba->sli_rev == LPFC_SLI_REV4) { ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, @@ -12936,9 +12685,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, wait_queue_head_t *pdone_q; unsigned long iflags; struct lpfc_io_buf *lpfc_cmd; + size_t offset = offsetof(struct lpfc_iocbq, wqe); spin_lock_irqsave(&phba->hbalock, iflags); - if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { + if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) { /* * A time out has occurred for the iocb. If a time out @@ -12947,26 +12697,27 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, */ spin_unlock_irqrestore(&phba->hbalock, iflags); - cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; - cmdiocbq->wait_iocb_cmpl = NULL; - if (cmdiocbq->iocb_cmpl) - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); + cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; + cmdiocbq->wait_cmd_cmpl = NULL; + if (cmdiocbq->cmd_cmpl) + (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL); else lpfc_sli_release_iocbq(phba, cmdiocbq); return; } - cmdiocbq->iocb_flag |= LPFC_IO_WAKE; + /* Copy the contents of the local rspiocb into the caller's buffer. 
*/ + cmdiocbq->cmd_flag |= LPFC_IO_WAKE; if (cmdiocbq->context2 && rspiocbq) - memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, - &rspiocbq->iocb, sizeof(IOCB_t)); + memcpy((char *)cmdiocbq->context2 + offset, + (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset); /* Set the exchange busy flag for task management commands */ - if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && - !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { + if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && + !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, cur_iocbq); - if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY)) + if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) lpfc_cmd->flags |= LPFC_SBUF_XBUSY; else lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; @@ -12985,7 +12736,7 @@ lpfc_sli_wake_iocb_wait, * @piocbq: Pointer to command iocb. * @flag: Flag to test. * - * This routine grabs the hbalock and then test the iocb_flag to + * This routine grabs the hbalock and then tests the cmd_flag to * see if the passed in flag is set. * Returns: * 1 if flag is set. @@ -12999,7 +12750,7 @@ lpfc_chk_iocb_flg, int ret; spin_lock_irqsave(&phba->hbalock, iflags); - ret = piocbq->iocb_flag & flag; + ret = piocbq->cmd_flag & flag; spin_unlock_irqrestore(&phba->hbalock, iflags); return ret; @@ -13014,14 +12765,14 @@ lpfc_chk_iocb_flg, * @timeout: Timeout in number of seconds. * * This function issues the iocb to firmware and waits for the - * iocb to complete. The iocb_cmpl field of the shall be used + * iocb to complete. The cmd_cmpl field of the iocb shall be used * to handle iocbs which time out. If the field is NULL, the * function shall free the iocbq structure. If more clean up is * needed, the caller is expected to provide a completion function * that will provide the needed clean up. If the iocb command is * not completed within timeout seconds, the function will either - * free the iocbq structure (if iocb_cmpl == NULL) or execute the - * completion function set in the iocb_cmpl field and then return + * free the iocbq structure (if cmd_cmpl == NULL) or execute the + * completion function set in the cmd_cmpl field and then return * a status of IOCB_TIMEDOUT. The caller should not free the iocb * resources if this function returns IOCB_TIMEDOUT. * The function waits for the iocb completion using an @@ -13033,7 +12784,7 @@ lpfc_chk_iocb_flg, * This function assumes that the iocb completions occur while * this function sleeps. So, this function cannot be called from * the thread which processes iocb completion for this ring. - * This function clears the iocb_flag of the iocb object before + * This function clears the cmd_flag of the iocb object before * issuing the iocb and the iocb completion handler sets this * flag and wakes this thread when the iocb completes.
* The contents of the response iocb will be copied to prspiocbq @@ -13059,9 +12810,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, unsigned long iflags; bool iocb_completed = true; - if (phba->sli_rev >= LPFC_SLI_REV4) + if (phba->sli_rev >= LPFC_SLI_REV4) { + lpfc_sli_prep_wqe(phba, piocb); + pring = lpfc_sli4_calc_ring(phba, piocb); - else + } else pring = &phba->sli.sli3_ring[ring_number]; /* * If the caller has provided a response iocbq buffer, then context2 @@ -13073,10 +12826,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, piocb->context2 = prspiocbq; } - piocb->wait_iocb_cmpl = piocb->iocb_cmpl; - piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; + piocb->wait_cmd_cmpl = piocb->cmd_cmpl; + piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait; piocb->context_un.wait_queue = &done_q; - piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); + piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) @@ -13094,7 +12847,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); spin_lock_irqsave(&phba->hbalock, iflags); - if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { + if (!(piocb->cmd_flag & LPFC_IO_WAKE)) { /* * IOCB timed out. Inform the wake iocb wait @@ -13102,7 +12855,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, */ iocb_completed = false; - piocb->iocb_flag |= LPFC_IO_WAKE_TMO; + piocb->cmd_flag |= LPFC_IO_WAKE_TMO; } spin_unlock_irqrestore(&phba->hbalock, iflags); if (iocb_completed) { @@ -13157,7 +12910,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, piocb->context2 = NULL; piocb->context_un.wait_queue = NULL; - piocb->iocb_cmpl = NULL; + piocb->cmd_cmpl = NULL; return retval; } @@ -14098,123 +13851,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) } /** - * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn - * @phba: pointer to lpfc hba data structure - * @pIocbIn: pointer to the rspiocbq - * @pIocbOut: pointer to the cmdiocbq - * @wcqe: pointer to the complete wcqe - * - * This routine transfers the fields of a command iocbq to a response iocbq - * by copying all the IOCB fields from command iocbq and transferring the - * completion status information from the complete wcqe. 
- **/ -static void -lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, - struct lpfc_iocbq *pIocbIn, - struct lpfc_iocbq *pIocbOut, - struct lpfc_wcqe_complete *wcqe) -{ - int numBdes, i; - unsigned long iflags; - uint32_t status, max_response; - struct lpfc_dmabuf *dmabuf; - struct ulp_bde64 *bpl, bde; - size_t offset = offsetof(struct lpfc_iocbq, iocb); - - memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, - sizeof(struct lpfc_iocbq) - offset); - /* Map WCQE parameters into irspiocb parameters */ - status = bf_get(lpfc_wcqe_c_status, wcqe); - pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); - if (pIocbOut->iocb_flag & LPFC_IO_FCP) - if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) - pIocbIn->iocb.un.fcpi.fcpi_parm = - pIocbOut->iocb.un.fcpi.fcpi_parm - - wcqe->total_data_placed; - else - pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; - else { - pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; - switch (pIocbOut->iocb.ulpCommand) { - case CMD_ELS_REQUEST64_CR: - dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; - bpl = (struct ulp_bde64 *)dmabuf->virt; - bde.tus.w = le32_to_cpu(bpl[1].tus.w); - max_response = bde.tus.f.bdeSize; - break; - case CMD_GEN_REQUEST64_CR: - max_response = 0; - if (!pIocbOut->context3) - break; - numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ - sizeof(struct ulp_bde64); - dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; - bpl = (struct ulp_bde64 *)dmabuf->virt; - for (i = 0; i < numBdes; i++) { - bde.tus.w = le32_to_cpu(bpl[i].tus.w); - if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) - max_response += bde.tus.f.bdeSize; - } - break; - default: - max_response = wcqe->total_data_placed; - break; - } - if (max_response < wcqe->total_data_placed) - pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; - else - pIocbIn->iocb.un.genreq64.bdl.bdeSize = - wcqe->total_data_placed; - } - - /* Convert BG errors for completion status */ - if (status == CQE_STATUS_DI_ERROR) { - pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; - - if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) - pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; - else - pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; - - pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; - if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ - pIocbIn->iocb.unsli3.sli3_bg.bgstat |= - BGS_GUARD_ERR_MASK; - if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ - pIocbIn->iocb.unsli3.sli3_bg.bgstat |= - BGS_APPTAG_ERR_MASK; - if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ - pIocbIn->iocb.unsli3.sli3_bg.bgstat |= - BGS_REFTAG_ERR_MASK; - - /* Check to see if there was any good data before the error */ - if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { - pIocbIn->iocb.unsli3.sli3_bg.bgstat |= - BGS_HI_WATER_MARK_PRESENT_MASK; - pIocbIn->iocb.unsli3.sli3_bg.bghm = - wcqe->total_data_placed; - } - - /* - * Set ALL the error bits to indicate we don't know what - * type of error it is. - */ - if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) - pIocbIn->iocb.unsli3.sli3_bg.bgstat |= - (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | - BGS_GUARD_ERR_MASK); - } - - /* Pick up HBA exchange busy condition */ - if (bf_get(lpfc_wcqe_c_xb, wcqe)) { - spin_lock_irqsave(&phba->hbalock, iflags); - pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; - spin_unlock_irqrestore(&phba->hbalock, iflags); - } -} - -/** - * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe + * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe * @phba: Pointer to HBA context object. 
* @irspiocbq: Pointer to work-queue completion queue entry. * @@ -14225,8 +13862,8 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, * Return: Pointer to the receive IOCBQ, NULL otherwise. **/ static struct lpfc_iocbq * -lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, - struct lpfc_iocbq *irspiocbq) +lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, + struct lpfc_iocbq *irspiocbq) { struct lpfc_sli_ring *pring; struct lpfc_iocbq *cmdiocbq; @@ -14238,11 +13875,13 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, return NULL; wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; + spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; /* Look up the ELS command IOCB and create pseudo response IOCB */ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); if (unlikely(!cmdiocbq)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0386 ELS complete with no corresponding " "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", @@ -14252,13 +13891,18 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, return NULL; } - spin_lock_irqsave(&pring->ring_lock, iflags); + memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128)); + memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe)); + /* Put the iocb back on the txcmplq */ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); spin_unlock_irqrestore(&pring->ring_lock, iflags); - /* Fake the irspiocbq and copy necessary response information */ - lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); + if (bf_get(lpfc_wcqe_c_xb, wcqe)) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } return irspiocbq; } @@ -15059,7 +14703,6 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, { struct lpfc_sli_ring *pring = cq->pring; struct lpfc_iocbq *cmdiocbq; - struct lpfc_iocbq irspiocbq; unsigned long iflags; /* Check for response status */ @@ -15085,9 +14728,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Look up the FCP command IOCB and create pseudo response IOCB */ spin_lock_irqsave(&pring->ring_lock, iflags); pring->stats.iocb_event++; - spin_unlock_irqrestore(&pring->ring_lock, iflags); cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); + spin_unlock_irqrestore(&pring->ring_lock, iflags); if (unlikely(!cmdiocbq)) { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0374 FCP complete with no corresponding " @@ -15098,39 +14741,31 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, #ifdef CONFIG_SCSI_LPFC_DEBUG_FS cmdiocbq->isr_timestamp = cq->isr_timestamp; #endif - if (cmdiocbq->iocb_cmpl == NULL) { - if (cmdiocbq->wqe_cmpl) { - /* For FCP the flag is cleared in wqe_cmpl */ - if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) && - cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { - spin_lock_irqsave(&phba->hbalock, iflags); - cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; - spin_unlock_irqrestore(&phba->hbalock, iflags); - } + if (bf_get(lpfc_wcqe_c_xb, wcqe)) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } - /* Pass the cmd_iocb and the wcqe to the upper layer */ - (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); - return; + if (cmdiocbq->cmd_cmpl) { + /* For FCP the flag is cleared in cmd_cmpl */ + if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) && + 
cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflags); } + + /* Pass the cmd_iocb and the wcqe to the upper layer */ + memcpy(&cmdiocbq->wcqe_cmpl, wcqe, + sizeof(struct lpfc_wcqe_complete)); + (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq); + } else { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0375 FCP cmdiocb not callback function " "iotag: (%d)\n", bf_get(lpfc_wcqe_c_request_tag, wcqe)); - return; } - - /* Only SLI4 non-IO commands stil use IOCB */ - /* Fake the irspiocb and copy necessary response information */ - lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); - - if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { - spin_lock_irqsave(&phba->hbalock, iflags); - cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; - spin_unlock_irqrestore(&phba->hbalock, iflags); - } - - /* Pass the cmd_iocb and the rsp state to the upper layer */ - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); } /** @@ -18858,8 +18493,8 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3154 BLS ABORT RSP failed, data: x%x/x%x\n", - rsp_iocbq->iocb.ulpStatus, - rsp_iocbq->iocb.un.ulpWord[4]); + get_job_ulpstatus(phba, rsp_iocbq), + get_job_word4(phba, rsp_iocbq)); } /** @@ -18901,7 +18536,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp; uint16_t oxid, rxid, xri, lxri; uint32_t sid, fctl; - IOCB_t *icmd; + union lpfc_wqe128 *icmd; int rc; if (!lpfc_is_link_up(phba)) @@ -18929,22 +18564,11 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, if (!ctiocb) return; + icmd = &ctiocb->wqe; + /* Extract the F_CTL field from FC_HDR */ fctl = sli4_fctl_from_fc_hdr(fc_hdr); - icmd = &ctiocb->iocb; - icmd->un.xseq64.bdl.bdeSize = 0; - icmd->un.xseq64.bdl.ulpIoTag32 = 0; - icmd->un.xseq64.w5.hcsw.Dfctl = 0; - icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; - icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; - - /* Fill in the rest of iocb fields */ - icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; - icmd->ulpBdeCount = 0; - icmd->ulpLe = 1; - icmd->ulpClass = CLASS3; - icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; ctiocb->context1 = lpfc_nlp_get(ndlp); if (!ctiocb->context1) { lpfc_sli_release_iocbq(phba, ctiocb); @@ -18952,9 +18576,10 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, } ctiocb->vport = phba->pport; - ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; + ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; ctiocb->sli4_lxritag = NO_XRI; ctiocb->sli4_xritag = NO_XRI; + ctiocb->abort_rctl = FC_RCTL_BA_ACC; if (fctl & FC_FC_EX_CTX) /* Exchange responder sent the abort so we @@ -18974,10 +18599,12 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, */ if ((fctl & FC_FC_EX_CTX) && (lxri > lpfc_sli4_get_iocb_cnt(phba))) { - icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; - bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); - bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); - bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); + ctiocb->abort_rctl = FC_RCTL_BA_RJT; + bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); + bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, + FC_BA_RJT_INV_XID); + bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, + FC_BA_RJT_UNABLE); } /* If BA_ABTS failed to abort a partially assembled receive sequence, @@ -18985,10 +18612,12 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * the IOCB for a 
BA_RJT. */ if (aborted == false) { - icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; - bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); - bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); - bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); + ctiocb->abort_rctl = FC_RCTL_BA_RJT; + bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0); + bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp, + FC_BA_RJT_INV_XID); + bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp, + FC_BA_RJT_UNABLE); } if (fctl & FC_FC_EX_CTX) { @@ -18996,28 +18625,40 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * of BA_ACC will use OX_ID from ABTS for the XRI_TAG * field and RX_ID from ABTS for RX_ID field. */ - bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); + ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP; + bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid); } else { /* ABTS sent by initiator to CT exchange, construction * of BA_ACC will need to allocate a new XRI as for the * XRI_TAG field. */ - bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); + ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT; } - bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); - bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); + + /* OX_ID is invariable to who sent ABTS to CT exchange */ + bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid); + bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, rxid); + + /* Use CT=VPI */ + bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest, + ndlp->nlp_DID); + bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); + bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX); + /* Xmit CT abts response on exchange <xid> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", - icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); + ctiocb->abort_rctl, oxid, phba->link_state); + lpfc_sli_prep_wqe(phba, ctiocb); rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); if (rc == IOCB_ERROR) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "2925 Failed to issue CT ABTS RSP x%x on " "xri x%x, Data x%x\n", - icmd->un.xseq64.w5.hcsw.Rctl, oxid, + ctiocb->abort_rctl, oxid, phba->link_state); lpfc_nlp_put(ndlp); ctiocb->context1 = NULL; @@ -19140,7 +18781,6 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) struct fc_frame_header *fc_hdr; uint32_t sid; uint32_t len, tot_len; - struct ulp_bde64 *pbde; fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; /* remove from receive buffer list */ @@ -19153,40 +18793,40 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) first_iocbq = lpfc_sli_get_iocbq(vport->phba); if (first_iocbq) { /* Initialize the first IOCB. */ - first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; - first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; + first_iocbq->wcqe_cmpl.total_data_placed = 0; + bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl, + IOSTAT_SUCCESS); first_iocbq->vport = vport; /* Check FC Header to see what TYPE of frame we are rcv'ing */ if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { - first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; - first_iocbq->iocb.un.rcvels.parmRo = - sli4_did_from_fc_hdr(fc_hdr); - first_iocbq->iocb.ulpPU = PARM_NPIV_DID; - } else - first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; - first_iocbq->iocb.ulpContext = NO_XRI; - first_iocbq->iocb.unsli3.rcvsli3.ox_id = - be16_to_cpu(fc_hdr->fh_ox_id); - /* iocbq is prepped for internal consumption. Physical vpi. 
*/ - first_iocbq->iocb.unsli3.rcvsli3.vpi = - vport->phba->vpi_ids[vport->vpi]; - /* put the first buffer into the first IOCBq */ + bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp, + sli4_did_from_fc_hdr(fc_hdr)); + } + + bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com, + NO_XRI); + bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com, + be16_to_cpu(fc_hdr->fh_ox_id)); + + /* put the first buffer into the first iocb */ tot_len = bf_get(lpfc_rcqe_length, - &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + &seq_dmabuf->cq_event.cqe.rcqe_cmpl); first_iocbq->context2 = &seq_dmabuf->dbuf; first_iocbq->context3 = NULL; - first_iocbq->iocb.ulpBdeCount = 1; + /* Keep track of the BDE count */ + first_iocbq->wcqe_cmpl.word3 = 1; + if (tot_len > LPFC_DATA_BUF_SIZE) - first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = - LPFC_DATA_BUF_SIZE; + first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = + LPFC_DATA_BUF_SIZE; else - first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; - - first_iocbq->iocb.un.rcvels.remoteID = sid; + first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len; - first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; + first_iocbq->wcqe_cmpl.total_data_placed = tot_len; + bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest, + sid); } iocbq = first_iocbq; /* @@ -19200,28 +18840,23 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) } if (!iocbq->context3) { iocbq->context3 = d_buf; - iocbq->iocb.ulpBdeCount++; + iocbq->wcqe_cmpl.word3++; /* We need to get the size out of the right CQE */ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); len = bf_get(lpfc_rcqe_length, &hbq_buf->cq_event.cqe.rcqe_cmpl); - pbde = (struct ulp_bde64 *) - &iocbq->iocb.unsli3.sli3Words[4]; - if (len > LPFC_DATA_BUF_SIZE) - pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; - else - pbde->tus.f.bdeSize = len; - - iocbq->iocb.unsli3.rcvsli3.acc_len += len; + iocbq->unsol_rcv_len = len; + iocbq->wcqe_cmpl.total_data_placed += len; tot_len += len; } else { iocbq = lpfc_sli_get_iocbq(vport->phba); if (!iocbq) { if (first_iocbq) { - first_iocbq->iocb.ulpStatus = - IOSTAT_FCP_RSP_ERROR; - first_iocbq->iocb.un.ulpWord[4] = - IOERR_NO_RESOURCES; + bf_set(lpfc_wcqe_c_status, + &first_iocbq->wcqe_cmpl, + IOSTAT_SUCCESS); + first_iocbq->wcqe_cmpl.parameter = + IOERR_NO_RESOURCES; } lpfc_in_buf_free(vport->phba, d_buf); continue; @@ -19232,17 +18867,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) &hbq_buf->cq_event.cqe.rcqe_cmpl); iocbq->context2 = d_buf; iocbq->context3 = NULL; - iocbq->iocb.ulpBdeCount = 1; + iocbq->wcqe_cmpl.word3 = 1; + if (len > LPFC_DATA_BUF_SIZE) - iocbq->iocb.un.cont64[0].tus.f.bdeSize = - LPFC_DATA_BUF_SIZE; + iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = + LPFC_DATA_BUF_SIZE; else - iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; + iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize = + len; tot_len += len; - iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; - - iocbq->iocb.un.rcvels.remoteID = sid; + iocbq->wcqe_cmpl.total_data_placed = tot_len; + bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest, + sid); list_add_tail(&iocbq->list, &first_iocbq->list); } } @@ -19310,7 +18947,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, struct fc_frame_header *fc_hdr; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *iocbq = NULL; - union lpfc_wqe *wqe; + union lpfc_wqe128 *pwqe; struct lpfc_dmabuf *pcmd = NULL; uint32_t frame_len; int rc; @@ -19345,34 +18982,46 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, /* copyin the payload */ 
memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); - /* fill in BDE's for command */ - iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); - iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); - iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; - iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; - iocbq->context2 = pcmd; iocbq->vport = vport; - iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; - iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; + iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK; + iocbq->cmd_flag |= LPFC_USE_FCPWQIDX; + iocbq->num_bdes = 0; + + pwqe = &iocbq->wqe; + /* fill in BDE's for command */ + pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys); + pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys); + pwqe->gen_req.bde.tus.f.bdeSize = frame_len; + pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + + pwqe->send_frame.frame_len = frame_len; + pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr)); + pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1)); + pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2)); + pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3)); + pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4)); + pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5)); + + pwqe->generic.wqe_com.word7 = 0; + pwqe->generic.wqe_com.word10 = 0; + + bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME); + bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */ + bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */ + bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1); + bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1); + bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1); + bf_set(wqe_xc, &pwqe->generic.wqe_com, 1); + bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA); + bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag); + bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag); + bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3); + pwqe->generic.wqe_com.abort_tag = iocbq->iotag; + + iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl; - /* - * Setup rest of the iocb as though it were a WQE - * Build the SEND_FRAME WQE - */ - wqe = (union lpfc_wqe *)&iocbq->iocb; - - wqe->send_frame.frame_len = frame_len; - wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); - wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); - wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); - wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); - wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); - wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); - - iocbq->iocb.ulpCommand = CMD_SEND_FRAME; - iocbq->iocb.ulpLe = 1; - iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); if (rc == IOCB_ERROR) goto exit; @@ -21087,10 +20736,9 @@ lpfc_drain_txq(struct lpfc_hba *phba) struct lpfc_iocbq *piocbq = NULL; unsigned long iflags = 0; char *fail_msg = NULL; - struct lpfc_sglq *sglq; - union lpfc_wqe128 wqe; uint32_t txq_cnt = 0; struct lpfc_queue *wq; + int ret = 0; if (phba->link_flag & LS_MDS_LOOPBACK) { /* MDS WQE are posted only to first WQ*/ @@ -21129,44 +20777,33 @@ lpfc_drain_txq(struct lpfc_hba *phba) txq_cnt); break; } - sglq = __lpfc_sli_get_els_sglq(phba, piocbq); - if (!sglq) { - __lpfc_sli_ringtx_put(phba, pring, piocbq); - spin_unlock_irqrestore(&pring->ring_lock, iflags); - break; - } 
         txq_cnt--;
 
-        /* The xri and iocb resources secured,
-         * attempt to issue request
-         */
-        piocbq->sli4_lxritag = sglq->sli4_lxritag;
-        piocbq->sli4_xritag = sglq->sli4_xritag;
-        if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
-            fail_msg = "to convert bpl to sgl";
-        else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
-            fail_msg = "to convert iocb to wqe";
-        else if (lpfc_sli4_wq_put(wq, &wqe))
-            fail_msg = " - Wq is full";
-        else
-            lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+        ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
+        if (ret && ret != IOCB_BUSY) {
+            fail_msg = " - Cannot send IO ";
+            piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+        }
 
         if (fail_msg) {
+            piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
             /* Failed means we can't issue and need to cancel */
             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                     "2822 IOCB failed %s iotag 0x%x "
-                    "xri 0x%x\n",
-                    fail_msg,
-                    piocbq->iotag, piocbq->sli4_xritag);
+                    "xri 0x%x %d flg x%x\n",
+                    fail_msg, piocbq->iotag,
+                    piocbq->sli4_xritag, ret,
+                    piocbq->cmd_flag);
             list_add_tail(&piocbq->list, &completions);
             fail_msg = NULL;
         }
         spin_unlock_irqrestore(&pring->ring_lock, iflags);
+        if (txq_cnt == 0 || ret == IOCB_BUSY)
+            break;
     }
-
     /* Cancel all the IOCBs that cannot be issued */
     lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-                IOERR_SLI_ABORTED);
+                  IOERR_SLI_ABORTED);
 
     return txq_cnt;
 }
@@ -21214,7 +20851,7 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
     cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
     if (cmd == CMD_XMIT_BLS_RSP64_WQE)
         return sglq->sli4_xritag;
-    numBdes = pwqeq->rsvd2;
+    numBdes = pwqeq->num_bdes;
     if (numBdes) {
         /* The addrHigh and addrLow fields within the WQE
          * have not been byteswapped yet so there is no
@@ -21315,7 +20952,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
     uint32_t ret = 0;
 
     /* NVME_LS and NVME_LS ABTS requests. */
-    if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
+    if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
         pring = phba->sli4_hba.nvmels_wq->pring;
         lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, qp,
                       wq_access);
@@ -21346,7 +20983,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
     }
 
     /* NVME_FCREQ and NVME_ABTS requests */
-    if (pwqe->iocb_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
+    if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
         /* Get the IO distribution (hba_wqidx) for WQ assignment. */
         wq = qp->io_wq;
         pring = wq->pring;
@@ -21368,7 +21005,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
     }
 
     /* NVMET requests */
-    if (pwqe->iocb_flag & LPFC_IO_NVMET) {
+    if (pwqe->cmd_flag & LPFC_IO_NVMET) {
         /* Get the IO distribution (hba_wqidx) for WQ assignment. */
         wq = qp->io_wq;
         pring = wq->pring;
@@ -21434,7 +21071,7 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
         return WQE_NORESOURCE;
 
     /* Indicate the IO is being aborted by the driver.
      */
-    cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+    cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
     abtswqe = &abtsiocb->wqe;
     memset(abtswqe, 0, sizeof(*abtswqe));
@@ -21453,15 +21090,15 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
     /* ABTS WQE must go to the same WQ as the WQE to be aborted */
     abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
-    abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
-    if (cmdiocb->iocb_flag & LPFC_IO_FCP)
-        abtsiocb->iocb_flag |= LPFC_IO_FCP;
-    if (cmdiocb->iocb_flag & LPFC_IO_NVME)
-        abtsiocb->iocb_flag |= LPFC_IO_NVME;
-    if (cmdiocb->iocb_flag & LPFC_IO_FOF)
-        abtsiocb->iocb_flag |= LPFC_IO_FOF;
+    abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
+    if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+        abtsiocb->cmd_flag |= LPFC_IO_FCP;
+    if (cmdiocb->cmd_flag & LPFC_IO_NVME)
+        abtsiocb->cmd_flag |= LPFC_IO_NVME;
+    if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+        abtsiocb->cmd_flag |= LPFC_IO_FOF;
     abtsiocb->vport = vport;
-    abtsiocb->wqe_cmpl = cmpl;
+    abtsiocb->cmd_cmpl = cmpl;
 
     lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
     retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
@@ -21472,7 +21109,7 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
             xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
 
     if (retval) {
-        cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+        cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
         __lpfc_sli_release_iocbq(phba, abtsiocb);
     }
 
@@ -21834,8 +21471,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
 
     /* MUST zero fields if buffer is reused by another protocol */
     lpfc_ncmd->nvmeCmd = NULL;
-    lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
-    lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+    lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
 
     if (phba->cfg_xpsgl && !phba->nvmet_support &&
         !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
@@ -22549,3 +22185,192 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 
     spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 }
+
+/**
+ * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
+ * @phba: phba object
+ * @job: job entry of the command to be posted.
+ *
+ * Fill the common fields of the wqe for each of the command.
+ *
+ * Return codes:
+ *	None
+ **/
+void
+lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
+{
+	u8 cmnd;
+	u32 *pcmd;
+	u32 if_type = 0;
+	u32 fip, abort_tag;
+	struct lpfc_nodelist *ndlp = NULL;
+	union lpfc_wqe128 *wqe = &job->wqe;
+	struct lpfc_dmabuf *context2;
+	u32 els_id = LPFC_ELS_ID_DEFAULT;
+	u8 command_type = ELS_COMMAND_NON_FIP;
+
+	fip = phba->hba_flag & HBA_FIP_SUPPORT;
+	/* The fcp commands will set command type */
+	if (job->cmd_flag & LPFC_IO_FCP)
+		command_type = FCP_COMMAND;
+	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
+		command_type = ELS_COMMAND_FIP;
+	else
+		command_type = ELS_COMMAND_NON_FIP;
+
+	abort_tag = job->iotag;
+	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
+
+	switch (cmnd) {
+	case CMD_ELS_REQUEST64_WQE:
+		if (job->cmd_flag & LPFC_IO_LIBDFC)
+			ndlp = job->context_un.ndlp;
+		else
+			ndlp = (struct lpfc_nodelist *)job->context1;
+
+		/* CCP CCPE PV PRI in word10 were set in the memcpy */
+		if (command_type == ELS_COMMAND_FIP)
+			els_id = ((job->cmd_flag & LPFC_FIP_ELS_ID_MASK)
+				  >> LPFC_FIP_ELS_ID_SHIFT);
+
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
+		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+			context2 = (struct lpfc_dmabuf *)job->context2;
+			pcmd = (u32 *)context2->virt;
+			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+				     *pcmd == ELS_CMD_SCR ||
+				     *pcmd == ELS_CMD_RDF ||
+				     *pcmd == ELS_CMD_EDC ||
+				     *pcmd == ELS_CMD_RSCN_XMT ||
+				     *pcmd == ELS_CMD_FDISC ||
+				     *pcmd == ELS_CMD_LOGO ||
+				     *pcmd == ELS_CMD_QFPA ||
+				     *pcmd == ELS_CMD_UVEM ||
+				     *pcmd == ELS_CMD_PLOGI)) {
+				bf_set(els_req64_sp, &wqe->els_req, 1);
+				bf_set(els_req64_sid, &wqe->els_req,
+				       job->vport->fc_myDID);
+
+				if ((*pcmd == ELS_CMD_FLOGI) &&
+				    !(phba->fc_topology ==
+				      LPFC_TOPOLOGY_LOOP))
+					bf_set(els_req64_sid, &wqe->els_req, 0);
+
+				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+				       phba->vpi_ids[job->vport->vpi]);
+			} else if (pcmd) {
+				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+			}
+		}
+
+		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+		break;
+	case CMD_XMIT_ELS_RSP64_WQE:
+		ndlp = (struct lpfc_nodelist *)job->context1;
+
+		/* word4 */
+		wqe->xmit_els_rsp.word4 = 0;
+
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
+		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+			if (job->vport->fc_flag & FC_PT2PT) {
+				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+				       job->vport->fc_myDID);
+				if (job->vport->fc_myDID == Fabric_DID) {
+					bf_set(wqe_els_did,
+					       &wqe->xmit_els_rsp.wqe_dest, 0);
+				}
+			}
+		}
+
+		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+		       LPFC_WQE_LENLOC_WORD3);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
+
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+			       job->vport->fc_myDID);
+			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+		}
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
+				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+				       phba->vpi_ids[job->vport->vpi]);
+		}
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_GEN_REQUEST64_WQE:
+		/* Word 10 */
+		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_XMIT_SEQUENCE64_WQE:
+		if (phba->link_flag & LS_LOOPBACK_MODE)
+			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+
+		wqe->xmit_sequence.rsvd3 = 0;
+		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+		       LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+		       LPFC_WQE_LENLOC_WORD12);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_XMIT_BLS_RSP64_WQE:
+		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
+		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+		       phba->vpi_ids[phba->pport->vpi]);
+		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		/* Overwrite the pre-set comnd type with OTHER_COMMAND */
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
+	case CMD_ABORT_XRI_WQE:		/* abort iotag */
+	case CMD_SEND_FRAME:		/* mds loopback */
+		/* cases already formatted for sli4 wqe - no chgs necessary */
+		return;
+	default:
+		dump_stack();
+		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+				"6207 Invalid command 0x%x\n",
+				cmnd);
+		break;
+	}
+
+	wqe->generic.wqe_com.abort_tag = abort_tag;
+	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
+	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
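The new lpfc_sli_prep_wqe() consolidates the per-opcode WQE setup that the removed IOCB-to-WQE conversion paths used to perform: a submission path builds the command-specific words of job->wqe, and this routine then fills the generic words (abort_tag, request tag, command type, CQ id) keyed off the opcode already stored in wqe_cmnd. The sketch below is illustrative only and is not part of this patch; example_post_gen_req() is a hypothetical caller, shown merely to make the implied ordering concrete (opcode and command-specific fields first, then the prep call, then submission). All identifiers other than the example function name appear elsewhere in this diff.

    /* Hypothetical caller - not from this patch. Assumes 'job' came from
     * lpfc_sli_get_iocbq() and its command-specific words (BDEs, parameters)
     * were already populated by the caller.
     */
    static int example_post_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *job)
    {
        union lpfc_wqe128 *wqe = &job->wqe;

        /* The opcode must be set first; lpfc_sli_prep_wqe() switches on it. */
        bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_GEN_REQUEST64_WQE);

        /* Fill the common words: abort_tag, reqtag, cmd_type, cqid. */
        lpfc_sli_prep_wqe(phba, job);

        /* Queue the request on the ELS ring, as the MDS loopback path does. */
        return lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, job, 0);
    }

Whether the driver invokes lpfc_sli_prep_wqe() directly from each submission path or from a common issue routine is not visible in this hunk; the sketch only reflects the ordering implied by the function reading wqe_cmnd before filling the generic words.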