Diffstat (limited to 'drivers/scsi/qla2xxx/qla_isr.c')
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 237
1 file changed, 192 insertions(+), 45 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2572121b765b..7b3b702ef622 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,6 +9,7 @@
 
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
 #include <linux/t10-pi.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_bsg_fc.h>
@@ -17,7 +18,7 @@
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
-static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
+static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
     sts_entry_t *);
 
 /**
@@ -431,8 +432,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
             "Register: 0x%x%x.\n", mb[7], mb[3]);
         if (err_level == ERR_LEVEL_NON_FATAL) {
             ql_log(ql_log_warn, vha, 0x5063,
-                "Not a fatal error, f/w has recovered "
-                "iteself.\n");
+                "Not a fatal error, f/w has recovered itself.\n");
         } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
             ql_log(ql_log_fatal, vha, 0x5064,
                 "Recoverable Fatal error: Chip reset "
@@ -709,7 +709,7 @@ skip_rio:
             ha->isp_ops->fw_dump(vha, 1);
         ha->flags.fw_init_done = 0;
-        ha->flags.fw_started = 0;
+        QLA_FW_STOPPED(ha);
 
         if (IS_FWI2_CAPABLE(ha)) {
             if (mb[1] == 0 && mb[2] == 0) {
@@ -829,7 +829,7 @@ skip_rio:
                 fc_host_port_name(vha->host) =
                     wwn_to_u64(vha->port_name);
                 ql_dbg(ql_dbg_init + ql_dbg_verbose,
-                    vha, 0x0144, "LOOP DOWN detected,"
+                    vha, 0x00d8, "LOOP DOWN detected,"
                     "restore WWPN %016llx\n",
                     wwn_to_u64(vha->port_name));
             }
@@ -973,6 +973,23 @@ skip_rio:
         if (mb[1] == 0xffff)
             goto global_port_update;
 
+        if (mb[1] == NPH_SNS_LID(ha)) {
+            set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+            set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+            break;
+        }
+
+        /* use handle_cnt for loop id/nport handle */
+        if (IS_FWI2_CAPABLE(ha))
+            handle_cnt = NPH_SNS;
+        else
+            handle_cnt = SIMPLE_NAME_SERVER;
+        if (mb[1] == handle_cnt) {
+            set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+            set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+            break;
+        }
+
         /* Port logout */
         fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
         if (!fcport)
@@ -1701,7 +1718,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
     case LSC_SCODE_NOXCB:
         vha->hw->exch_starvation++;
         if (vha->hw->exch_starvation > 5) {
-            ql_log(ql_log_warn, vha, 0xffff,
+            ql_log(ql_log_warn, vha, 0xd046,
                 "Exchange starvation. Resetting RISC\n");
 
             vha->hw->exch_starvation = 0;
@@ -1781,6 +1798,79 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
     sp->done(sp, 0);
 }
 
+static void
+qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+{
+    const char func[] = "NVME-IOCB";
+    fc_port_t *fcport;
+    srb_t *sp;
+    struct srb_iocb *iocb;
+    struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+    uint16_t state_flags;
+    struct nvmefc_fcp_req *fd;
+    uint16_t ret = 0;
+    struct srb_iocb *nvme;
+
+    sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
+    if (!sp)
+        return;
+
+    iocb = &sp->u.iocb_cmd;
+    fcport = sp->fcport;
+    iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+    state_flags = le16_to_cpu(sts->state_flags);
+    fd = iocb->u.nvme.desc;
+    nvme = &sp->u.iocb_cmd;
+
+    if (unlikely(nvme->u.nvme.aen_op))
+        atomic_dec(&sp->vha->nvme_active_aen_cnt);
+
+    /*
+     * State flags: Bit 6 and 0.
+     * If 0 is set, we don't care about 6.
+     * both cases resp was dma'd to host buffer
+     * if both are 0, that is good path case.
+     * if six is set and 0 is clear, we need to
+     * copy resp data from status iocb to resp buffer.
+     */
+    if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
+        iocb->u.nvme.rsp_pyld_len = 0;
+    } else if ((state_flags & SF_FCP_RSP_DMA)) {
+        iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+    } else if (state_flags & SF_NVME_ERSP) {
+        uint32_t *inbuf, *outbuf;
+        uint16_t iter;
+
+        inbuf = (uint32_t *)&sts->nvme_ersp_data;
+        outbuf = (uint32_t *)fd->rspaddr;
+        iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+        iter = iocb->u.nvme.rsp_pyld_len >> 2;
+        for (; iter; iter--)
+            *outbuf++ = swab32(*inbuf++);
+    } else { /* unhandled case */
+        ql_log(ql_log_warn, fcport->vha, 0x503a,
+            "NVME-%s error. Unhandled state_flags of %x\n",
+            sp->name, state_flags);
+    }
+
+    fd->transferred_length = fd->payload_length -
+        le32_to_cpu(sts->residual_len);
+
+    if (sts->entry_status) {
+        ql_log(ql_log_warn, fcport->vha, 0x5038,
+            "NVME-%s error - hdl=%x entry-status(%x).\n",
+            sp->name, sp->handle, sts->entry_status);
+        ret = QLA_FUNCTION_FAILED;
+    } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
+        ql_log(ql_log_warn, fcport->vha, 0x5039,
+            "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
+            sp->name, sp->handle, sts->comp_status,
+            le32_to_cpu(sts->residual_len), sts->ox_id);
+        ret = QLA_FUNCTION_FAILED;
+    }
+    sp->done(sp, ret);
+}
+
 /**
  * qla2x00_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
@@ -1950,9 +2040,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
      * For type 3: ref & app tag is all 'f's
      * For type 0,1,2: app tag is all 'f's
      */
-    if ((a_app_tag == 0xffff) &&
+    if ((a_app_tag == T10_PI_APP_ESCAPE) &&
         ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
-         (a_ref_tag == 0xffffffff))) {
+         (a_ref_tag == T10_PI_REF_ESCAPE))) {
         uint32_t blocks_done, resid;
         sector_t lba_s = scsi_get_lba(cmd);
 
@@ -1994,9 +2084,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
             spt = page_address(sg_page(sg)) + sg->offset;
             spt += j;
 
-            spt->app_tag = 0xffff;
+            spt->app_tag = T10_PI_APP_ESCAPE;
             if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
-                spt->ref_tag = 0xffffffff;
+                spt->ref_tag = T10_PI_REF_ESCAPE;
         }
 
         return 0;
@@ -2263,6 +2353,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
         return;
     }
 
+    if (sp->cmd_type != TYPE_SRB) {
+        req->outstanding_cmds[handle] = NULL;
+        ql_dbg(ql_dbg_io, vha, 0x3015,
+            "Unknown sp->cmd_type %x %p).\n",
+            sp->cmd_type, sp);
+        return;
+    }
+
+    /* NVME completion. */
+    if (sp->type == SRB_NVME_CMD) {
+        qla24xx_nvme_iocb_entry(vha, req, pkt);
+        return;
+    }
+
     if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
         qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
         return;
@@ -2374,8 +2478,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                 ((unsigned)(scsi_bufflen(cp) - resid) <
                  cp->underflow)) {
                 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
-                    "Mid-layer underflow "
-                    "detected (0x%x of 0x%x bytes).\n",
+                    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
                     resid, scsi_bufflen(cp));
 
                 res = DID_ERROR << 16;
@@ -2408,8 +2511,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
         if (scsi_status & SS_RESIDUAL_UNDER) {
             if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
                 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
-                    "Dropped frame(s) detected "
-                    "(0x%x of 0x%x bytes).\n",
+                    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
                     resid, scsi_bufflen(cp));
 
                 res = DID_ERROR << 16 | lscsi_status;
@@ -2420,8 +2522,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                 ((unsigned)(scsi_bufflen(cp) - resid) <
                  cp->underflow)) {
                 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
-                    "Mid-layer underflow "
-                    "detected (0x%x of 0x%x bytes).\n",
+                    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
                     resid, scsi_bufflen(cp));
 
                 res = DID_ERROR << 16;
@@ -2435,9 +2536,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
              */
 
             ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
-                "Dropped frame(s) detected (0x%x "
-                "of 0x%x bytes).\n", resid,
-                scsi_bufflen(cp));
+                "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+                resid, scsi_bufflen(cp));
 
             res = DID_ERROR << 16 | lscsi_status;
             goto check_scsi_status;
@@ -2619,8 +2719,9 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
  * qla2x00_error_entry() - Process an error entry.
  * @ha: SCSI driver HA context
  * @pkt: Entry pointer
+ * return : 1=allow further error analysis. 0=no additional error analysis.
  */
-static void
+static int
 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 {
     srb_t *sp;
@@ -2631,7 +2732,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
     int res = DID_ERROR << 16;
 
     ql_dbg(ql_dbg_async, vha, 0x502a,
-        "type of error status in response: 0x%x\n", pkt->entry_status);
+        "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
+        pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
 
     if (que >= ha->max_req_queues || !ha->req_q_map[que])
         goto fatal;
@@ -2641,18 +2743,35 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
     if (pkt->entry_status & RF_BUSY)
         res = DID_BUS_BUSY << 16;
 
-    if (pkt->entry_type == NOTIFY_ACK_TYPE &&
-        pkt->handle == QLA_TGT_SKIP_HANDLE)
-        return;
+    if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
+        return 0;
 
-    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
-    if (sp) {
-        sp->done(sp, res);
-        return;
+    switch (pkt->entry_type) {
+    case NOTIFY_ACK_TYPE:
+    case STATUS_TYPE:
+    case STATUS_CONT_TYPE:
+    case LOGINOUT_PORT_IOCB_TYPE:
+    case CT_IOCB_TYPE:
+    case ELS_IOCB_TYPE:
+    case ABORT_IOCB_TYPE:
+    case MBX_IOCB_TYPE:
+        sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+        if (sp) {
+            sp->done(sp, res);
+            return 0;
+        }
+        break;
+
+    case ABTS_RESP_24XX:
+    case CTIO_TYPE7:
+    case CTIO_CRC2:
+    default:
+        return 1;
     }
 fatal:
     ql_log(ql_log_warn, vha, 0x5030,
         "Error entry - invalid handle/queue (%04x).\n", que);
+    return 0;
 }
 
 /**
@@ -2708,6 +2827,21 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
     sp->done(sp, 0);
 }
 
+void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
+    struct req_que *req)
+{
+    srb_t *sp;
+    const char func[] = "LS4_IOCB";
+    uint16_t comp_status;
+
+    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+    if (!sp)
+        return;
+
+    comp_status = le16_to_cpu(pkt->status);
+    sp->done(sp, comp_status);
+}
+
 /**
  * qla24xx_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
@@ -2721,6 +2855,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
     if (!ha->flags.fw_started)
         return;
 
+    if (rsp->qpair->cpuid != smp_processor_id())
+        qla_cpu_update(rsp->qpair, smp_processor_id());
+
     while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
         pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
 
@@ -2733,9 +2870,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
         }
 
         if (pkt->entry_status != 0) {
-            qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
-
-            if (qlt_24xx_process_response_error(vha, pkt))
+            if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
                 goto process_err;
 
             ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2768,7 +2903,8 @@ process_err:
         case ABTS_RECV_24XX:
             if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                 /* ensure that the ATIO queue is empty */
-                qlt_handle_abts_recv(vha, (response_t *)pkt);
+                qlt_handle_abts_recv(vha, rsp,
+                    (response_t *)pkt);
                 break;
             } else {
                 /* drop through */
@@ -2777,11 +2913,16 @@ process_err:
         case ABTS_RESP_24XX:
         case CTIO_TYPE7:
         case CTIO_CRC2:
-            qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+            qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
+            break;
+        case PT_LS4_REQUEST:
+            qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
+                rsp->req);
             break;
         case NOTIFY_ACK_TYPE:
             if (pkt->handle == QLA_TGT_SKIP_HANDLE)
-                qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+                qlt_response_pkt_all_vps(vha, rsp,
+                    (response_t *)pkt);
             else
                 qla24xxx_nack_iocb_entry(vha, rsp->req,
                     (struct nack_to_isp *)pkt);
@@ -3156,10 +3297,10 @@ struct qla_init_msix_entry {
 };
 
 static const struct qla_init_msix_entry msix_entries[] = {
-    { "qla2xxx (default)", qla24xx_msix_default },
-    { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
-    { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
-    { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
+    { "default", qla24xx_msix_default },
+    { "rsp_q", qla24xx_msix_rsp_q },
+    { "atio_q", qla83xx_msix_atio_q },
+    { "qpair_multiq", qla2xxx_msix_rsp_q },
 };
 
 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
@@ -3183,9 +3324,14 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
         min_vecs++;
     }
 
-    ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
-        ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
-        &desc);
+    if (USER_CTRL_IRQ(ha)) {
+        /* user wants to control IRQ setting for target mode */
+        ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
+            ha->msix_count, PCI_IRQ_MSIX);
+    } else
+        ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
+            ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+            &desc);
 
     if (ret < 0) {
         ql_log(ql_log_fatal, vha, 0x00c7,
@@ -3239,7 +3385,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
         qentry->handle = rsp;
         rsp->msix = qentry;
         scnprintf(qentry->name, sizeof(qentry->name),
-            "%s", msix_entries[i].name);
+            "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
         if (IS_P3P_TYPE(ha))
             ret = request_irq(qentry->vector,
                 qla82xx_msix_entries[i].handler,
@@ -3247,7 +3393,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
         else
             ret = request_irq(qentry->vector,
                 msix_entries[i].handler,
-                0, msix_entries[i].name, rsp);
+                0, qentry->name, rsp);
         if (ret)
             goto msix_register_fail;
         qentry->have_irq = 1;
@@ -3263,11 +3409,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
         rsp->msix = qentry;
         qentry->handle = rsp;
         scnprintf(qentry->name, sizeof(qentry->name),
-            "%s", msix_entries[QLA_ATIO_VECTOR].name);
+            "qla2xxx%lu_%s", vha->host_no,
+            msix_entries[QLA_ATIO_VECTOR].name);
         qentry->in_use = 1;
         ret = request_irq(qentry->vector,
             msix_entries[QLA_ATIO_VECTOR].handler,
-            0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
+            0, qentry->name, rsp);
         qentry->have_irq = 1;
     }
 
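
For readers unfamiliar with the SF_NVME_ERSP branch added in qla24xx_nvme_iocb_entry() above, the following is a standalone userspace sketch (not driver code) of that copy step: the error-response payload embedded in the status IOCB is moved to the transport's response buffer one 32-bit word at a time, byte-swapping each word the way the driver's swab32() loop does. The buffer names, sizes, and the local swab32() helper below are illustrative assumptions, not qla2xxx definitions.

/*
 * Minimal sketch of the ERSP copy performed when SF_NVME_ERSP is set:
 * iter = rsp_pyld_len >> 2;  *outbuf++ = swab32(*inbuf++);
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)	/* same effect as the kernel's swab32() */
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) << 8)  |
	       ((x & 0x00ff0000u) >> 8)  |
	       ((x & 0xff000000u) >> 24);
}

/* Copy rsp_pyld_len bytes of IOCB-resident ERSP data into the response
 * buffer, swapping each 32-bit word. */
static void copy_ersp(const uint32_t *ersp_data, uint32_t *rspaddr,
		      uint16_t rsp_pyld_len)
{
	uint16_t iter = rsp_pyld_len >> 2;	/* payload length is a byte count */

	for (; iter; iter--)
		*rspaddr++ = swab32(*ersp_data++);
}

int main(void)
{
	/* Illustrative buffers standing in for sts->nvme_ersp_data and fd->rspaddr. */
	uint32_t iocb_ersp[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
	uint32_t rsp_buf[4] = { 0 };

	copy_ersp(iocb_ersp, rsp_buf, sizeof(iocb_ersp));
	printf("first word after swap: 0x%08x\n", rsp_buf[0]);	/* prints 0x44332211 */
	return 0;
}

Built with a plain C compiler, the sketch prints 0x44332211 for an input word of 0x11223344, i.e. the same 32-bit byte swap the driver applies before handing the response data back through the NVMe FC transport descriptor.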