path: root/drivers/scsi/qla2xxx/qla_os.c
author	Len Brown <len.brown@intel.com>	2012-04-07 05:48:59 +0400
committer	Len Brown <len.brown@intel.com>	2012-04-07 05:48:59 +0400
commit	eeaab2d8af2cf1d36d7086f22e9de42d6dd2995c (patch)
tree	369b9c91a6d808944f07d2290fec6f9fe2731904 /drivers/scsi/qla2xxx/qla_os.c
parent	ee01e663373343c63e0e3d364d09f6155378dbcc (diff)
parent	aaef292acf3a78d9c0bb6fb72226077d286b45d7 (diff)
download	linux-eeaab2d8af2cf1d36d7086f22e9de42d6dd2995c.tar.xz
Merge branches 'idle-fix' and 'misc' into release
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c	435
1 file changed, 286 insertions, 149 deletions
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 036030c95339..a2f999273a5f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -304,7 +304,6 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
-static void qla2x00_sp_free_dma(srb_t *);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
@@ -559,28 +558,75 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
return str;
}
-static inline srb_t *
-qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
- struct scsi_cmnd *cmd)
+void
+qla2x00_sp_free_dma(void *vha, void *ptr)
{
- srb_t *sp;
- struct qla_hw_data *ha = vha->hw;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *ctx = GET_CMD_CTX_SP(sp);
- sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
- if (!sp) {
- ql_log(ql_log_warn, vha, 0x3006,
- "Memory allocation failed for sp.\n");
- return sp;
+ if (sp->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ sp->flags &= ~SRB_DMA_VALID;
}
- atomic_set(&sp->ref_count, 1);
- sp->fcport = fcport;
- sp->cmd = cmd;
- sp->flags = 0;
- CMD_SP(cmd) = (void *)sp;
- sp->ctx = NULL;
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
- return sp;
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* List assured to be having elements */
+ qla2x00_clean_dsd_pool(ha, sp);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, ctx,
+ ((struct crc_context *)ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ ctx1 = NULL;
+ }
+
+ CMD_SP(cmd) = NULL;
+ mempool_free(sp, ha->srb_mempool);
+}
+
+static void
+qla2x00_sp_compl(void *data, void *ptr, int res)
+{
+ struct qla_hw_data *ha = (struct qla_hw_data *)data;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ cmd->result = res;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, GET_CMD_SP(sp));
+ if (ql2xextended_error_logging & ql_dbg_io)
+ BUG();
+ return;
+ }
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ qla2x00_sp_free_dma(ha, sp);
+ cmd->scsi_done(cmd);
}
static int
@@ -644,10 +690,17 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
- sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
+ sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
+ sp->u.scmd.cmd = cmd;
+ sp->type = SRB_SCSI_CMD;
+ atomic_set(&sp->ref_count, 1);
+ CMD_SP(cmd) = (void *)sp;
+ sp->free = qla2x00_sp_free_dma;
+ sp->done = qla2x00_sp_compl;
+
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io, vha, 0x3013,
@@ -658,8 +711,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
qc24_host_busy_free_sp:
- qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_sp_free_dma(ha, sp);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -893,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- qla2x00_sp_compl(ha, sp);
+ sp->done(ha, sp, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Did the command return during mailbox execution? */
@@ -925,6 +977,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
srb_t *sp;
+ struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
@@ -935,28 +988,29 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
- if ((sp->ctx) && !IS_PROT_IO(sp))
+ if (sp->type != SRB_SCSI_CMD)
continue;
if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
match = 0;
+ cmd = GET_CMD_SP(sp);
switch (type) {
case WAIT_HOST:
match = 1;
break;
case WAIT_TARGET:
- match = sp->cmd->device->id == t;
+ match = cmd->device->id == t;
break;
case WAIT_LUN:
- match = (sp->cmd->device->id == t &&
- sp->cmd->device->lun == l);
+ match = (cmd->device->id == t &&
+ cmd->device->lun == l);
break;
}
if (!match)
continue;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- status = qla2x00_eh_wait_on_command(sp->cmd);
+ status = qla2x00_eh_wait_on_command(cmd);
spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1219,7 +1273,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
- if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
+ if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -1249,7 +1303,6 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
int que, cnt;
unsigned long flags;
srb_t *sp;
- struct srb_ctx *ctx;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
@@ -1262,31 +1315,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (!sp->ctx ||
- (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
- IS_PROT_IO(sp)) {
- sp->cmd->result = res;
- qla2x00_sp_compl(ha, sp);
- } else {
- ctx = sp->ctx;
- if (ctx->type == SRB_ELS_CMD_RPT ||
- ctx->type == SRB_ELS_CMD_HST ||
- ctx->type == SRB_CT_CMD) {
- struct fc_bsg_job *bsg_job =
- ctx->u.bsg_job;
- if (bsg_job->request->msgcode
- == FC_BSG_HST_CT)
- kfree(sp->fcport);
- bsg_job->req->errors = 0;
- bsg_job->reply->result = res;
- bsg_job->job_done(bsg_job);
- kfree(sp->ctx);
- mempool_free(sp,
- ha->srb_mempool);
- } else {
- ctx->u.iocb_cmd->free(sp);
- }
- }
+ sp->done(vha, sp, res);
}
}
}
@@ -1488,9 +1517,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
uint16_t msix;
int cpus;
- if (IS_QLA82XX(ha))
- return qla82xx_iospace_config(ha);
-
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
@@ -1593,6 +1619,96 @@ iospace_error_exit:
}
+static int
+qla83xx_iospace_config(struct qla_hw_data *ha)
+{
+ uint16_t msix;
+ int cpus;
+
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
+ "Invalid PCI mem region size (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* 64bit PCI BAR - BAR2 will correspond to region 4 */
+ /* 83XX 26XX always use MQ type access for queues
+ * - mbar 2, a.k.a region 4 */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
+ pci_resource_len(ha->pdev, 4));
+
+ if (!ha->mqiobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
+ "BAR2/region4 not enabled\n");
+ goto mqiobase_exit;
+ }
+
+ ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
+ pci_resource_len(ha->pdev, 2));
+ if (ha->msixbase) {
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev,
+ QLA_83XX_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ }
+ ql_log_pci(ql_log_info, ha->pdev, 0x011c,
+ "MSI-X vector count: %d.\n", msix);
+ } else
+ ql_log_pci(ql_log_info, ha->pdev, 0x011e,
+ "BAR 1 not enabled.\n");
+
+mqiobase_exit:
+ ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
+ "MSIX Count:%d.\n", ha->msix_count);
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
static struct isp_operations qla2100_isp_ops = {
.pci_config = qla2100_pci_config,
.reset_chip = qla2x00_reset_chip,
@@ -1769,7 +1885,7 @@ static struct isp_operations qla81xx_isp_ops = {
.fw_dump = qla81xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
- .beacon_blink = qla24xx_beacon_blink,
+ .beacon_blink = qla83xx_beacon_blink,
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
@@ -1815,6 +1931,43 @@ static struct isp_operations qla82xx_isp_ops = {
.iospace_config = qla82xx_iospace_config,
};
+static struct isp_operations qla83xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla81xx_update_fw_options,
+ .load_risc = qla81xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla83xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla83xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla83xx_iospace_config,
+};
+
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
@@ -1909,6 +2062,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
/* Initialize 82XX ISP flags */
qla82xx_init_flags(ha);
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2031:
+ ha->device_type |= DT_ISP2031;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8031:
+ ha->device_type |= DT_ISP8031;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
}
if (IS_QLA82XX(ha))
@@ -1966,7 +2135,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
char pci_info[30];
char fw_str[30];
struct scsi_host_template *sht;
- int bars, max_id, mem_only = 0;
+ int bars, mem_only = 0;
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
@@ -1980,7 +2149,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2020,9 +2191,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
qla2x00_set_isp_flags(ha);
/* Set EEH reset type to fundamental if required by hba */
- if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
pdev->needs_freset = 1;
- }
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
@@ -2030,9 +2200,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->optrom_size = OPTROM_SIZE_2300;
/* Assign ISP specific operations. */
- max_id = MAX_TARGETS_2200;
if (IS_QLA2100(ha)) {
- max_id = MAX_TARGETS_2100;
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
req_length = REQUEST_ENTRY_CNT_2100;
rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2044,6 +2213,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA2200(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
req_length = REQUEST_ENTRY_CNT_2200;
rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2055,6 +2225,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA23XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_2200;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2068,6 +2239,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2300_isp_ops;
} else if (IS_QLA24XX_TYPE(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2082,6 +2254,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA25XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2096,6 +2269,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA81XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2110,6 +2284,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
} else if (IS_QLA82XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_82XX;
rsp_length = RESPONSE_ENTRY_CNT_82XX;
@@ -2123,14 +2298,31 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA83XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla83xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
}
+
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
"mbx_count=%d, req_length=%d, "
"rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
- "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
+ "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
+ "max_fibre_devices=%d.\n",
ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
- ha->nvram_npiv_size);
+ ha->nvram_npiv_size, ha->max_fibre_devices);
ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
"isp_ops=%p, flash_conf_off=%d, "
"flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
@@ -2204,7 +2396,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- host->max_id = max_id;
+ host->max_id = ha->max_fibre_devices;
host->this_id = 255;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
@@ -2251,7 +2443,7 @@ que_init:
req->req_q_out = &ha->iobase->isp24.req_q_out;
rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
- if (ha->mqenable) {
+ if (ha->mqenable || IS_QLA83XX(ha)) {
req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2552,6 +2744,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
if (ha->mqiobase)
iounmap(ha->mqiobase);
+
+ if (IS_QLA83XX(ha) && ha->msixbase)
+ iounmap(ha->msixbase);
}
pci_release_selected_regions(ha->pdev, ha->bars);
@@ -2751,8 +2946,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->init_cb)
goto fail;
- ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
- &ha->gid_list_dma, GFP_KERNEL);
+ ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
goto fail_free_init_cb;
@@ -2893,7 +3088,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_QLA8XXX_TYPE(ha)) {
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
@@ -2967,7 +3162,8 @@ fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
- dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ ha->gid_list,
ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
@@ -3045,9 +3241,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->sfp_data)
dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
- if (ha->edc_data)
- dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
-
if (ha->ms_iocb)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
@@ -3062,8 +3255,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_pool_destroy(ha->s_dma_pool);
if (ha->gid_list)
- dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
- ha->gid_list_dma);
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ ha->gid_list, ha->gid_list_dma);
if (IS_QLA82XX(ha)) {
if (!list_empty(&ha->gbl_dsd_list)) {
@@ -3095,6 +3288,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
vfree(ha->optrom_buffer);
kfree(ha->nvram);
kfree(ha->npiv_info);
+ kfree(ha->swl);
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
@@ -3661,75 +3855,6 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
}
}
-static void
-qla2x00_sp_free_dma(srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
- struct qla_hw_data *ha = sp->fcport->vha->hw;
-
- if (sp->flags & SRB_DMA_VALID) {
- scsi_dma_unmap(cmd);
- sp->flags &= ~SRB_DMA_VALID;
- }
-
- if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
- dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
- scsi_prot_sg_count(cmd), cmd->sc_data_direction);
- sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
- }
-
- if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
- /* List assured to be having elements */
- qla2x00_clean_dsd_pool(ha, sp);
- sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
- }
-
- if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
- dma_pool_free(ha->dl_dma_pool, sp->ctx,
- ((struct crc_context *)sp->ctx)->crc_ctx_dma);
- sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
- }
-
- if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx = sp->ctx;
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
- ctx->fcp_cmnd_dma);
- list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx->dsd_use_cnt;
- mempool_free(sp->ctx, ha->ctx_mempool);
- sp->ctx = NULL;
- }
-
- CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
-
- qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
- cmd->scsi_done(cmd);
-}
-
-void
-qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- if (atomic_read(&sp->ref_count) == 0) {
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
- "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
- sp, sp->cmd);
- if (ql2xextended_error_logging & ql_dbg_io)
- BUG();
- return;
- }
- if (!atomic_dec_and_test(&sp->ref_count))
- return;
- qla2x00_sp_final_compl(ha, sp);
-}
-
/**************************************************************************
* qla2x00_timer
*
@@ -3800,7 +3925,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
- if (sp->ctx && !IS_PROT_IO(sp))
+ if (sp->type != SRB_SCSI_CMD)
continue;
sfcp = sp->fcport;
if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3889,7 +4014,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 8
+#define FW_BLOBS 10
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -3898,6 +4023,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP25XX 5
#define FW_ISP81XX 6
#define FW_ISP82XX 7
+#define FW_ISP2031 8
+#define FW_ISP8031 9
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3907,6 +4034,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP25XX "ql2500_fw.bin"
#define FW_FILE_ISP81XX "ql8100_fw.bin"
#define FW_FILE_ISP82XX "ql8200_fw.bin"
+#define FW_FILE_ISP2031 "ql2600_fw.bin"
+#define FW_FILE_ISP8031 "ql8300_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
@@ -3919,6 +4048,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP25XX, },
{ .name = FW_FILE_ISP81XX, },
{ .name = FW_FILE_ISP82XX, },
+ { .name = FW_FILE_ISP2031, },
+ { .name = FW_FILE_ISP8031, },
};
struct fw_blob *
@@ -3927,7 +4058,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct fw_blob *blob;
- blob = NULL;
if (IS_QLA2100(ha)) {
blob = &qla_fw_blobs[FW_ISP21XX];
} else if (IS_QLA2200(ha)) {
@@ -3944,6 +4074,12 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP81XX];
} else if (IS_QLA82XX(ha)) {
blob = &qla_fw_blobs[FW_ISP82XX];
+ } else if (IS_QLA2031(ha)) {
+ blob = &qla_fw_blobs[FW_ISP2031];
+ } else if (IS_QLA8031(ha)) {
+ blob = &qla_fw_blobs[FW_ISP8031];
+ } else {
+ return NULL;
}
mutex_lock(&qla_fw_lock);
@@ -4265,6 +4401,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ 0 },
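
The central change in this commit is the move from type-specific completion helpers (qla2x00_sp_compl/qla2x00_sp_final_compl taking an srb_t directly) to per-request callbacks: each srb now carries sp->free and sp->done, guarded by a reference count, so generic paths such as qla2x00_abort_all_cmds() can finish any outstanding request with sp->done(vha, sp, res). The standalone C sketch below is not qla2xxx code; struct sketch_srb and the helper names are hypothetical, simplified stand-ins used only to illustrate that refcounted-callback pattern.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-in for srb_t: the request carries its own
 * teardown (->free) and completion (->done) callbacks plus a reference
 * count, so generic code (abort/flush paths) can finish any request type
 * without switching on its type. */
struct sketch_srb {
	atomic_int ref_count;
	void (*free)(void *ctx, void *sp);          /* release DMA/memory resources */
	void (*done)(void *ctx, void *sp, int res); /* complete toward the upper layer */
	int result;
};

static void sketch_free(void *ctx, void *ptr)
{
	(void)ctx;
	free(ptr);  /* in the driver: unmap DMA, then return the srb to its mempool */
}

static void sketch_done(void *ctx, void *ptr, int res)
{
	struct sketch_srb *sp = ptr;

	sp->result = res;
	/* Only the caller dropping the last reference performs teardown. */
	if (atomic_fetch_sub(&sp->ref_count, 1) == 1) {
		printf("request %p completed with result %d\n", ptr, res);
		sp->free(ctx, sp);
	}
}

int main(void)
{
	struct sketch_srb *sp = malloc(sizeof(*sp));

	if (!sp)
		return 1;
	atomic_init(&sp->ref_count, 1);
	sp->free = sketch_free;
	sp->done = sketch_done;

	/* Any path holding the request can finish it uniformly, much as
	 * qla2x00_abort_all_cmds() now calls sp->done(vha, sp, res). */
	sp->done(NULL, sp, 0);
	return 0;
}

Dropping the last reference is what triggers teardown, which is why the new qla2x00_sp_compl() in the diff warns (and optionally BUGs) if it ever sees a zero count before decrementing.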