Diffstat (limited to 'drivers/scsi')
 drivers/scsi/BusLogic.c | 52
 drivers/scsi/Kconfig | 1
 drivers/scsi/Makefile | 1
 drivers/scsi/advansys.c | 8
 drivers/scsi/aic7xxx/aic7xxx_pci.c | 2
 drivers/scsi/be2iscsi/be.h | 2
 drivers/scsi/be2iscsi/be_cmds.c | 200
 drivers/scsi/be2iscsi/be_cmds.h | 65
 drivers/scsi/be2iscsi/be_iscsi.c | 175
 drivers/scsi/be2iscsi/be_main.c | 1331
 drivers/scsi/be2iscsi/be_main.h | 182
 drivers/scsi/be2iscsi/be_mgmt.c | 290
 drivers/scsi/be2iscsi/be_mgmt.h | 13
 drivers/scsi/bfa/bfad.c | 6
 drivers/scsi/bfa/bfad_im.c | 2
 drivers/scsi/bnx2fc/bnx2fc.h | 4
 drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 61
 drivers/scsi/bnx2fc/bnx2fc_hwi.c | 3
 drivers/scsi/bnx2fc/bnx2fc_io.c | 6
 drivers/scsi/bnx2i/57xx_iscsi_constants.h | 2
 drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 14
 drivers/scsi/bnx2i/bnx2i.h | 4
 drivers/scsi/bnx2i/bnx2i_hwi.c | 5
 drivers/scsi/bnx2i/bnx2i_init.c | 18
 drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
 drivers/scsi/bnx2i/bnx2i_sysfs.c | 2
 drivers/scsi/cxgbi/libcxgbi.h | 8
 drivers/scsi/dc395x.c | 24
 drivers/scsi/device_handler/scsi_dh_alua.c | 31
 drivers/scsi/device_handler/scsi_dh_rdac.c | 1
 drivers/scsi/dpt_i2o.c | 35
 drivers/scsi/dpti.h | 1
 drivers/scsi/eata_pio.c | 2
 drivers/scsi/esas2r/Kconfig | 5
 drivers/scsi/esas2r/Makefile | 5
 drivers/scsi/esas2r/atioctl.h | 1254
 drivers/scsi/esas2r/atvda.h | 1319
 drivers/scsi/esas2r/esas2r.h | 1431
 drivers/scsi/esas2r/esas2r_disc.c | 1184
 drivers/scsi/esas2r/esas2r_flash.c | 1521
 drivers/scsi/esas2r/esas2r_init.c | 1771
 drivers/scsi/esas2r/esas2r_int.c | 942
 drivers/scsi/esas2r/esas2r_io.c | 877
 drivers/scsi/esas2r/esas2r_ioctl.c | 2110
 drivers/scsi/esas2r/esas2r_log.c | 254
 drivers/scsi/esas2r/esas2r_log.h | 118
 drivers/scsi/esas2r/esas2r_main.c | 2032
 drivers/scsi/esas2r/esas2r_targdb.c | 306
 drivers/scsi/esas2r/esas2r_vda.c | 524
 drivers/scsi/esp_scsi.c | 14
 drivers/scsi/esp_scsi.h | 1
 drivers/scsi/fnic/fnic.h | 18
 drivers/scsi/fnic/fnic_debugfs.c | 390
 drivers/scsi/fnic/fnic_fcs.c | 12
 drivers/scsi/fnic/fnic_isr.c | 18
 drivers/scsi/fnic/fnic_main.c | 156
 drivers/scsi/fnic/fnic_scsi.c | 394
 drivers/scsi/fnic/fnic_stats.h | 116
 drivers/scsi/fnic/fnic_trace.c | 185
 drivers/scsi/fnic/fnic_trace.h | 3
 drivers/scsi/fnic/vnic_scsi.h | 4
 drivers/scsi/hosts.c | 7
 drivers/scsi/hpsa.c | 65
 drivers/scsi/hpsa.h | 2
 drivers/scsi/ibmvscsi/ibmvfc.c | 15
 drivers/scsi/ibmvscsi/ibmvscsi.c | 154
 drivers/scsi/ibmvscsi/viosrp.h | 46
 drivers/scsi/ipr.c | 14
 drivers/scsi/ipr.h | 7
 drivers/scsi/isci/port_config.c | 2
 drivers/scsi/iscsi_tcp.c | 1
 drivers/scsi/libiscsi.c | 109
 drivers/scsi/lpfc/lpfc.h | 4
 drivers/scsi/lpfc/lpfc_attr.c | 84
 drivers/scsi/lpfc/lpfc_bsg.c | 107
 drivers/scsi/lpfc/lpfc_ct.c | 4
 drivers/scsi/lpfc/lpfc_disc.h | 3
 drivers/scsi/lpfc/lpfc_els.c | 2
 drivers/scsi/lpfc/lpfc_hbadisc.c | 19
 drivers/scsi/lpfc/lpfc_hw4.h | 43
 drivers/scsi/lpfc/lpfc_init.c | 136
 drivers/scsi/lpfc/lpfc_mbox.c | 7
 drivers/scsi/lpfc/lpfc_nportdisc.c | 12
 drivers/scsi/lpfc/lpfc_scsi.c | 194
 drivers/scsi/lpfc/lpfc_scsi.h | 1
 drivers/scsi/lpfc/lpfc_sli.c | 363
 drivers/scsi/lpfc/lpfc_sli.h | 11
 drivers/scsi/lpfc/lpfc_sli4.h | 9
 drivers/scsi/lpfc/lpfc_version.h | 2
 drivers/scsi/lpfc/lpfc_vport.c | 5
 drivers/scsi/megaraid/megaraid_mbox.c | 10
 drivers/scsi/megaraid/megaraid_mm.c | 2
 drivers/scsi/megaraid/megaraid_sas.h | 118
 drivers/scsi/megaraid/megaraid_sas_base.c | 423
 drivers/scsi/megaraid/megaraid_sas_fp.c | 145
 drivers/scsi/megaraid/megaraid_sas_fusion.c | 200
 drivers/scsi/megaraid/megaraid_sas_fusion.h | 33
 drivers/scsi/mpt2sas/mpi/mpi2.h | 7
 drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 10
 drivers/scsi/mpt2sas/mpi/mpi2_init.h | 2
 drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 2
 drivers/scsi/mpt2sas/mpi/mpi2_raid.h | 2
 drivers/scsi/mpt2sas/mpi/mpi2_sas.h | 2
 drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 2
 drivers/scsi/mpt2sas/mpi/mpi2_type.h | 2
 drivers/scsi/mpt2sas/mpt2sas_base.c | 40
 drivers/scsi/mpt2sas/mpt2sas_base.h | 10
 drivers/scsi/mpt2sas/mpt2sas_config.c | 2
 drivers/scsi/mpt2sas/mpt2sas_ctl.c | 14
 drivers/scsi/mpt2sas/mpt2sas_ctl.h | 2
 drivers/scsi/mpt2sas/mpt2sas_debug.h | 2
 drivers/scsi/mpt2sas/mpt2sas_scsih.c | 82
 drivers/scsi/mpt2sas/mpt2sas_transport.c | 7
 drivers/scsi/mpt3sas/Makefile | 2
 drivers/scsi/mpt3sas/mpt3sas_base.c | 41
 drivers/scsi/mpt3sas/mpt3sas_scsih.c | 1
 drivers/scsi/mpt3sas/mpt3sas_transport.c | 5
 drivers/scsi/osd/osd_uld.c | 13
 drivers/scsi/pm8001/pm8001_ctl.c | 153
 drivers/scsi/pm8001/pm8001_ctl.h | 6
 drivers/scsi/pm8001/pm8001_defs.h | 8
 drivers/scsi/pm8001/pm8001_hwi.c | 152
 drivers/scsi/pm8001/pm8001_hwi.h | 3
 drivers/scsi/pm8001/pm8001_init.c | 70
 drivers/scsi/pm8001/pm8001_sas.c | 9
 drivers/scsi/pm8001/pm8001_sas.h | 74
 drivers/scsi/pm8001/pm80xx_hwi.c | 523
 drivers/scsi/pm8001/pm80xx_hwi.h | 15
 drivers/scsi/qla2xxx/Makefile | 2
 drivers/scsi/qla2xxx/qla_attr.c | 81
 drivers/scsi/qla2xxx/qla_bsg.c | 45
 drivers/scsi/qla2xxx/qla_dbg.c | 60
 drivers/scsi/qla2xxx/qla_def.h | 39
 drivers/scsi/qla2xxx/qla_fw.h | 2
 drivers/scsi/qla2xxx/qla_gbl.h | 74
 drivers/scsi/qla2xxx/qla_gs.c | 97
 drivers/scsi/qla2xxx/qla_init.c | 86
 drivers/scsi/qla2xxx/qla_inline.h | 2
 drivers/scsi/qla2xxx/qla_iocb.c | 17
 drivers/scsi/qla2xxx/qla_isr.c | 100
 drivers/scsi/qla2xxx/qla_mbx.c | 297
 drivers/scsi/qla2xxx/qla_mid.c | 2
 drivers/scsi/qla2xxx/qla_mr.c | 166
 drivers/scsi/qla2xxx/qla_mr.h | 41
 drivers/scsi/qla2xxx/qla_nx.c | 117
 drivers/scsi/qla2xxx/qla_nx.h | 10
 drivers/scsi/qla2xxx/qla_nx2.c | 3716
 drivers/scsi/qla2xxx/qla_nx2.h | 551
 drivers/scsi/qla2xxx/qla_os.c | 213
 drivers/scsi/qla2xxx/qla_sup.c | 162
 drivers/scsi/qla2xxx/qla_target.c | 71
 drivers/scsi/qla2xxx/qla_version.h | 4
 drivers/scsi/qla2xxx/tcm_qla2xxx.c | 73
 drivers/scsi/qla2xxx/tcm_qla2xxx.h | 1
 drivers/scsi/qla4xxx/ql4_83xx.c | 12
 drivers/scsi/qla4xxx/ql4_83xx.h | 36
 drivers/scsi/qla4xxx/ql4_attr.c | 90
 drivers/scsi/qla4xxx/ql4_bsg.c | 2
 drivers/scsi/qla4xxx/ql4_dbg.c | 13
 drivers/scsi/qla4xxx/ql4_def.h | 30
 drivers/scsi/qla4xxx/ql4_fw.h | 14
 drivers/scsi/qla4xxx/ql4_glbl.h | 12
 drivers/scsi/qla4xxx/ql4_init.c | 6
 drivers/scsi/qla4xxx/ql4_inline.h | 14
 drivers/scsi/qla4xxx/ql4_iocb.c | 2
 drivers/scsi/qla4xxx/ql4_isr.c | 65
 drivers/scsi/qla4xxx/ql4_mbx.c | 205
 drivers/scsi/qla4xxx/ql4_nvram.c | 2
 drivers/scsi/qla4xxx/ql4_nvram.h | 2
 drivers/scsi/qla4xxx/ql4_nx.c | 297
 drivers/scsi/qla4xxx/ql4_nx.h | 2
 drivers/scsi/qla4xxx/ql4_os.c | 818
 drivers/scsi/qla4xxx/ql4_version.h | 4
 drivers/scsi/scsi.c | 28
 drivers/scsi/scsi_debug.c | 151
 drivers/scsi/scsi_error.c | 274
 drivers/scsi/scsi_lib.c | 70
 drivers/scsi/scsi_pm.c | 3
 drivers/scsi/scsi_sysfs.c | 49
 drivers/scsi/scsi_transport_iscsi.c | 149
 drivers/scsi/sd.c | 245
 drivers/scsi/sd.h | 6
 drivers/scsi/sg.c | 176
 drivers/scsi/st.c | 27
 drivers/scsi/storvsc_drv.c | 1
 drivers/scsi/tmscsim.c | 14
 drivers/scsi/tmscsim.h | 1
 drivers/scsi/ufs/ufs.h | 156
 drivers/scsi/ufs/ufshcd-pci.c | 99
 drivers/scsi/ufs/ufshcd-pltfrm.c | 57
 drivers/scsi/ufs/ufshcd.c | 1494
 drivers/scsi/ufs/ufshcd.h | 113
 drivers/scsi/ufs/ufshci.h | 24
 drivers/scsi/ufs/unipro.h | 151
194 files changed, 31075 insertions(+), 3007 deletions(-)
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index feab3a5e50b5..972f8176665f 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -26,8 +26,8 @@
*/
-#define blogic_drvr_version "2.1.16"
-#define blogic_drvr_date "18 July 2002"
+#define blogic_drvr_version "2.1.17"
+#define blogic_drvr_date "12 September 2013"
#include <linux/module.h>
#include <linux/init.h>
@@ -311,12 +311,14 @@ static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter)
caller.
*/
-static void blogic_dealloc_ccb(struct blogic_ccb *ccb)
+static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
{
struct blogic_adapter *adapter = ccb->adapter;
- scsi_dma_unmap(ccb->command);
- pci_unmap_single(adapter->pci_device, ccb->sensedata,
+ if (ccb->command != NULL)
+ scsi_dma_unmap(ccb->command);
+ if (dma_unmap)
+ pci_unmap_single(adapter->pci_device, ccb->sensedata,
ccb->sense_datalen, PCI_DMA_FROMDEVICE);
ccb->command = NULL;
@@ -696,7 +698,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
pci_device)) != NULL) {
- struct blogic_adapter *adapter = adapter;
+ struct blogic_adapter *host_adapter = adapter;
struct blogic_adapter_info adapter_info;
enum blogic_isa_ioport mod_ioaddr_req;
unsigned char bus;
@@ -744,9 +746,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
known and enabled, note that the particular Standard ISA I/O
Address should not be probed.
*/
- adapter->io_addr = io_addr;
- blogic_intreset(adapter);
- if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
+ host_adapter->io_addr = io_addr;
+ blogic_intreset(host_adapter);
+ if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
&adapter_info, sizeof(adapter_info)) ==
sizeof(adapter_info)) {
if (adapter_info.isa_port < 6)
@@ -762,7 +764,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
I/O Address assigned at system initialization.
*/
mod_ioaddr_req = BLOGIC_IO_DISABLE;
- blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+ blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
sizeof(mod_ioaddr_req), NULL, 0);
/*
For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +781,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
fetch_localram.count = sizeof(autoscsi_byte45);
- blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM,
+ blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
&fetch_localram, sizeof(fetch_localram),
&autoscsi_byte45,
sizeof(autoscsi_byte45));
- blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
- sizeof(id));
+ blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
+ &id, sizeof(id));
if (id.fw_ver_digit1 == '5')
force_scan_order =
autoscsi_byte45.force_scan_order;
@@ -2762,8 +2764,8 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/*
Place CCB back on the Host Adapter's free list.
*/
- blogic_dealloc_ccb(ccb);
-#if 0 /* this needs to be redone different for new EH */
+ blogic_dealloc_ccb(ccb, 1);
+#if 0 /* this needs to be redone different for new EH */
/*
Bus Device Reset CCBs have the command field
non-NULL only when a Bus Device Reset was requested
@@ -2791,7 +2793,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
if (ccb->status == BLOGIC_CCB_RESET &&
ccb->tgt_id == tgt_id) {
command = ccb->command;
- blogic_dealloc_ccb(ccb);
+ blogic_dealloc_ccb(ccb, 1);
adapter->active_cmds[tgt_id]--;
command->result = DID_RESET << 16;
command->scsi_done(command);
@@ -2862,7 +2864,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/*
Place CCB back on the Host Adapter's free list.
*/
- blogic_dealloc_ccb(ccb);
+ blogic_dealloc_ccb(ccb, 1);
/*
Call the SCSI Command Completion Routine.
*/
@@ -3034,6 +3036,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
int buflen = scsi_bufflen(command);
int count;
struct blogic_ccb *ccb;
+ dma_addr_t sense_buf;
/*
SCSI REQUEST_SENSE commands will be executed automatically by the
@@ -3179,10 +3182,17 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
}
memcpy(ccb->cdb, cdb, cdblen);
ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
- ccb->sensedata = pci_map_single(adapter->pci_device,
+ ccb->command = command;
+ sense_buf = pci_map_single(adapter->pci_device,
command->sense_buffer, ccb->sense_datalen,
PCI_DMA_FROMDEVICE);
- ccb->command = command;
+ if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
+ blogic_err("DMA mapping for sense data buffer failed\n",
+ adapter);
+ blogic_dealloc_ccb(ccb, 0);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ ccb->sensedata = sense_buf;
command->scsi_done = comp_cb;
if (blogic_multimaster_type(adapter)) {
/*
@@ -3203,7 +3213,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
ccb)) {
blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
- blogic_dealloc_ccb(ccb);
+ blogic_dealloc_ccb(ccb, 1);
command->result = DID_ERROR << 16;
command->scsi_done(command);
}
@@ -3337,7 +3347,7 @@ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)
for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)
if (ccb->status == BLOGIC_CCB_ACTIVE)
- blogic_dealloc_ccb(ccb);
+ blogic_dealloc_ccb(ccb, 1);
/*
* Wait a few seconds between the Host Adapter Hard Reset which
* initiates a SCSI Bus Reset and issuing any SCSI Commands. Some
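The queuecommand hunk above maps the sense buffer before committing the CCB and backs out cleanly when the mapping fails. A minimal sketch of that error-handling pattern, assuming the usual pci_map_single()/dma_mapping_error() semantics (an illustrative helper, not the driver's exact code):

    /* Map a command's sense buffer; on failure, free the CCB without
     * unmapping (nothing was mapped) and ask the midlayer to retry. */
    static int map_sense_buffer(struct blogic_adapter *adapter,
                                struct blogic_ccb *ccb,
                                struct scsi_cmnd *command)
    {
            dma_addr_t sense_buf;

            sense_buf = pci_map_single(adapter->pci_device,
                            command->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                            PCI_DMA_FROMDEVICE);
            if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
                    blogic_dealloc_ccb(ccb, 0);     /* dma_unmap = 0 */
                    return SCSI_MLQUEUE_HOST_BUSY;
            }
            ccb->sensedata = sense_buf;
            return 0;
    }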
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 92ff027746f2..fe25677a5511 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -601,6 +601,7 @@ config SCSI_ARCMSR
To compile this driver as a module, choose M here: the
module will be called arcmsr (modprobe arcmsr).
+source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b607ba4f5630..149bb6bf1849 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
+obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index c67e401954c5..d8145888e66a 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2511,8 +2511,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
- printk(" host_busy %u, host_no %d, last_reset %d,\n",
- s->host_busy, s->host_no, (unsigned)s->last_reset);
+ printk(" host_busy %u, host_no %d,\n",
+ s->host_busy, s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3345,8 +3345,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
- " host_busy %u, last_reset %lu, max_id %u, max_lun %u, max_channel %u\n",
- shost->host_busy, shost->last_reset, shost->max_id,
+ " host_busy %u, max_id %u, max_lun %u, max_channel %u\n",
+ shost->host_busy, shost->max_id,
shost->max_lun, shost->max_channel);
seq_printf(m,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 6917b4f5ac9e..22d5a949ec83 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -692,7 +692,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
* ID as valid.
*/
if (ahc_get_pci_function(pci) > 0
- && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
+ && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor)
&& SUBID_9005_MFUNCENB(subdevice) == 0)
return (NULL);
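The one-line fix above reorders the arguments at the call site to match the callee. Assuming a prototype of the following shape (a sketch, not verified against the full source), every parameter is a bare uint16_t, so the transposed call compiled silently:

    /* Assumed shape of the callee, for illustration only: with four
     * identically typed IDs, passing (vendor, device, subvendor,
     * subdevice) where (device, vendor, subdevice, subvendor) is
     * expected cannot be caught by the compiler. */
    static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
                                         uint16_t subdevice,
                                         uint16_t subvendor);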
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 777e7c0bbb4b..2e28f6c419fe 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -128,7 +128,7 @@ struct be_ctrl_info {
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
-#define mcc_timeout 120000 /* 5s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
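The corrected 12s figure follows if the MCC completion path polls mcc_timeout times with a 100 us delay per iteration: 120000 * 100 us = 12 s. A sketch of such a loop, where mcc_compl_is_done() is a hypothetical stand-in for the driver's completion check:

    /* Assumed basis for the comment fix above: 120000 polls of
     * 100 us each give a 12 s budget, not 5 s. */
    for (i = 0; i < mcc_timeout; i++) {
            if (mcc_compl_is_done(ctrl))    /* hypothetical helper */
                    break;
            udelay(100);
    }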
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index e66aa7c11a8a..3338391b64de 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -17,9 +17,9 @@
#include <scsi/iscsi_proto.h>
+#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"
-#include "be_main.h"
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
@@ -158,8 +158,10 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
struct be_cmd_resp_hdr *ioctl_resp_hdr;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- if (beiscsi_error(phba))
+ if (beiscsi_error(phba)) {
+ free_mcc_tag(&phba->ctrl, tag);
return -EIO;
+ }
/* wait for the mccq completion */
rc = wait_event_interruptible_timeout(
@@ -173,7 +175,11 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Completion timed out\n");
- rc = -EAGAIN;
+ rc = -EBUSY;
+
+ /* decrement the mccq used count */
+ atomic_dec(&phba->ctrl.mcc_obj.q.used);
+
goto release_mcc_tag;
} else
rc = 0;
@@ -208,10 +214,18 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
- if (ioctl_resp_hdr->response_length)
- goto release_mcc_tag;
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : Insufficent Buffer Error "
+ "Resp_Len : %d Actual_Resp_Len : %d\n",
+ ioctl_resp_hdr->response_length,
+ ioctl_resp_hdr->actual_resp_len);
+
+ rc = -EAGAIN;
+ goto release_mcc_tag;
}
- rc = -EAGAIN;
+ rc = -EIO;
}
release_mcc_tag:
@@ -363,7 +377,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
(evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
- phba->state = BE_ADAPTER_UP;
+ phba->state = BE_ADAPTER_LINK_UP;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -486,33 +500,47 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
**/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
- uint32_t wait = 0;
+ unsigned long timeout;
+ bool read_flag = false;
+ int ret = 0, i;
u32 ready;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
- do {
+ if (beiscsi_error(phba))
+ return -EIO;
- if (beiscsi_error(phba))
- return -EIO;
+ timeout = jiffies + (HZ * 110);
- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
- if (ready)
- break;
+ do {
+ for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
+ ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+ if (ready) {
+ read_flag = true;
+ break;
+ }
+ mdelay(1);
+ }
- if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : FW Timed Out\n");
+ if (!read_flag) {
+ wait_event_timeout(rdybit_check_q,
+ (read_flag != true),
+ HZ * 5);
+ }
+ } while ((time_before(jiffies, timeout)) && !read_flag);
+
+ if (!read_flag) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : FW Timed Out\n");
phba->fw_timeout = true;
beiscsi_ue_detect(phba);
- return -EBUSY;
- }
+ ret = -EBUSY;
+ }
- mdelay(1);
- wait++;
- } while (true);
- return 0;
+ return ret;
}
/*
@@ -699,7 +727,7 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
struct be_mcc_wrb *wrb;
- BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+ WARN_ON(atomic_read(&mccq->used) >= mccq->len);
wrb = queue_head_node(mccq);
memset(wrb, 0, sizeof(*wrb));
wrb->tag0 = (mccq->head & 0x000000FF) << 16;
@@ -1009,10 +1037,29 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
return status;
}
+/**
+ * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
+ * @ctrl: ptr to ctrl_info
+ * @cq: Completion Queue
+ * @dq: Default Queue
+ * @length: ring size
+ * @entry_size: size of each entry in DEFQ
+ * @is_header: Header or Data DEFQ
+ * @ulp_num: Bind to which ULP
+ *
+ * Create HDR/Data DEFQ for the passed ULP. Unsol PDUs are posted
+ * on this queue by the FW
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ *
+ **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq,
struct be_queue_info *dq, int length,
- int entry_size)
+ int entry_size, uint8_t is_header,
+ uint8_t ulp_num)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_defq_create_req *req = embedded_payload(wrb);
@@ -1030,6 +1077,11 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ if (phba->fw_config.dual_ulp_aware) {
+ req->ulp_num = ulp_num;
+ req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+ req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+ }
if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_be_default_pdu_context,
@@ -1067,22 +1119,53 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
status = be_mbox_notify(ctrl);
if (!status) {
+ struct be_ring *defq_ring;
struct be_defq_create_resp *resp = embedded_payload(wrb);
dq->id = le16_to_cpu(resp->id);
dq->created = true;
+ if (is_header)
+ defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
+ else
+ defq_ring = &phba->phwi_ctrlr->
+ default_pdu_data[ulp_num];
+
+ defq_ring->id = dq->id;
+
+ if (!phba->fw_config.dual_ulp_aware) {
+ defq_ring->ulp_num = BEISCSI_ULP0;
+ defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
+ } else {
+ defq_ring->ulp_num = resp->ulp_num;
+ defq_ring->doorbell_offset = resp->doorbell_offset;
+ }
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
-int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
- struct be_queue_info *wrbq)
+/**
+ * be_cmd_wrbq_create()- Create WRBQ
+ * @ctrl: ptr to ctrl_info
+ * @q_mem: memory details for the queue
+ * @wrbq: queue info
+ * @pwrb_context: ptr to wrb_context
+ * @ulp_num: ULP on which the WRBQ is to be created
+ *
+ * Create WRBQ on the passed ULP_NUM.
+ *
+ **/
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem,
+ struct be_queue_info *wrbq,
+ struct hwi_wrb_context *pwrb_context,
+ uint8_t ulp_num)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_wrbq_create_req *req = embedded_payload(wrb);
struct be_wrbq_create_resp *resp = embedded_payload(wrb);
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
spin_lock(&ctrl->mbox_lock);
@@ -1093,17 +1176,78 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+ if (phba->fw_config.dual_ulp_aware) {
+ req->ulp_num = ulp_num;
+ req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+ req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+ }
+
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
if (!status) {
wrbq->id = le16_to_cpu(resp->cid);
wrbq->created = true;
+
+ pwrb_context->cid = wrbq->id;
+ if (!phba->fw_config.dual_ulp_aware) {
+ pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
+ pwrb_context->ulp_num = BEISCSI_ULP0;
+ } else {
+ pwrb_context->ulp_num = resp->ulp_num;
+ pwrb_context->doorbell_offset = resp->doorbell_offset;
+ }
}
spin_unlock(&ctrl->mbox_lock);
return status;
}
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_post_template_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
+ sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify(ctrl);
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_remove_template_pages_req *req = embedded_payload(wrb);
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+
+ memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
+ sizeof(*req));
+
+ req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+
+ status = be_mbox_notify(ctrl);
+ spin_unlock(&ctrl->mbox_lock);
+ return status;
+}
+
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem,
u32 page_offset, u32 num_pages)
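The be_mbox_db_ready_wait() rewrite above replaces an unbounded poll with a bounded one: bursts of fine-grained mdelay() polling under a coarse jiffies deadline. The same pattern in isolation (a sketch mirroring the hunk's 4000-iteration bursts and overall budget, not the exact code):

    /* Bounded ready-bit wait: poll the doorbell register in ~4 s
     * bursts until it reads ready or the overall deadline expires. */
    unsigned long deadline = jiffies + 110 * HZ;
    bool ready = false;
    int i;

    do {
            for (i = 0; i < 4000; i++) {    /* ~4 s of 1 ms polls */
                    if (ioread32(db) & MPU_MAILBOX_DB_RDY_MASK) {
                            ready = true;
                            break;
                    }
                    mdelay(1);
            }
    } while (!ready && time_before(jiffies, deadline));

    return ready ? 0 : -EBUSY;              /* firmware timed out */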
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 99073086dfe0..627ebbe0172c 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -40,6 +40,7 @@ struct be_mcc_wrb {
u32 tag1; /* dword 3 */
u32 rsvd; /* dword 4 */
union {
+#define EMBED_MBX_MAX_PAYLOAD_SIZE 220
u8 embedded_payload[236]; /* used by embedded cmds */
struct be_sge sgl[19]; /* used by non-embedded cmds */
} payload;
@@ -162,6 +163,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
@@ -217,6 +220,10 @@ struct phys_addr {
u32 hi;
};
+struct virt_addr {
+ u32 lo;
+ u32 hi;
+};
/**************************
* BE Command definitions *
**************************/
@@ -722,7 +729,13 @@ int be_mbox_notify(struct be_ctrl_info *ctrl);
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq,
struct be_queue_info *dq, int length,
- int entry_size);
+ int entry_size, uint8_t is_header,
+ uint8_t ulp_num);
+
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+ struct be_dma_mem *q_mem);
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem, u32 page_offset,
@@ -731,7 +744,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
- struct be_queue_info *wrbq);
+ struct be_queue_info *wrbq,
+ struct hwi_wrb_context *pwrb_context,
+ uint8_t ulp_num);
bool is_link_state_evt(u32 trailer);
@@ -776,7 +791,9 @@ struct be_defq_create_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u8 ulp_num;
- u8 rsvd0;
+#define BEISCSI_DUAL_ULP_AWARE_BIT 0 /* Byte 3 - Bit 0 */
+#define BEISCSI_BIND_Q_TO_ULP_BIT 1 /* Byte 3 - Bit 1 */
+ u8 dua_feature;
struct be_default_pdu_context context;
struct phys_addr pages[8];
} __packed;
@@ -784,6 +801,27 @@ struct be_defq_create_req {
struct be_defq_create_resp {
struct be_cmd_req_hdr hdr;
u16 id;
+ u8 rsvd0;
+ u8 ulp_num;
+ u32 doorbell_offset;
+ u16 register_set;
+ u16 doorbell_format;
+} __packed;
+
+struct be_post_template_pages_req {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI 0x1
+ u16 type;
+ struct phys_addr scratch_pa;
+ struct virt_addr scratch_va;
+ struct virt_addr pages_va;
+ struct phys_addr pages[16];
+} __packed;
+
+struct be_remove_template_pages_req {
+ struct be_cmd_req_hdr hdr;
+ u16 type;
u16 rsvd0;
} __packed;
@@ -800,14 +838,18 @@ struct be_wrbq_create_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u8 ulp_num;
- u8 rsvd0;
+ u8 dua_feature;
struct phys_addr pages[8];
} __packed;
struct be_wrbq_create_resp {
struct be_cmd_resp_hdr resp_hdr;
u16 cid;
- u16 rsvd0;
+ u8 rsvd0;
+ u8 ulp_num;
+ u32 doorbell_offset;
+ u16 register_set;
+ u16 doorbell_format;
} __packed;
#define SOL_CID_MASK 0x0000FFC0
@@ -1002,6 +1044,7 @@ union tcp_upload_params {
} __packed;
struct be_ulp_fw_cfg {
+#define BEISCSI_ULP_ISCSI_INI_MODE 0x10
u32 ulp_mode;
u32 etx_base;
u32 etx_count;
@@ -1017,14 +1060,26 @@ struct be_ulp_fw_cfg {
u32 icd_count;
};
+struct be_ulp_chain_icd {
+ u32 chain_base;
+ u32 chain_count;
+};
+
struct be_fw_cfg {
struct be_cmd_req_hdr hdr;
u32 be_config_number;
u32 asic_revision;
u32 phys_port;
+#define BEISCSI_FUNC_ISCSI_INI_MODE 0x10
+#define BEISCSI_FUNC_DUA_MODE 0x800
u32 function_mode;
struct be_ulp_fw_cfg ulp[2];
u32 function_caps;
+ u32 cqid_base;
+ u32 cqid_count;
+ u32 eqid_base;
+ u32 eqid_count;
+ struct be_ulp_chain_icd chain_icd[2];
} __packed;
struct be_cmd_get_all_if_id_req {
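The doorbell_offset fields added to the create responses above let each queue ring a per-ULP doorbell instead of the fixed ULP0 offsets. A sketch of the consuming side (DB_WRB_POST_CID_MASK is an assumed mask name; the fallback mirrors the be_cmds.c hunks earlier):

    /* Ring a WRBQ doorbell at the offset saved from
     * be_wrbq_create_resp; non-dual-ULP firmware falls back to the
     * fixed DB_TXULP0_OFFSET when the queue is created. */
    u32 doorbell = 0;

    doorbell |= cid & DB_WRB_POST_CID_MASK;         /* assumed name */
    iowrite32(doorbell, phba->db_va + pwrb_context->doorbell_offset);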
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index ef36be003f67..ffadbee0b4d9 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -58,10 +58,15 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
}
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
- shost = phba->shost;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_session_create\n");
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : PCI_ERROR Recovery\n");
+ return NULL;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_session_create\n");
+ }
if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -74,6 +79,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
}
+ shost = phba->shost;
cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
shost, cmds_max,
sizeof(*beiscsi_sess),
@@ -194,6 +200,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
@@ -214,9 +222,13 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return -EEXIST;
}
+ pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
+ beiscsi_ep->ep_cid)];
+
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
beiscsi_conn->ep = beiscsi_ep;
beiscsi_ep->conn = beiscsi_conn;
+ beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
@@ -265,13 +277,17 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
{
- struct be_cmd_get_if_info_resp if_info;
+ struct be_cmd_get_if_info_resp *if_info;
- if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+ if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
beiscsi_create_ipv4_iface(phba);
+ kfree(if_info);
+ }
- if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+ if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
beiscsi_create_ipv6_iface(phba);
+ kfree(if_info);
+ }
}
void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
@@ -467,6 +483,12 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
uint32_t rm_len = dt_len;
int ret = 0 ;
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ }
+
nla_for_each_attr(attrib, data, dt_len, rm_len) {
iface_param = nla_data(attrib);
@@ -512,59 +534,60 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
struct iscsi_iface *iface, int param,
char *buf)
{
- struct be_cmd_get_if_info_resp if_info;
+ struct be_cmd_get_if_info_resp *if_info;
int len, ip_type = BE2_IPV4;
- memset(&if_info, 0, sizeof(if_info));
-
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
ip_type = BE2_IPV6;
len = mgmt_get_if_info(phba, ip_type, &if_info);
- if (len)
+ if (len) {
+ kfree(if_info);
return len;
+ }
switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR:
- len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr);
+ len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr);
break;
case ISCSI_NET_PARAM_IPV6_ADDR:
- len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr);
+ len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr);
break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
- if (!if_info.dhcp_state)
+ if (!if_info->dhcp_state)
len = sprintf(buf, "static\n");
else
len = sprintf(buf, "dhcp\n");
break;
case ISCSI_NET_PARAM_IPV4_SUBNET:
- len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
+ len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask);
break;
case ISCSI_NET_PARAM_VLAN_ENABLED:
len = sprintf(buf, "%s\n",
- (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
? "Disabled\n" : "Enabled\n");
break;
case ISCSI_NET_PARAM_VLAN_ID:
- if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
return -EINVAL;
else
len = sprintf(buf, "%d\n",
- (if_info.vlan_priority &
+ (if_info->vlan_priority &
ISCSI_MAX_VLAN_ID));
break;
case ISCSI_NET_PARAM_VLAN_PRIORITY:
- if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+ if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
return -EINVAL;
else
len = sprintf(buf, "%d\n",
- ((if_info.vlan_priority >> 13) &
+ ((if_info->vlan_priority >> 13) &
ISCSI_MAX_VLAN_PRIORITY));
break;
default:
WARN_ON(1);
}
+ kfree(if_info);
return len;
}
@@ -577,6 +600,12 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
struct be_cmd_get_def_gateway_resp gateway;
int len = -ENOSYS;
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ }
+
switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR:
case ISCSI_NET_PARAM_IPV4_SUBNET:
@@ -672,8 +701,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
session->max_burst = 262144;
break;
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
- if ((conn->max_xmit_dlength > 65536) ||
- (conn->max_xmit_dlength == 0))
+ if (conn->max_xmit_dlength > 65536)
conn->max_xmit_dlength = 65536;
default:
return 0;
@@ -727,7 +755,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- ihost->port_state = (phba->state == BE_ADAPTER_UP) ?
+ ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
}
@@ -795,9 +823,16 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
struct beiscsi_hba *phba = iscsi_host_priv(shost);
int status = 0;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_get_host_param,"
- " param= %d\n", param);
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_get_host_param,"
+ " param = %d\n", param);
+ }
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
@@ -840,7 +875,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
struct be_cmd_get_nic_conf_resp resp;
int rc;
- if (strlen(phba->mac_address))
+ if (phba->mac_addr_set)
return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
memset(&resp, 0, sizeof(resp));
@@ -848,6 +883,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
if (rc)
return rc;
+ phba->mac_addr_set = true;
memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
}
@@ -923,6 +959,10 @@ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
session->max_r2t);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
(conn->exp_statsn - 1));
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ max_recv_data_segment_length, params,
+ conn->max_recv_dlength);
+
}
/**
@@ -935,10 +975,19 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_offload_params params;
+ struct beiscsi_hba *phba;
- beiscsi_log(beiscsi_conn->phba, KERN_INFO,
- BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_conn_start\n");
+ phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return -EBUSY;
+ } else {
+ beiscsi_log(beiscsi_conn->phba, KERN_INFO,
+ BEISCSI_LOG_CONFIG,
+ "BS_%d : In beiscsi_conn_start\n");
+ }
memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep;
@@ -960,15 +1009,31 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
*/
static int beiscsi_get_cid(struct beiscsi_hba *phba)
{
- unsigned short cid = 0xFFFF;
-
- if (!phba->avlbl_cids)
- return cid;
-
- cid = phba->cid_array[phba->cid_alloc++];
- if (phba->cid_alloc == phba->params.cxns_per_ctrl)
- phba->cid_alloc = 0;
- phba->avlbl_cids--;
+ unsigned short cid = 0xFFFF, cid_from_ulp;
+ struct ulp_cid_info *cid_info = NULL;
+ uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
+
+ /* Find the ULP which has more CID available */
+ cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
+ BEISCSI_ULP0_AVLBL_CID(phba) : 0;
+ cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ?
+ BEISCSI_ULP1_AVLBL_CID(phba) : 0;
+ cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
+ BEISCSI_ULP0 : BEISCSI_ULP1;
+
+ if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
+ cid_info = phba->cid_array_info[cid_from_ulp];
+ if (!cid_info->avlbl_cids)
+ return cid;
+
+ cid = cid_info->cid_array[cid_info->cid_alloc++];
+
+ if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
+ phba, cid_from_ulp))
+ cid_info->cid_alloc = 0;
+
+ cid_info->avlbl_cids--;
+ }
return cid;
}
@@ -979,10 +1044,22 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
*/
static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
{
- phba->avlbl_cids++;
- phba->cid_array[phba->cid_free++] = cid;
- if (phba->cid_free == phba->params.cxns_per_ctrl)
- phba->cid_free = 0;
+ uint16_t cid_post_ulp;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_wrb_context *pwrb_context;
+ struct ulp_cid_info *cid_info = NULL;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+ cid_post_ulp = pwrb_context->ulp_num;
+
+ cid_info = phba->cid_array_info[cid_post_ulp];
+ cid_info->avlbl_cids++;
+
+ cid_info->cid_array[cid_info->cid_free++] = cid;
+ if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
+ cid_info->cid_free = 0;
}
/**
@@ -1135,7 +1212,12 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
- if (phba->state != BE_ADAPTER_UP) {
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ ret = -EBUSY;
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : In PCI_ERROR Recovery\n");
+ return ERR_PTR(ret);
+ } else if (phba->state & BE_ADAPTER_LINK_DOWN) {
ret = -EBUSY;
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BS_%d : The Adapter Port state is Down!!!\n");
@@ -1260,6 +1342,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
}
+ if (phba->state & BE_ADAPTER_PCI_ERR) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : PCI_ERROR Recovery\n");
+ goto free_ep;
+ }
+
tag = mgmt_invalidate_connection(phba, beiscsi_ep,
beiscsi_ep->ep_cid,
mgmt_invalidate_flag,
@@ -1272,6 +1360,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_mccq_compl(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
+free_ep:
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
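beiscsi_get_cid()/beiscsi_put_cid() above manage each ULP's connection IDs as a circular free list. A self-contained model of that allocator (plain C with illustrative types, not the driver's structures):

    /* cid_array holds the free CIDs; alloc and free indices chase
     * each other around the ring, and avlbl tracks how many remain. */
    struct cid_pool {
            unsigned short *cid_array;
            unsigned short count, avlbl, alloc_idx, free_idx;
    };

    static unsigned short pool_get(struct cid_pool *p)
    {
            unsigned short cid;

            if (!p->avlbl)
                    return 0xFFFF;          /* sentinel: none free */
            cid = p->cid_array[p->alloc_idx++];
            if (p->alloc_idx == p->count)
                    p->alloc_idx = 0;       /* wrap */
            p->avlbl--;
            return cid;
    }

    static void pool_put(struct cid_pool *p, unsigned short cid)
    {
            p->cid_array[p->free_idx++] = cid;
            if (p->free_idx == p->count)
                    p->free_idx = 0;        /* wrap */
            p->avlbl++;
    }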
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index a1f5ac7a9806..1f375051483a 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -149,18 +149,25 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
"\t\t\t\tMiscellaneous Events : 0x04\n"
"\t\t\t\tError Handling : 0x08\n"
"\t\t\t\tIO Path Events : 0x10\n"
- "\t\t\t\tConfiguration Path : 0x20\n");
+ "\t\t\t\tConfiguration Path : 0x20\n"
+ "\t\t\t\tiSCSI Protocol : 0x40\n");
DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
-DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
+DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
+DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
+ beiscsi_active_session_disp, NULL);
+DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
+ beiscsi_free_session_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable,
&dev_attr_beiscsi_drvr_ver,
&dev_attr_beiscsi_adapter_family,
&dev_attr_beiscsi_fw_ver,
- &dev_attr_beiscsi_active_cid_count,
+ &dev_attr_beiscsi_active_session_count,
+ &dev_attr_beiscsi_free_session_count,
+ &dev_attr_beiscsi_phys_port,
NULL,
};
@@ -239,6 +246,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
spin_unlock_bh(&session->lock);
+ /* Invalidate WRB Posted for this Task */
+ AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+ aborted_io_task->pwrb_handle->pwrb,
+ 1);
+
conn = aborted_task->conn;
beiscsi_conn = conn->dd_data;
phba = beiscsi_conn->phba;
@@ -316,6 +328,11 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
continue;
+ /* Invalidate WRB Posted for this Task */
+ AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+ abrt_io_task->pwrb_handle->pwrb,
+ 1);
+
inv_tbl->cid = cid;
inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
num_invalidate++;
@@ -699,30 +716,85 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
return status;
}
+/**
+ * beiscsi_get_params()- Set the config parameters
+ * @phba: ptr to device priv structure
+ **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
- phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
- - (phba->fw_config.iscsi_cid_count
- + BE2_TMFS
- + BE2_NOPOUT_REQ));
- phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
- phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
- phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
+ uint32_t total_cid_count = 0;
+ uint32_t total_icd_count = 0;
+ uint8_t ulp_num = 0;
+
+ total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+ BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ uint32_t align_mask = 0;
+ uint32_t icd_post_per_page = 0;
+ uint32_t icd_count_unavailable = 0;
+ uint32_t icd_start = 0, icd_count = 0;
+ uint32_t icd_start_align = 0, icd_count_align = 0;
+
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+ icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+
+ /* Get ICD count that can be posted on each page */
+ icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
+ sizeof(struct iscsi_sge)));
+ align_mask = (icd_post_per_page - 1);
+
+ /* Check if icd_start is aligned to ICD per-page posting */
+ if (icd_start % icd_post_per_page) {
+ icd_start_align = ((icd_start +
+ icd_post_per_page) &
+ ~(align_mask));
+ phba->fw_config.
+ iscsi_icd_start[ulp_num] =
+ icd_start_align;
+ }
+
+ icd_count_align = (icd_count & ~align_mask);
+
+ /* ICD discarded in the process of alignment */
+ if (icd_start_align)
+ icd_count_unavailable = ((icd_start_align -
+ icd_start) +
+ (icd_count -
+ icd_count_align));
+
+ /* Updated ICD count available */
+ phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
+ icd_count_unavailable);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : Aligned ICD values\n"
+ "\t ICD Start : %d\n"
+ "\t ICD Count : %d\n"
+ "\t ICD Discarded : %d\n",
+ phba->fw_config.
+ iscsi_icd_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ icd_count_unavailable);
+ break;
+ }
+ }
+
+ total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+ phba->params.ios_per_ctrl = (total_icd_count -
+ (total_cid_count +
+ BE2_TMFS + BE2_NOPOUT_REQ));
+ phba->params.cxns_per_ctrl = total_cid_count;
+ phba->params.asyncpdus_per_ctrl = total_cid_count;
+ phba->params.icds_per_ctrl = total_icd_count;
phba->params.num_sge_per_io = BE2_SGE;
phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
phba->params.eq_timer = 64;
- phba->params.num_eq_entries =
- (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
- + BE2_TMFS) / 512) + 1) * 512;
- phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
- ? 1024 : phba->params.num_eq_entries;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : phba->params.num_eq_entries=%d\n",
- phba->params.num_eq_entries);
- phba->params.num_cq_entries =
- (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
- + BE2_TMFS) / 512) + 1) * 512;
+ phba->params.num_eq_entries = 1024;
+ phba->params.num_cq_entries = 1024;
phba->params.wrbs_per_cxn = 256;
}
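The alignment logic above rounds icd_start up to the next per-page posting boundary and truncates icd_count down to a multiple of it, discarding the remainder. A worked example under assumed sizes (PAGE_SIZE 4096, BE2_SGE 32, sizeof(struct iscsi_sge) 16, so icd_post_per_page = 8 and align_mask = 7):

    /* With icd_start = 6 and icd_count = 2000: */
    unsigned int start_align = (6 + 8) & ~7u;   /* 8: next boundary */
    unsigned int count_align = 2000 & ~7u;      /* 2000: already a multiple */
    unsigned int lost = (start_align - 6) + (2000 - count_align);   /* 2 */
    /* usable ICDs: 2000 - 2 = 1998, starting at ICD 8 */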
@@ -1613,8 +1685,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
WARN_ON(!pasync_handle);
- pasync_handle->cri =
- BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
+ pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
pasync_handle->is_header = is_header;
pasync_handle->buffer_len = dpl;
*pcq_index = index;
@@ -1674,18 +1746,13 @@ hwi_update_async_writables(struct beiscsi_hba *phba,
}
static void hwi_free_async_msg(struct beiscsi_hba *phba,
- unsigned int cri)
+ struct hwi_async_pdu_context *pasync_ctx,
+ unsigned int cri)
{
- struct hwi_controller *phwi_ctrlr;
- struct hwi_async_pdu_context *pasync_ctx;
struct async_pdu_handle *pasync_handle, *tmp_handle;
struct list_head *plist;
- phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
-
plist = &pasync_ctx->async_entry[cri].wait_queue.list;
-
list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
list_del(&pasync_handle->link);
@@ -1720,7 +1787,7 @@ hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
}
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
- unsigned int is_header)
+ unsigned int is_header, uint8_t ulp_num)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_async_pdu_context *pasync_ctx;
@@ -1728,13 +1795,13 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
struct list_head *pfree_link, *pbusy_list;
struct phys_addr *pasync_sge;
unsigned int ring_id, num_entries;
- unsigned int host_write_num;
+ unsigned int host_write_num, doorbell_offset;
unsigned int writables;
unsigned int i = 0;
u32 doorbell = 0;
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
num_entries = pasync_ctx->num_entries;
if (is_header) {
@@ -1742,13 +1809,17 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
pasync_ctx->async_header.free_entries);
pfree_link = pasync_ctx->async_header.free_list.next;
host_write_num = pasync_ctx->async_header.host_write_ptr;
- ring_id = phwi_ctrlr->default_pdu_hdr.id;
+ ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
+ doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
+ doorbell_offset;
} else {
writables = min(pasync_ctx->async_data.writables,
pasync_ctx->async_data.free_entries);
pfree_link = pasync_ctx->async_data.free_list.next;
host_write_num = pasync_ctx->async_data.host_write_ptr;
- ring_id = phwi_ctrlr->default_pdu_data.id;
+ ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
+ doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
+ doorbell_offset;
}
writables = (writables / 8) * 8;
@@ -1796,7 +1867,7 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
<< DB_DEF_PDU_CQPROC_SHIFT;
- iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va + doorbell_offset);
}
}
@@ -1808,9 +1879,13 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
struct hwi_async_pdu_context *pasync_ctx;
struct async_pdu_handle *pasync_handle = NULL;
unsigned int cq_index = -1;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
pdpdu_cqe, &cq_index);
@@ -1819,8 +1894,10 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
hwi_update_async_writables(phba, pasync_ctx,
pasync_handle->is_header, cq_index);
- hwi_free_async_msg(phba, pasync_handle->cri);
- hwi_post_async_buffers(phba, pasync_handle->is_header);
+ hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
+ hwi_post_async_buffers(phba, pasync_handle->is_header,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
}
static unsigned int
@@ -1859,7 +1936,7 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
phdr, hdr_len, pfirst_buffer,
offset);
- hwi_free_async_msg(phba, cri);
+ hwi_free_async_msg(phba, pasync_ctx, cri);
return 0;
}
@@ -1875,13 +1952,16 @@ hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
struct pdu_base *ppdu;
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ BE_GET_CRI_FROM_CID(beiscsi_conn->
+ beiscsi_conn_cid)));
list_del(&pasync_handle->link);
if (pasync_handle->is_header) {
pasync_ctx->async_header.busy_entries--;
if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
- hwi_free_async_msg(phba, cri);
+ hwi_free_async_msg(phba, pasync_ctx, cri);
BUG();
}
@@ -1936,9 +2016,14 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
struct hwi_async_pdu_context *pasync_ctx;
struct async_pdu_handle *pasync_handle = NULL;
unsigned int cq_index = -1;
+ uint16_t cri_index = BE_GET_CRI_FROM_CID(
+ beiscsi_conn->beiscsi_conn_cid);
phwi_ctrlr = phba->phwi_ctrlr;
- pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+ BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+ cri_index));
+
pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
pdpdu_cqe, &cq_index);
@@ -1947,7 +2032,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
pasync_handle->is_header, cq_index);
hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
- hwi_post_async_buffers(phba, pasync_handle->is_header);
+ hwi_post_async_buffers(phba, pasync_handle->is_header,
+ BEISCSI_GET_ULP_FROM_CRI(
+ phwi_ctrlr, cri_index));
}
static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
@@ -2072,8 +2159,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
"BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
+ spin_lock_bh(&phba->async_pdu_lock);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
break;
case UNSOL_DATA_NOTIFY:
beiscsi_log(phba, KERN_INFO,
@@ -2081,8 +2170,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
"BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
+ spin_lock_bh(&phba->async_pdu_lock);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
break;
case CXN_INVALIDATE_INDEX_NOTIFY:
case CMD_INVALIDATED_NOTIFY:
@@ -2110,8 +2201,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
cqe_desc[code], code, cid);
+ spin_lock_bh(&phba->async_pdu_lock);
hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
(struct i_t_dpdu_cqe *) sol);
+ spin_unlock_bh(&phba->async_pdu_lock);
break;
case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
case CXN_KILLED_BURST_LEN_MISMATCH:
@@ -2476,26 +2569,19 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
+/**
+ * beiscsi_find_mem_req()- Find memory needed
+ * @phba: ptr to HBA struct
+ **/
static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
{
+ uint8_t mem_descr_index, ulp_num;
unsigned int num_cq_pages, num_async_pdu_buf_pages;
unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
sizeof(struct sol_cqe));
- num_async_pdu_buf_pages =
- PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
- phba->params.defpdu_hdr_sz);
- num_async_pdu_buf_sgl_pages =
- PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
- sizeof(struct phys_addr));
- num_async_pdu_data_pages =
- PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
- phba->params.defpdu_data_sz);
- num_async_pdu_data_sgl_pages =
- PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
- sizeof(struct phys_addr));
phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
@@ -2517,24 +2603,79 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
phba->params.icds_per_ctrl;
phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
-
- phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
- num_async_pdu_buf_pages * PAGE_SIZE;
- phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
- num_async_pdu_data_pages * PAGE_SIZE;
- phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
- num_async_pdu_buf_sgl_pages * PAGE_SIZE;
- phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
- num_async_pdu_data_sgl_pages * PAGE_SIZE;
- phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
- phba->params.asyncpdus_per_ctrl *
- sizeof(struct async_pdu_handle);
- phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
- phba->params.asyncpdus_per_ctrl *
- sizeof(struct async_pdu_handle);
- phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
- sizeof(struct hwi_async_pdu_context) +
- (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ num_async_pdu_buf_sgl_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ sizeof(struct phys_addr));
+
+ num_async_pdu_buf_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ phba->params.defpdu_hdr_sz);
+
+ num_async_pdu_data_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ phba->params.defpdu_data_sz);
+
+ num_async_pdu_data_sgl_pages =
+ PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ phba, ulp_num) *
+ sizeof(struct phys_addr));
+
+ mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_buf_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_data_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_buf_sgl_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ num_async_pdu_data_sgl_pages *
+ PAGE_SIZE;
+
+ mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct async_pdu_handle);
+
+ mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct async_pdu_handle);
+
+ mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+ phba->mem_req[mem_descr_index] =
+ sizeof(struct hwi_async_pdu_context) +
+ (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct hwi_async_entry));
+ }
+ }
}
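All of the per-ULP lookups in beiscsi_find_mem_req() reduce to one piece of index arithmetic: a ULP0 enum value plus ulp_num * MEM_DESCR_OFFSET, where MEM_DESCR_OFFSET (8, defined in be_main.h later in this patch) is the number of per-ULP entries in be_mem_enum. A minimal user-space sketch of that arithmetic, with the enum abbreviated to the two values needed here:

#include <assert.h>
#include <stdio.h>

#define MEM_DESCR_OFFSET 8	/* per-ULP entries in be_mem_enum */

enum {	/* values mirror be_mem_enum in be_main.h */
	HWI_MEM_TEMPLATE_HDR_ULP0 = 5,
	HWI_MEM_TEMPLATE_HDR_ULP1 = 13,
};

static int mem_descr_index(int ulp0_base, int ulp_num)
{
	return ulp0_base + ulp_num * MEM_DESCR_OFFSET;
}

int main(void)
{
	/* ULP1's template-header slot is exactly 8 entries later */
	assert(mem_descr_index(HWI_MEM_TEMPLATE_HDR_ULP0, 1) ==
	       HWI_MEM_TEMPLATE_HDR_ULP1);
	printf("ULP1 template hdr index: %d\n",
	       mem_descr_index(HWI_MEM_TEMPLATE_HDR_ULP0, 1));
	return 0;
}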
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
@@ -2576,6 +2717,12 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
mem_descr = phba->init_mem;
for (i = 0; i < SE_MEM_MAX; i++) {
+ if (!phba->mem_req[i]) {
+ mem_descr->mem_array = NULL;
+ mem_descr++;
+ continue;
+ }
+
j = 0;
mem_arr = mem_arr_orig;
alloc_size = phba->mem_req[i];
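Because beiscsi_find_mem_req() now fills mem_req[] only for supported ULPs, the allocator must treat a zero-sized request as a deliberate hole rather than an error, which is what the added skip above does. A sketch of that walk in isolation (beiscsi types stubbed out; kfree(NULL) being a no-op keeps the matching free path safe):

struct mem_descr_sketch {
	void *mem_array;	/* NULL marks an unused descriptor */
};

static void alloc_walk(unsigned int *mem_req,
		       struct mem_descr_sketch *mem_descr,
		       unsigned int max)
{
	unsigned int i;

	for (i = 0; i < max; i++, mem_descr++) {
		if (!mem_req[i]) {
			/* e.g. a ULP1 slot on a single-ULP adapter */
			mem_descr->mem_array = NULL;
			continue;
		}
		/* real driver: DMA-allocate mem_req[i] bytes here */
	}
}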
@@ -2697,7 +2844,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
/* Allocate memory for WRBQ */
phwi_ctxt = phwi_ctrlr->phwi_ctxt;
phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
- phba->fw_config.iscsi_cid_count,
+ phba->params.cxns_per_ctrl,
GFP_KERNEL);
if (!phwi_ctxt->be_wrbq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -2779,6 +2926,7 @@ init_wrb_hndl_failed:
static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
+ uint8_t ulp_num;
struct hwi_controller *phwi_ctrlr;
struct hba_parameters *p = &phba->params;
struct hwi_async_pdu_context *pasync_ctx;
@@ -2786,155 +2934,150 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
unsigned int index, idx, num_per_mem, num_async_data;
struct be_mem_descriptor *mem_descr;
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
-
- phwi_ctrlr = phba->phwi_ctrlr;
- phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET));
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
+ (struct hwi_async_pdu_context *)
+ mem_descr->mem_array[0].virtual_address;
+
+ pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+ memset(pasync_ctx, 0, sizeof(*pasync_ctx));
+
+ pasync_ctx->async_entry =
+ (struct hwi_async_entry *)
+ ((unsigned long)pasync_ctx +
+ sizeof(struct hwi_async_pdu_context));
+
+ pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
+ ulp_num);
+ pasync_ctx->buffer_size = p->defpdu_hdr_sz;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.va_base =
mem_descr->mem_array[0].virtual_address;
- pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
- memset(pasync_ctx, 0, sizeof(*pasync_ctx));
- pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
- phba->fw_config.iscsi_cid_count,
- GFP_KERNEL);
- if (!pasync_ctx->async_entry) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
- return -ENOMEM;
- }
-
- pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
- pasync_ctx->buffer_size = p->defpdu_hdr_sz;
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
-
- pasync_ctx->async_header.va_base =
- mem_descr->mem_array[0].virtual_address;
-
- pasync_ctx->async_header.pa_base.u.a64.address =
- mem_descr->mem_array[0].bus_address.u.a64.address;
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_HEADER_RING;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
-
- pasync_ctx->async_header.ring_base =
- mem_descr->mem_array[0].virtual_address;
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
-
- pasync_ctx->async_header.handle_base =
- mem_descr->mem_array[0].virtual_address;
- pasync_ctx->async_header.writables = 0;
- INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
-
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_RING;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_DATA_RING va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
-
- pasync_ctx->async_data.ring_base =
- mem_descr->mem_array[0].virtual_address;
-
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
- if (!mem_descr->mem_array[0].virtual_address)
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
+ pasync_ctx->async_header.pa_base.u.a64.address =
+ mem_descr->mem_array[0].
+ bus_address.u.a64.address;
- pasync_ctx->async_data.handle_base =
- mem_descr->mem_array[0].virtual_address;
- pasync_ctx->async_data.writables = 0;
- INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.ring_base =
+ mem_descr->mem_array[0].virtual_address;
- pasync_header_h =
- (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
- pasync_data_h =
- (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_header.handle_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_header.writables = 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ pasync_ctx->async_data.ring_base =
+ mem_descr->mem_array[0].virtual_address;
- mem_descr = (struct be_mem_descriptor *)phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_BUF;
- if (mem_descr->mem_array[0].virtual_address) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : hwi_init_async_pdu_ctx"
- " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
- mem_descr->mem_array[0].virtual_address);
- } else
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : No Virtual address\n");
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (!mem_descr->mem_array[0].virtual_address)
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
- idx = 0;
- pasync_ctx->async_data.va_base =
- mem_descr->mem_array[idx].virtual_address;
- pasync_ctx->async_data.pa_base.u.a64.address =
- mem_descr->mem_array[idx].bus_address.u.a64.address;
-
- num_async_data = ((mem_descr->mem_array[idx].size) /
- phba->params.defpdu_data_sz);
- num_per_mem = 0;
-
- for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
- pasync_header_h->cri = -1;
- pasync_header_h->index = (char)index;
- INIT_LIST_HEAD(&pasync_header_h->link);
- pasync_header_h->pbuffer =
- (void *)((unsigned long)
- (pasync_ctx->async_header.va_base) +
- (p->defpdu_hdr_sz * index));
-
- pasync_header_h->pa.u.a64.address =
- pasync_ctx->async_header.pa_base.u.a64.address +
- (p->defpdu_hdr_sz * index);
-
- list_add_tail(&pasync_header_h->link,
- &pasync_ctx->async_header.free_list);
- pasync_header_h++;
- pasync_ctx->async_header.free_entries++;
- pasync_ctx->async_header.writables++;
-
- INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
- INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
- header_busy_list);
- pasync_data_h->cri = -1;
- pasync_data_h->index = (char)index;
- INIT_LIST_HEAD(&pasync_data_h->link);
-
- if (!num_async_data) {
- num_per_mem = 0;
- idx++;
+ pasync_ctx->async_data.handle_base =
+ mem_descr->mem_array[0].virtual_address;
+ pasync_ctx->async_data.writables = 0;
+ INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+
+ pasync_header_h =
+ (struct async_pdu_handle *)
+ pasync_ctx->async_header.handle_base;
+ pasync_data_h =
+ (struct async_pdu_handle *)
+ pasync_ctx->async_data.handle_base;
+
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ if (mem_descr->mem_array[0].virtual_address) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : hwi_init_async_pdu_ctx"
+ " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
+ ulp_num,
+ mem_descr->mem_array[0].
+ virtual_address);
+ } else
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : No Virtual address for ULP : %d\n",
+ ulp_num);
+
+ idx = 0;
pasync_ctx->async_data.va_base =
mem_descr->mem_array[idx].virtual_address;
pasync_ctx->async_data.pa_base.u.a64.address =
@@ -2943,32 +3086,83 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
num_async_data = ((mem_descr->mem_array[idx].size) /
phba->params.defpdu_data_sz);
- }
- pasync_data_h->pbuffer =
- (void *)((unsigned long)
- (pasync_ctx->async_data.va_base) +
- (p->defpdu_data_sz * num_per_mem));
-
- pasync_data_h->pa.u.a64.address =
- pasync_ctx->async_data.pa_base.u.a64.address +
- (p->defpdu_data_sz * num_per_mem);
- num_per_mem++;
- num_async_data--;
+ num_per_mem = 0;
- list_add_tail(&pasync_data_h->link,
- &pasync_ctx->async_data.free_list);
- pasync_data_h++;
- pasync_ctx->async_data.free_entries++;
- pasync_ctx->async_data.writables++;
+ for (index = 0; index < BEISCSI_GET_CID_COUNT
+ (phba, ulp_num); index++) {
+ pasync_header_h->cri = -1;
+ pasync_header_h->index = (char)index;
+ INIT_LIST_HEAD(&pasync_header_h->link);
+ pasync_header_h->pbuffer =
+ (void *)((unsigned long)
+ (pasync_ctx->
+ async_header.va_base) +
+ (p->defpdu_hdr_sz * index));
+
+ pasync_header_h->pa.u.a64.address =
+ pasync_ctx->async_header.pa_base.u.a64.
+ address + (p->defpdu_hdr_sz * index);
+
+ list_add_tail(&pasync_header_h->link,
+ &pasync_ctx->async_header.
+ free_list);
+ pasync_header_h++;
+ pasync_ctx->async_header.free_entries++;
+ pasync_ctx->async_header.writables++;
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ wait_queue.list);
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ header_busy_list);
+ pasync_data_h->cri = -1;
+ pasync_data_h->index = (char)index;
+ INIT_LIST_HEAD(&pasync_data_h->link);
+
+ if (!num_async_data) {
+ num_per_mem = 0;
+ idx++;
+ pasync_ctx->async_data.va_base =
+ mem_descr->mem_array[idx].
+ virtual_address;
+ pasync_ctx->async_data.pa_base.u.
+ a64.address =
+ mem_descr->mem_array[idx].
+ bus_address.u.a64.address;
+ num_async_data =
+ ((mem_descr->mem_array[idx].
+ size) /
+ phba->params.defpdu_data_sz);
+ }
+ pasync_data_h->pbuffer =
+ (void *)((unsigned long)
+ (pasync_ctx->async_data.va_base) +
+ (p->defpdu_data_sz * num_per_mem));
+
+ pasync_data_h->pa.u.a64.address =
+ pasync_ctx->async_data.pa_base.u.a64.
+ address + (p->defpdu_data_sz *
+ num_per_mem);
+ num_per_mem++;
+ num_async_data--;
+
+ list_add_tail(&pasync_data_h->link,
+ &pasync_ctx->async_data.
+ free_list);
+ pasync_data_h++;
+ pasync_ctx->async_data.free_entries++;
+ pasync_ctx->async_data.writables++;
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+ data_busy_list);
+ }
- INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
+ pasync_ctx->async_header.host_write_ptr = 0;
+ pasync_ctx->async_header.ep_read_ptr = -1;
+ pasync_ctx->async_data.host_write_ptr = 0;
+ pasync_ctx->async_data.ep_read_ptr = -1;
+ }
}
- pasync_ctx->async_header.host_write_ptr = 0;
- pasync_ctx->async_header.ep_read_ptr = -1;
- pasync_ctx->async_data.host_write_ptr = 0;
- pasync_ctx->async_data.ep_read_ptr = -1;
-
return 0;
}
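Note how each per-ULP context avoids the old separate kzalloc() of the entry array: the HWI_MEM_ASYNC_PDU_CONTEXT_ULP* request was sized in beiscsi_find_mem_req() as the fixed header plus one hwi_async_entry per CID, so async_entry can simply point just past the header inside the same allocation. A stripped-down sketch of that layout trick:

/* Stubbed types; only the pointer arithmetic matters here. */
struct entry_sketch { int cri; };

struct ctx_sketch {
	unsigned int num_entries;
	struct entry_sketch *async_entry;
	/* num_entries entries follow in the same allocation */
};

static void ctx_attach_entries(struct ctx_sketch *ctx)
{
	/* point just past the fixed header; no second allocation */
	ctx->async_entry = (struct entry_sketch *)
		((unsigned long)ctx + sizeof(struct ctx_sketch));
}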
@@ -3164,7 +3358,7 @@ static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context,
struct hwi_controller *phwi_ctrlr,
- unsigned int def_pdu_ring_sz)
+ unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
unsigned int idx;
int ret;
@@ -3174,36 +3368,42 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
void *dq_vaddress;
idx = 0;
- dq = &phwi_context->be_def_hdrq;
+ dq = &phwi_context->be_def_hdrq[ulp_num];
cq = &phwi_context->be_cq[0];
mem = &dq->dma_mem;
mem_descr = phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_HEADER_RING;
+ mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
dq_vaddress = mem_descr->mem_array[idx].virtual_address;
ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
sizeof(struct phys_addr),
sizeof(struct phys_addr), dq_vaddress);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
+ "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
+ ulp_num);
+
return ret;
}
mem->dma = (unsigned long)mem_descr->mem_array[idx].
bus_address.u.a64.address;
ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
def_pdu_ring_sz,
- phba->params.defpdu_hdr_sz);
+ phba->params.defpdu_hdr_sz,
+ BEISCSI_DEFQ_HDR, ulp_num);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
+ "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
+ ulp_num);
+
return ret;
}
- phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : iscsi def pdu id is %d\n",
- phwi_context->be_def_hdrq.id);
- hwi_post_async_buffers(phba, 1);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
+ ulp_num,
+ phwi_context->be_def_hdrq[ulp_num].id);
+ hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
return 0;
}
@@ -3211,7 +3411,7 @@ static int
beiscsi_create_def_data(struct beiscsi_hba *phba,
struct hwi_context_memory *phwi_context,
struct hwi_controller *phwi_ctrlr,
- unsigned int def_pdu_ring_sz)
+ unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
unsigned int idx;
int ret;
@@ -3221,43 +3421,86 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
void *dq_vaddress;
idx = 0;
- dataq = &phwi_context->be_def_dataq;
+ dataq = &phwi_context->be_def_dataq[ulp_num];
cq = &phwi_context->be_cq[0];
mem = &dataq->dma_mem;
mem_descr = phba->init_mem;
- mem_descr += HWI_MEM_ASYNC_DATA_RING;
+ mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
dq_vaddress = mem_descr->mem_array[idx].virtual_address;
ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
sizeof(struct phys_addr),
sizeof(struct phys_addr), dq_vaddress);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
+ "BM_%d : be_fill_queue Failed for DEF PDU "
+ "DATA on ULP : %d\n",
+ ulp_num);
+
return ret;
}
mem->dma = (unsigned long)mem_descr->mem_array[idx].
bus_address.u.a64.address;
ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
def_pdu_ring_sz,
- phba->params.defpdu_data_sz);
+ phba->params.defpdu_data_sz,
+ BEISCSI_DEFQ_DATA, ulp_num);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d be_cmd_create_default_pdu_queue"
- " Failed for DEF PDU DATA\n");
+ " Failed for DEF PDU DATA on ULP : %d\n",
+ ulp_num);
return ret;
}
- phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : iscsi def data id is %d\n",
- phwi_context->be_def_dataq.id);
+ "BM_%d : iscsi def data id on ULP : %d is %d\n",
+ ulp_num,
+ phwi_context->be_def_dataq[ulp_num].id);
- hwi_post_async_buffers(phba, 0);
+ hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : DEFAULT PDU DATA RING CREATED\n");
+ "BM_%d : DEFAULT PDU DATA RING CREATED"
+ "on ULP : %d\n", ulp_num);
return 0;
}
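Both default-PDU rings are now sized per ULP: def_pdu_ring_sz is BEISCSI_GET_CID_COUNT(phba, ulp_num) * sizeof(struct phys_addr), one ring slot per CID, and be_fill_queue() recovers the entry count by dividing the descriptor size back out. A trivial user-space check of that round trip (assuming an 8-byte struct phys_addr, i.e. a 64-bit bus address split across two words):

#include <assert.h>

#define PHYS_ADDR_SZ 8	/* assumed sizeof(struct phys_addr) */

int main(void)
{
	unsigned int cid_count = 128;			/* example per-ULP count */
	unsigned int ring_sz = cid_count * PHYS_ADDR_SZ;	/* def_pdu_ring_sz */
	unsigned int entries = ring_sz / PHYS_ADDR_SZ;	/* be_fill_queue() */

	assert(entries == cid_count);	/* one ring slot per CID */
	return 0;
}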
+
+static int
+beiscsi_post_template_hdr(struct beiscsi_hba *phba)
+{
+ struct be_mem_descriptor *mem_descr;
+ struct mem_array *pm_arr;
+ struct be_dma_mem sgl;
+ int status, ulp_num;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+ mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
+ (ulp_num * MEM_DESCR_OFFSET);
+ pm_arr = mem_descr->mem_array;
+
+ hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
+ status = be_cmd_iscsi_post_template_hdr(
+ &phba->ctrl, &sgl);
+
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Post Template HDR Failed for"
+ "ULP_%d\n", ulp_num);
+ return status;
+ }
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : Template HDR Pages Posted for"
+ "ULP_%d\n", ulp_num);
+ }
+ }
+ return 0;
+}
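The loop shape above — iterate 0..BEISCSI_ULP_COUNT and test_bit() against fw_config.ulp_supported — recurs in nearly every hunk of this patch. The kernel's bitops offer an equivalent, slightly tighter spelling; a hypothetical alternative (not what the patch uses), which also covers the "first supported ULP" lookup that beiscsi_post_pages() needs below:

#include <linux/bitops.h>

static void sketch_walk_ulps(struct beiscsi_hba *phba)
{
	unsigned int ulp_num;

	/* visits only the set bits: ULP0 and/or ULP1 */
	for_each_set_bit(ulp_num, &phba->fw_config.ulp_supported,
			 BEISCSI_ULP_COUNT) {
		/* per-ULP work, e.g. posting template headers */
	}

	/* first supported ULP, as beiscsi_post_pages() wants */
	ulp_num = find_first_bit(&phba->fw_config.ulp_supported,
				 BEISCSI_ULP_COUNT);
}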
+
static int
beiscsi_post_pages(struct beiscsi_hba *phba)
{
@@ -3265,14 +3508,18 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
struct mem_array *pm_arr;
unsigned int page_offset, i;
struct be_dma_mem sgl;
- int status;
+ int status, ulp_num = 0;
mem_descr = phba->init_mem;
mem_descr += HWI_MEM_SGE;
pm_arr = mem_descr->mem_array;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+
page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
- phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
+ phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
for (i = 0; i < mem_descr->num_elements; i++) {
hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
@@ -3324,13 +3571,15 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
{
unsigned int wrb_mem_index, offset, size, num_wrb_rings;
u64 pa_addr_lo;
- unsigned int idx, num, i;
+ unsigned int idx, num, i, ulp_num;
struct mem_array *pwrb_arr;
void *wrb_vaddr;
struct be_dma_mem sgl;
struct be_mem_descriptor *mem_descr;
struct hwi_wrb_context *pwrb_context;
int status;
+ uint8_t ulp_count = 0, ulp_base_num = 0;
+ uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
idx = 0;
mem_descr = phba->init_mem;
@@ -3374,14 +3623,37 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
num_wrb_rings--;
}
}
+
+ /* Get the ULP Count */
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ ulp_count++;
+ ulp_base_num = ulp_num;
+ cid_count_ulp[ulp_num] =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num);
+ }
+
for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
wrb_mem_index = 0;
offset = 0;
size = 0;
+ if (ulp_count > 1) {
+ ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
+
+ if (!cid_count_ulp[ulp_base_num])
+ ulp_base_num = (ulp_base_num + 1) %
+ BEISCSI_ULP_COUNT;
+
+ cid_count_ulp[ulp_base_num]--;
+ }
+
+
hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
- &phwi_context->be_wrbq[i]);
+ &phwi_context->be_wrbq[i],
+ &phwi_ctrlr->wrb_context[i],
+ ulp_base_num);
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : wrbq create failed.");
@@ -3389,7 +3661,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
return status;
}
pwrb_context = &phwi_ctrlr->wrb_context[i];
- pwrb_context->cid = phwi_context->be_wrbq[i].id;
BE_SET_CID_TO_CRI(i, pwrb_context->cid);
}
kfree(pwrb_arr);
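With both ULPs active, the hunk above deals WRB queues out round-robin: advance ulp_base_num, skip a ULP whose remaining CID budget is exhausted, then charge the chosen ULP. A standalone user-space trace of the same dealing logic with uneven budgets:

#include <stdio.h>

#define ULP_COUNT 2

int main(void)
{
	unsigned short budget[ULP_COUNT] = { 3, 2 };	/* per-ULP CID counts */
	unsigned int base = 0, i;

	for (i = 0; i < 5; i++) {		/* 5 = 3 + 2 connections */
		base = (base + 1) % ULP_COUNT;
		if (!budget[base])		/* this ULP is drained */
			base = (base + 1) % ULP_COUNT;
		budget[base]--;
		printf("wrbq %u -> ULP%u\n", i, base);
	}
	/* prints ULP1, ULP0, ULP1, ULP0, ULP0 */
	return 0;
}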
@@ -3433,10 +3704,13 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct hwi_async_pdu_context *pasync_ctx;
- int i, eq_num;
+ int i, eq_num, ulp_num;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ be_cmd_iscsi_remove_template_hdr(ctrl);
+
for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
q = &phwi_context->be_wrbq[i];
if (q->created)
@@ -3445,13 +3719,20 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
kfree(phwi_context->be_wrbq);
free_wrb_handles(phba);
- q = &phwi_context->be_def_hdrq;
- if (q->created)
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
- q = &phwi_context->be_def_dataq;
- if (q->created)
- beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+ q = &phwi_context->be_def_hdrq[ulp_num];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ q = &phwi_context->be_def_dataq[ulp_num];
+ if (q->created)
+ beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+ pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+ }
+ }
beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
@@ -3470,9 +3751,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
}
be_mcc_queues_destroy(phba);
-
- pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
- kfree(pasync_ctx->async_entry);
be_cmd_fw_uninit(ctrl);
}
@@ -3538,8 +3816,19 @@ static void find_num_cpus(struct beiscsi_hba *phba)
BEISCSI_MAX_NUM_CPUS : num_cpus;
break;
case BE_GEN4:
- phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
- OC_SKH_MAX_NUM_CPUS : num_cpus;
+ /*
+ * If eqid_count == 1 fall back to
+ * INTX mechanism
+ **/
+ if (phba->fw_config.eqid_count == 1) {
+ enable_msix = 0;
+ phba->num_cpus = 1;
+ return;
+ }
+
+ phba->num_cpus =
+ (num_cpus > (phba->fw_config.eqid_count - 1)) ?
+ (phba->fw_config.eqid_count - 1) : num_cpus;
break;
default:
phba->num_cpus = 1;
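The BE_GEN4 branch replaces the fixed OC_SKH_MAX_NUM_CPUS cap (dropped from be_main.h below) with the firmware's own EQ budget: one EQ stays reserved, so the usable vector count is eqid_count - 1, and eqid_count == 1 forces the INTx fallback. A worked example of the sizing:

#include <stdio.h>

static unsigned int gen4_num_cpus(unsigned int num_cpus,
				  unsigned int eqid_count)
{
	if (eqid_count == 1)
		return 1;	/* INTx fallback, MSI-X disabled */
	return (num_cpus > eqid_count - 1) ? eqid_count - 1 : num_cpus;
}

int main(void)
{
	printf("%u\n", gen4_num_cpus(16, 8));	/* 7: capped by EQs  */
	printf("%u\n", gen4_num_cpus(4, 8));	/* 4: capped by CPUs */
	printf("%u\n", gen4_num_cpus(16, 1));	/* 1: INTx           */
	return 0;
}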
@@ -3552,10 +3841,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
struct hwi_context_memory *phwi_context;
unsigned int def_pdu_ring_sz;
struct be_ctrl_info *ctrl = &phba->ctrl;
- int status;
+ int status, ulp_num;
- def_pdu_ring_sz =
- phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
phwi_context->max_eqd = 0;
@@ -3588,27 +3875,48 @@ static int hwi_init_port(struct beiscsi_hba *phba)
goto error;
}
- status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
- def_pdu_ring_sz);
- if (status != 0) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Default Header not created\n");
- goto error;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ def_pdu_ring_sz =
+ BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+ sizeof(struct phys_addr);
+
+ status = beiscsi_create_def_hdr(phba, phwi_context,
+ phwi_ctrlr,
+ def_pdu_ring_sz,
+ ulp_num);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Default Header not created for ULP : %d\n",
+ ulp_num);
+ goto error;
+ }
+
+ status = beiscsi_create_def_data(phba, phwi_context,
+ phwi_ctrlr,
+ def_pdu_ring_sz,
+ ulp_num);
+ if (status != 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Default Data not created for ULP : %d\n",
+ ulp_num);
+ goto error;
+ }
+ }
}
- status = beiscsi_create_def_data(phba, phwi_context,
- phwi_ctrlr, def_pdu_ring_sz);
+ status = beiscsi_post_pages(phba);
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Default Data not created\n");
+ "BM_%d : Post SGL Pages Failed\n");
goto error;
}
- status = beiscsi_post_pages(phba);
+ status = beiscsi_post_template_hdr(phba);
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Post SGL Pages Failed\n");
- goto error;
+ "BM_%d : Template HDR Posting for CXN Failed\n");
}
status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
@@ -3618,6 +3926,26 @@ static int hwi_init_port(struct beiscsi_hba *phba)
goto error;
}
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ uint16_t async_arr_idx = 0;
+
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+ uint16_t cri = 0;
+ struct hwi_async_pdu_context *pasync_ctx;
+
+ pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
+ phwi_ctrlr, ulp_num);
+ for (cri = 0; cri <
+ phba->params.cxns_per_ctrl; cri++) {
+ if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
+ (phwi_ctrlr, cri))
+ pasync_ctx->cid_to_async_cri_map[
+ phwi_ctrlr->wrb_context[cri].cid] =
+ async_arr_idx++;
+ }
+ }
+ }
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : hwi_init_port success\n");
return 0;
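The closing loop builds cid_to_async_cri_map: the WRB-ring round-robin above spreads CIDs across ULPs, so the CIDs a given ULP owns are sparse; the map renumbers them densely (0..N-1) so the async-PDU path can index that ULP's context arrays directly. A small user-space sketch of the compaction:

#include <stdio.h>

int main(void)
{
	unsigned short cid_of[6] = { 10, 11, 12, 13, 14, 15 };
	unsigned char  ulp_of[6] = { 0, 1, 0, 1, 0, 1 };  /* round-robin */
	unsigned short map[2][32] = { { 0 } };
	unsigned short next[2] = { 0, 0 };
	unsigned int cri;

	for (cri = 0; cri < 6; cri++)
		map[ulp_of[cri]][cid_of[cri]] = next[ulp_of[cri]]++;

	/* ULP0 owns CIDs 10, 12, 14 -> async CRIs 0, 1, 2 */
	printf("ULP0: cid 14 -> async cri %u\n", map[0][14]);
	return 0;
}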
@@ -3682,6 +4010,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
(unsigned long)mem_descr->mem_array[j - 1].
bus_address.u.a64.address);
}
+
kfree(mem_descr->mem_array);
mem_descr++;
}
@@ -3721,6 +4050,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
struct sgl_handle *psgl_handle;
struct iscsi_sge *pfrag;
unsigned int arr_index, i, idx;
+ unsigned int ulp_icd_start, ulp_num = 0;
phba->io_sgl_hndl_avbl = 0;
phba->eh_sgl_hndl_avbl = 0;
@@ -3787,6 +4117,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
"\n BM_%d : mem_descr_sg->num_elements=%d\n",
mem_descr_sg->num_elements);
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+
+ ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+
arr_index = 0;
idx = 0;
while (idx < mem_descr_sg->num_elements) {
@@ -3805,8 +4141,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
pfrag += phba->params.num_sge_per_io;
- psgl_handle->sgl_index =
- phba->fw_config.iscsi_icd_start + arr_index++;
+ psgl_handle->sgl_index = ulp_icd_start + arr_index++;
}
idx++;
}
@@ -3819,15 +4154,46 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
- int i;
+ int ret;
+ uint16_t i, ulp_num;
+ struct ulp_cid_info *ptr_cid_info = NULL;
- phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
- GFP_KERNEL);
- if (!phba->cid_array) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to allocate memory in "
- "hba_setup_cid_tbls\n");
- return -ENOMEM;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
+ GFP_KERNEL);
+
+ if (!ptr_cid_info) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory"
+ "for ULP_CID_INFO for ULP : %d\n",
+ ulp_num);
+ ret = -ENOMEM;
+ goto free_memory;
+
+ }
+
+ /* Allocate memory for CID array */
+ ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
+ BEISCSI_GET_CID_COUNT(phba,
+ ulp_num), GFP_KERNEL);
+ if (!ptr_cid_info->cid_array) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to allocate memory"
+ "for CID_ARRAY for ULP : %d\n",
+ ulp_num);
+ kfree(ptr_cid_info);
+ ptr_cid_info = NULL;
+ ret = -ENOMEM;
+
+ goto free_memory;
+ }
+ ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
+ phba, ulp_num);
+
+ /* Save the cid_info_array ptr */
+ phba->cid_array_info[ulp_num] = ptr_cid_info;
+ }
}
phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
phba->params.cxns_per_ctrl, GFP_KERNEL);
@@ -3835,9 +4201,9 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory in "
"hba_setup_cid_tbls\n");
- kfree(phba->cid_array);
- phba->cid_array = NULL;
- return -ENOMEM;
+ ret = -ENOMEM;
+
+ goto free_memory;
}
phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
@@ -3847,18 +4213,44 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
"BM_%d : Failed to allocate memory in"
"hba_setup_cid_tbls\n");
- kfree(phba->cid_array);
kfree(phba->ep_array);
- phba->cid_array = NULL;
phba->ep_array = NULL;
- return -ENOMEM;
+ ret = -ENOMEM;
+
+ goto free_memory;
+ }
+
+ for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+ ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
+
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+ ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
+ phba->phwi_ctrlr->wrb_context[i].cid;
+
}
- for (i = 0; i < phba->params.cxns_per_ctrl; i++)
- phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
- phba->avlbl_cids = phba->params.cxns_per_ctrl;
+ ptr_cid_info->cid_alloc = 0;
+ ptr_cid_info->cid_free = 0;
+ }
+ }
return 0;
+
+free_memory:
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+
+ if (ptr_cid_info) {
+ kfree(ptr_cid_info->cid_array);
+ kfree(ptr_cid_info);
+ phba->cid_array_info[ulp_num] = NULL;
+ }
+ }
+ }
+
+ return ret;
}
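Each ulp_cid_info carries its own cid_array plus avlbl_cids and the cid_alloc/cid_free cursors that the old hba-wide fields used to provide. The patch only builds the tables here; the alloc/free sides elsewhere in the driver treat the array as a ring. A hypothetical sketch of that ring discipline — helper names are illustrative, not from the driver:

struct cid_ring {
	unsigned short *cid_array;
	unsigned short avlbl_cids, cid_alloc, cid_free, count;
};

static int cid_take(struct cid_ring *r)
{
	unsigned short cid;

	if (!r->avlbl_cids)
		return -1;			/* ULP exhausted */
	cid = r->cid_array[r->cid_alloc];
	r->cid_alloc = (r->cid_alloc + 1) % r->count;
	r->avlbl_cids--;
	return cid;
}

static void cid_put(struct cid_ring *r, unsigned short cid)
{
	r->cid_array[r->cid_free] = cid;
	r->cid_free = (r->cid_free + 1) % r->count;
	r->avlbl_cids++;
}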
static void hwi_enable_intr(struct beiscsi_hba *phba)
@@ -4113,20 +4505,39 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
static void beiscsi_clean_port(struct beiscsi_hba *phba)
{
- int mgmt_status;
-
- mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
- if (mgmt_status)
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
- "BM_%d : mgmt_epfw_cleanup FAILED\n");
+ int mgmt_status, ulp_num;
+ struct ulp_cid_info *ptr_cid_info = NULL;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
+ if (mgmt_status)
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_INIT,
+ "BM_%d : mgmt_epfw_cleanup FAILED"
+ " for ULP_%d\n", ulp_num);
+ }
+ }
hwi_purge_eq(phba);
hwi_cleanup(phba);
kfree(phba->io_sgl_hndl_base);
kfree(phba->eh_sgl_hndl_base);
- kfree(phba->cid_array);
kfree(phba->ep_array);
kfree(phba->conn_table);
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ ptr_cid_info = phba->cid_array_info[ulp_num];
+
+ if (ptr_cid_info) {
+ kfree(ptr_cid_info->cid_array);
+ kfree(ptr_cid_info);
+ phba->cid_array_info[ulp_num] = NULL;
+ }
+ }
+ }
+
}
/**
@@ -4255,8 +4666,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
<< DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-
- iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
}
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
@@ -4481,7 +4892,8 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
DB_DEF_PDU_WRB_INDEX_MASK) <<
DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
- iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
return 0;
}
@@ -4536,7 +4948,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
- iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
return 0;
}
@@ -4638,7 +5051,8 @@ static int beiscsi_mtask(struct iscsi_task *task)
doorbell |= (io_task->pwrb_handle->wrb_index &
DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
- iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ iowrite32(doorbell, phba->db_va +
+ beiscsi_conn->doorbell_offset);
return 0;
}
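All four TX paths above switch from the hard-wired DB_TXULP0_OFFSET — correct only for connections bound to ULP0 — to the per-connection doorbell_offset. A kernel-context sketch of the resulting ring sequence, reusing the DB_DEF_PDU_* macros already visible in these hunks (the CID bits are OR'd in earlier, outside the shown context):

static void sketch_ring_doorbell(struct beiscsi_conn *beiscsi_conn,
				 struct beiscsi_hba *phba,
				 unsigned int wrb_index)
{
	u32 doorbell = 0;	/* CID bits composed before this point */

	doorbell |= (wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			<< DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	/* ULP-aware per-connection offset, not DB_TXULP0_OFFSET */
	iowrite32(doorbell, phba->db_va + beiscsi_conn->doorbell_offset);
}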
@@ -4663,8 +5077,12 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
struct beiscsi_hba *phba = NULL;
phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
- "BM_%d : scsi_dma_map Failed\n");
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
+ "BM_%d : scsi_dma_map Failed "
+ "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
+ be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
+ io_task->libiscsi_itt, scsi_bufflen(sc));
return num_sg;
}
@@ -4769,10 +5187,12 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
/*
* beiscsi_quiesce()- Cleanup Driver resources
* @phba: Instance Priv structure
+ * @unload_state: Clean or EEH unload state
*
* Free the OS and HW resources held by the driver
**/
-static void beiscsi_quiesce(struct beiscsi_hba *phba)
+static void beiscsi_quiesce(struct beiscsi_hba *phba,
+ uint32_t unload_state)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
@@ -4785,28 +5205,37 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
if (phba->msix_enabled) {
for (i = 0; i <= phba->num_cpus; i++) {
msix_vec = phba->msix_entries[i].vector;
+ synchronize_irq(msix_vec);
free_irq(msix_vec, &phwi_context->be_eq[i]);
kfree(phba->msi_name[i]);
}
} else
- if (phba->pcidev->irq)
+ if (phba->pcidev->irq) {
+ synchronize_irq(phba->pcidev->irq);
free_irq(phba->pcidev->irq, phba);
+ }
pci_disable_msix(phba->pcidev);
- destroy_workqueue(phba->wq);
+
if (blk_iopoll_enabled)
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
blk_iopoll_disable(&pbe_eq->iopoll);
}
- beiscsi_clean_port(phba);
- beiscsi_free_mem(phba);
+ if (unload_state == BEISCSI_CLEAN_UNLOAD) {
+ destroy_workqueue(phba->wq);
+ beiscsi_clean_port(phba);
+ beiscsi_free_mem(phba);
- beiscsi_unmap_pci_function(phba);
- pci_free_consistent(phba->pcidev,
- phba->ctrl.mbox_mem_alloced.size,
- phba->ctrl.mbox_mem_alloced.va,
- phba->ctrl.mbox_mem_alloced.dma);
+ beiscsi_unmap_pci_function(phba);
+ pci_free_consistent(phba->pcidev,
+ phba->ctrl.mbox_mem_alloced.size,
+ phba->ctrl.mbox_mem_alloced.va,
+ phba->ctrl.mbox_mem_alloced.dma);
+ } else {
+ hwi_purge_eq(phba);
+ hwi_cleanup(phba);
+ }
cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
}
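The unload_state parameter splits teardown into two depths: a clean unload (driver remove/shutdown) destroys the workqueue and frees host memory and PCI resources, while the EEH path only purges and destroys the HW queues so that beiscsi_eeh_resume() can rebuild them on the same allocations. A condensed sketch of the branch as the hunk leaves it:

static void sketch_quiesce_tail(struct beiscsi_hba *phba, u32 unload_state)
{
	if (unload_state == BEISCSI_CLEAN_UNLOAD) {
		destroy_workqueue(phba->wq);	/* full teardown */
		beiscsi_clean_port(phba);
		beiscsi_free_mem(phba);
		/* ... unmap BARs, free mailbox DMA ... */
	} else {
		hwi_purge_eq(phba);		/* EEH: HW state only */
		hwi_cleanup(phba);
	}
}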
@@ -4823,11 +5252,13 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
beiscsi_destroy_def_ifaces(phba);
- beiscsi_quiesce(phba);
+ beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
iscsi_boot_destroy_kset(phba->boot_kset);
iscsi_host_remove(phba->shost);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
+ pci_disable_pcie_error_reporting(pcidev);
+ pci_set_drvdata(pcidev, NULL);
pci_disable_device(pcidev);
}
@@ -4842,7 +5273,7 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)
return;
}
- beiscsi_quiesce(phba);
+ beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
pci_disable_device(pcidev);
}
@@ -4880,6 +5311,167 @@ beiscsi_hw_health_check(struct work_struct *work)
msecs_to_jiffies(1000));
}
+
+static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct beiscsi_hba *phba = NULL;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+ phba->state |= BE_ADAPTER_PCI_ERR;
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH error detected\n");
+
+ beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+
+ if (state == pci_channel_io_perm_failure) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH : State PERM Failure");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_disable_device(pdev);
+
+ /* The error could cause the FW to trigger a flash debug dump.
+ * Resetting the card while flash dump is in progress
+ * can cause it not to recover; wait for it to finish.
+ * Wait only for first function as it is needed only once per
+ * adapter.
+ **/
+ if (pdev->devfn == 0)
+ ssleep(30);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
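error_detected sets BE_ADAPTER_PCI_ERR in phba->state before quiescing, and beiscsi_eeh_resume() clears it at the end, giving the rest of the driver a single flag for "PCI channel is down". The check sites are outside these hunks; a hypothetical guard of the kind a fast path would use:

/* Hypothetical helper; the driver's actual checks are not in this diff. */
static inline bool beiscsi_hba_in_pci_err(struct beiscsi_hba *phba)
{
	return !!(phba->state & BE_ADAPTER_PCI_ERR);
}

/*
 * e.g. at the top of an I/O submission path:
 *
 *	if (beiscsi_hba_in_pci_err(phba))
 *		return -EIO;
 */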
+
+static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
+{
+ struct beiscsi_hba *phba = NULL;
+ int status = 0;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset\n");
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /* Wait for the CHIP Reset to complete */
+ status = be_chk_reset_complete(phba);
+ if (!status) {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset Completed\n");
+ } else {
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : EEH Reset Completion Failure\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void beiscsi_eeh_resume(struct pci_dev *pdev)
+{
+ int ret = 0, i;
+ struct be_eq_obj *pbe_eq;
+ struct beiscsi_hba *phba = NULL;
+ struct hwi_controller *phwi_ctrlr;
+ struct hwi_context_memory *phwi_context;
+
+ phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+ pci_save_state(pdev);
+
+ if (enable_msix)
+ find_num_cpus(phba);
+ else
+ phba->num_cpus = 1;
+
+ if (enable_msix) {
+ beiscsi_msix_enable(phba);
+ if (!phba->msix_enabled)
+ phba->num_cpus = 1;
+ }
+
+ ret = beiscsi_cmd_reset_function(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Reset Failed\n");
+ goto ret_err;
+ }
+
+ ret = be_chk_reset_complete(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to get out of reset.\n");
+ goto ret_err;
+ }
+
+ beiscsi_get_params(phba);
+ phba->shost->max_id = phba->params.cxns_per_ctrl;
+ phba->shost->can_queue = phba->params.ios_per_ctrl;
+ ret = hwi_init_controller(phba);
+
+ for (i = 0; i < MAX_MCC_CMD; i++) {
+ init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+ phba->ctrl.mcc_tag[i] = i + 1;
+ phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_available++;
+ }
+
+ phwi_ctrlr = phba->phwi_ctrlr;
+ phwi_context = phwi_ctrlr->phwi_ctxt;
+
+ if (blk_iopoll_enabled) {
+ for (i = 0; i < phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+ be_iopoll);
+ blk_iopoll_enable(&pbe_eq->iopoll);
+ }
+
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+ } else {
+ if (phba->msix_enabled) {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
+ } else {
+ pbe_eq = &phwi_context->be_eq[0];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
+ }
+
+ ret = beiscsi_init_irqs(phba);
+ if (ret < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_eeh_resume - "
+ "Failed to beiscsi_init_irqs\n");
+ goto ret_err;
+ }
+
+ hwi_enable_intr(phba);
+ phba->state &= ~BE_ADAPTER_PCI_ERR;
+
+ return;
+ret_err:
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : AER EEH Resume Failed\n");
+}
+
static int beiscsi_dev_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
@@ -4887,7 +5479,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
- int ret, i;
+ int ret = 0, i;
ret = beiscsi_enable_pci(pcidev);
if (ret < 0) {
@@ -4903,10 +5495,20 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
goto disable_pci;
}
+ /* Enable EEH reporting */
+ ret = pci_enable_pcie_error_reporting(pcidev);
+ if (ret)
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ "BM_%d : PCIe Error Reporting "
+ "Enabling Failed\n");
+
+ pci_save_state(pcidev);
+
/* Initialize Driver configuration Paramters */
beiscsi_hba_attrs_init(phba);
phba->fw_timeout = false;
+ phba->mac_addr_set = false;
switch (pcidev->device) {
@@ -4929,20 +5531,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->generation = 0;
}
- if (enable_msix)
- find_num_cpus(phba);
- else
- phba->num_cpus = 1;
-
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BM_%d : num_cpus = %d\n",
- phba->num_cpus);
-
- if (enable_msix) {
- beiscsi_msix_enable(phba);
- if (!phba->msix_enabled)
- phba->num_cpus = 1;
- }
ret = be_ctrl_init(phba, pcidev);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4954,27 +5542,43 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
ret = beiscsi_cmd_reset_function(phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Reset Failed. Aborting Crashdump\n");
+ "BM_%d : Reset Failed\n");
goto hba_free;
}
ret = be_chk_reset_complete(phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to get out of reset."
- "Aborting Crashdump\n");
+ "BM_%d : Failed to get out of reset.\n");
goto hba_free;
}
spin_lock_init(&phba->io_sgl_lock);
spin_lock_init(&phba->mgmt_sgl_lock);
spin_lock_init(&phba->isr_lock);
+ spin_lock_init(&phba->async_pdu_lock);
ret = mgmt_get_fw_config(&phba->ctrl, phba);
if (ret != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Error getting fw config\n");
goto free_port;
}
- phba->shost->max_id = phba->fw_config.iscsi_cid_count;
+
+ if (enable_msix)
+ find_num_cpus(phba);
+ else
+ phba->num_cpus = 1;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BM_%d : num_cpus = %d\n",
+ phba->num_cpus);
+
+ if (enable_msix) {
+ beiscsi_msix_enable(phba);
+ if (!phba->msix_enabled)
+ phba->num_cpus = 1;
+ }
+
+ phba->shost->max_id = phba->params.cxns_per_ctrl;
beiscsi_get_params(phba);
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
@@ -4985,7 +5589,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_port;
}
- for (i = 0; i < MAX_MCC_CMD ; i++) {
+ for (i = 0; i < MAX_MCC_CMD; i++) {
init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
phba->ctrl.mcc_tag[i] = i + 1;
phba->ctrl.mcc_numtag[i + 1] = 0;
@@ -5089,6 +5693,12 @@ disable_pci:
return ret;
}
+static struct pci_error_handlers beiscsi_eeh_handlers = {
+ .error_detected = beiscsi_eeh_err_detected,
+ .slot_reset = beiscsi_eeh_reset,
+ .resume = beiscsi_eeh_resume,
+};
+
struct iscsi_transport beiscsi_iscsi_transport = {
.owner = THIS_MODULE,
.name = DRV_NAME,
@@ -5127,7 +5737,8 @@ static struct pci_driver beiscsi_pci_driver = {
.probe = beiscsi_dev_probe,
.remove = beiscsi_remove,
.shutdown = beiscsi_shutdown,
- .id_table = beiscsi_pci_id_table
+ .id_table = beiscsi_pci_id_table,
+ .err_handler = &beiscsi_eeh_handlers
};
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 2c06ef3c02ac..31fa27b4a9b2 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -26,6 +26,7 @@
#include <linux/in.h>
#include <linux/ctype.h>
#include <linux/module.h>
+#include <linux/aer.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -34,9 +35,8 @@
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
-#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.0.467.0"
+#define BUILD_STR "10.0.659.0"
#define BE_NAME "Emulex OneConnect " \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -66,7 +66,6 @@
#define MAX_CPUS 64
#define BEISCSI_MAX_NUM_CPUS 7
-#define OC_SKH_MAX_NUM_CPUS 31
#define BEISCSI_VER_STRLEN 32
@@ -74,6 +73,7 @@
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
+#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
@@ -97,14 +97,19 @@
#define INVALID_SESS_HANDLE 0xFFFFFFFF
-#define BE_ADAPTER_UP 0x00000000
-#define BE_ADAPTER_LINK_DOWN 0x00000001
+#define BE_ADAPTER_LINK_UP 0x001
+#define BE_ADAPTER_LINK_DOWN 0x002
+#define BE_ADAPTER_PCI_ERR 0x004
+
+#define BEISCSI_CLEAN_UNLOAD 0x01
+#define BEISCSI_EEH_UNLOAD 0x02
/**
* hardware needs the async PDU buffers to be posted in multiples of 8
* So have at least 8 of them by default
*/
-#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx)
+#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num) \
+ (phwi->phwi_ctxt->pasync_ctx[ulp_num])
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
@@ -149,29 +154,41 @@
#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
-#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
- (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id)
-#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\
- (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id)
+#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+ (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id)
+#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+ (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id)
#define PAGES_REQUIRED(x) \
((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
#define BEISCSI_MSI_NAME 20 /* size of msi_name string */
+#define MEM_DESCR_OFFSET 8
+#define BEISCSI_DEFQ_HDR 1
+#define BEISCSI_DEFQ_DATA 0
enum be_mem_enum {
HWI_MEM_ADDN_CONTEXT,
HWI_MEM_WRB,
HWI_MEM_WRBH,
HWI_MEM_SGLH,
HWI_MEM_SGE,
- HWI_MEM_ASYNC_HEADER_BUF, /* 5 */
- HWI_MEM_ASYNC_DATA_BUF,
- HWI_MEM_ASYNC_HEADER_RING,
- HWI_MEM_ASYNC_DATA_RING,
- HWI_MEM_ASYNC_HEADER_HANDLE,
- HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */
- HWI_MEM_ASYNC_PDU_CONTEXT,
+ HWI_MEM_TEMPLATE_HDR_ULP0,
+ HWI_MEM_ASYNC_HEADER_BUF_ULP0, /* 6 */
+ HWI_MEM_ASYNC_DATA_BUF_ULP0,
+ HWI_MEM_ASYNC_HEADER_RING_ULP0,
+ HWI_MEM_ASYNC_DATA_RING_ULP0,
+ HWI_MEM_ASYNC_HEADER_HANDLE_ULP0,
+ HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */
+ HWI_MEM_ASYNC_PDU_CONTEXT_ULP0,
+ HWI_MEM_TEMPLATE_HDR_ULP1,
+ HWI_MEM_ASYNC_HEADER_BUF_ULP1, /* 14 */
+ HWI_MEM_ASYNC_DATA_BUF_ULP1,
+ HWI_MEM_ASYNC_HEADER_RING_ULP1,
+ HWI_MEM_ASYNC_DATA_RING_ULP1,
+ HWI_MEM_ASYNC_HEADER_HANDLE_ULP1,
+ HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */
+ HWI_MEM_ASYNC_PDU_CONTEXT_ULP1,
ISCSI_MEM_GLOBAL_HEADER,
SE_MEM_MAX
};
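This enum layout is load-bearing: the mem_descr_index arithmetic in beiscsi_find_mem_req() and hwi_init_async_pdu_ctx() earlier in the patch assumes every ULP1 entry sits exactly MEM_DESCR_OFFSET (8) slots after its ULP0 twin. A hypothetical compile-time guard (not part of this patch) using the kernel's BUILD_BUG_ON would pin that down:

#include <linux/bug.h>

static inline void beiscsi_mem_enum_layout_check(void)
{
	BUILD_BUG_ON(HWI_MEM_TEMPLATE_HDR_ULP1 -
		     HWI_MEM_TEMPLATE_HDR_ULP0 != MEM_DESCR_OFFSET);
	BUILD_BUG_ON(HWI_MEM_ASYNC_PDU_CONTEXT_ULP1 -
		     HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 != MEM_DESCR_OFFSET);
}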
@@ -266,9 +283,49 @@ struct invalidate_command_table {
unsigned short cid;
} __packed;
+#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
+ (phwi_ctrlr->wrb_context[cri].ulp_num)
+struct hwi_wrb_context {
+ struct list_head wrb_handle_list;
+ struct list_head wrb_handle_drvr_list;
+ struct wrb_handle **pwrb_handle_base;
+ struct wrb_handle **pwrb_handle_basestd;
+ struct iscsi_wrb *plast_wrb;
+ unsigned short alloc_index;
+ unsigned short free_index;
+ unsigned short wrb_handles_available;
+ unsigned short cid;
+ uint8_t ulp_num; /* ULP to which the CID is bound */
+ uint16_t register_set;
+ uint16_t doorbell_format;
+ uint32_t doorbell_offset;
+};
+
+struct ulp_cid_info {
+ unsigned short *cid_array;
+ unsigned short avlbl_cids;
+ unsigned short cid_alloc;
+ unsigned short cid_free;
+};
+
+#include "be.h"
#define chip_be2(phba) (phba->generation == BE_GEN2)
#define chip_be3_r(phba) (phba->generation == BE_GEN3)
#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
+
+#define BEISCSI_ULP0 0
+#define BEISCSI_ULP1 1
+#define BEISCSI_ULP_COUNT 2
+#define BEISCSI_ULP0_LOADED 0x01
+#define BEISCSI_ULP1_LOADED 0x02
+
+#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \
+ (((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids)
+#define BEISCSI_ULP0_AVLBL_CID(phba) \
+ BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0)
+#define BEISCSI_ULP1_AVLBL_CID(phba) \
+ BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1)
+
struct beiscsi_hba {
struct hba_parameters params;
struct hwi_controller *phwi_ctrlr;
@@ -303,17 +360,15 @@ struct beiscsi_hba {
spinlock_t io_sgl_lock;
spinlock_t mgmt_sgl_lock;
spinlock_t isr_lock;
+ spinlock_t async_pdu_lock;
unsigned int age;
- unsigned short avlbl_cids;
- unsigned short cid_alloc;
- unsigned short cid_free;
struct list_head hba_queue;
#define BE_MAX_SESSION 2048
#define BE_SET_CID_TO_CRI(cri_index, cid) \
(phba->cid_to_cri_map[cid] = cri_index)
#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
unsigned short cid_to_cri_map[BE_MAX_SESSION];
- unsigned short *cid_array;
+ struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
struct iscsi_endpoint **ep_array;
struct beiscsi_conn **conn_table;
struct iscsi_boot_kset *boot_kset;
@@ -325,20 +380,21 @@ struct beiscsi_hba {
* group together since they are used most frequently
* for cid to cri conversion
*/
- unsigned int iscsi_cid_start;
unsigned int phys_port;
+ unsigned int eqid_count;
+ unsigned int cqid_count;
+ unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
+#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \
+ (phba->fw_config.iscsi_cid_count[ulp_num])
+ unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT];
+ unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT];
- unsigned int isr_offset;
- unsigned int iscsi_icd_start;
- unsigned int iscsi_cid_count;
- unsigned int iscsi_icd_count;
- unsigned int pci_function;
-
- unsigned short cid_alloc;
- unsigned short cid_free;
- unsigned short avlbl_cids;
unsigned short iscsi_features;
- spinlock_t cid_lock;
+ uint16_t dual_ulp_aware;
+ unsigned long ulp_supported;
} fw_config;
unsigned int state;
@@ -346,6 +402,7 @@ struct beiscsi_hba {
bool ue_detected;
struct delayed_work beiscsi_hw_check_task;
+ bool mac_addr_set;
u8 mac_address[ETH_ALEN];
char fw_ver_str[BEISCSI_VER_STRLEN];
char wq_name[20];
@@ -374,6 +431,7 @@ struct beiscsi_conn {
struct iscsi_conn *conn;
struct beiscsi_hba *phba;
u32 exp_statsn;
+ u32 doorbell_offset;
u32 beiscsi_conn_cid;
struct beiscsi_endpoint *ep;
unsigned short login_in_progress;
@@ -474,7 +532,7 @@ struct amap_iscsi_sge {
};
struct beiscsi_offload_params {
- u32 dw[5];
+ u32 dw[6];
};
#define OFFLD_PARAMS_ERL 0x00000003
@@ -504,6 +562,7 @@ struct amap_beiscsi_offload_params {
u8 max_r2t[16];
u8 pad[8];
u8 exp_statsn[32];
+ u8 max_recv_data_segment_length[32];
};
/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
@@ -567,7 +626,8 @@ struct hwi_async_pdu_context {
unsigned int buffer_size;
unsigned int num_entries;
-
+#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
+ unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
/**
* This is a varying size list! Do not add anything
* after this entry!!
@@ -885,30 +945,32 @@ struct amap_iscsi_target_context_update_wrb_v2 {
u8 first_burst_length[24]; /* DWORD 3 */
u8 rsvd3[8]; /* DOWRD 3 */
u8 max_r2t[16]; /* DWORD 4 */
- u8 rsvd4[10]; /* DWORD 4 */
+ u8 rsvd4; /* DWORD 4 */
u8 hde; /* DWORD 4 */
u8 dde; /* DWORD 4 */
u8 erl[2]; /* DWORD 4 */
+ u8 rsvd5[6]; /* DWORD 4 */
u8 imd; /* DWORD 4 */
u8 ir2t; /* DWORD 4 */
+ u8 rsvd6[3]; /* DWORD 4 */
u8 stat_sn[32]; /* DWORD 5 */
- u8 rsvd5[32]; /* DWORD 6 */
- u8 rsvd6[32]; /* DWORD 7 */
+ u8 rsvd7[32]; /* DWORD 6 */
+ u8 rsvd8[32]; /* DWORD 7 */
u8 max_recv_dataseg_len[24]; /* DWORD 8 */
- u8 rsvd7[8]; /* DWORD 8 */
- u8 rsvd8[32]; /* DWORD 9 */
- u8 rsvd9[32]; /* DWORD 10 */
+ u8 rsvd9[8]; /* DWORD 8 */
+ u8 rsvd10[32]; /* DWORD 9 */
+ u8 rsvd11[32]; /* DWORD 10 */
u8 max_cxns[16]; /* DWORD 11 */
- u8 rsvd10[11]; /* DWORD 11*/
+ u8 rsvd12[11]; /* DWORD 11*/
u8 invld; /* DWORD 11 */
- u8 rsvd11;/* DWORD 11*/
+ u8 rsvd13;/* DWORD 11*/
u8 dmsg; /* DWORD 11 */
u8 data_seq_inorder; /* DWORD 11 */
u8 pdu_seq_inorder; /* DWORD 11 */
- u8 rsvd12[32]; /*DWORD 12 */
- u8 rsvd13[32]; /* DWORD 13 */
- u8 rsvd14[32]; /* DWORD 14 */
- u8 rsvd15[32]; /* DWORD 15 */
+ u8 rsvd14[32]; /*DWORD 12 */
+ u8 rsvd15[32]; /* DWORD 13 */
+ u8 rsvd16[32]; /* DWORD 14 */
+ u8 rsvd17[32]; /* DWORD 15 */
} __packed;
@@ -919,6 +981,10 @@ struct be_ring {
u32 cidx; /* consumer index */
u32 pidx; /* producer index -- not used by most rings */
u32 item_size; /* size in bytes of one object */
+ u8 ulp_num; /* ULP to which the CID is bound */
+ u16 register_set;
+ u16 doorbell_format;
+ u32 doorbell_offset;
void *va; /* The virtual address of the ring. This
* should be last to allow 32 & 64 bit debugger
@@ -926,18 +992,6 @@ struct be_ring {
*/
};
-struct hwi_wrb_context {
- struct list_head wrb_handle_list;
- struct list_head wrb_handle_drvr_list;
- struct wrb_handle **pwrb_handle_base;
- struct wrb_handle **pwrb_handle_basestd;
- struct iscsi_wrb *plast_wrb;
- unsigned short alloc_index;
- unsigned short free_index;
- unsigned short wrb_handles_available;
- unsigned short cid;
-};
-
struct hwi_controller {
struct list_head io_sgl_list;
struct list_head eh_sgl_list;
@@ -946,8 +1000,8 @@ struct hwi_controller {
struct hwi_wrb_context *wrb_context;
struct mcc_wrb *pmcc_wrb_base;
- struct be_ring default_pdu_hdr;
- struct be_ring default_pdu_data;
+ struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
+ struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
struct hwi_context_memory *phwi_ctxt;
};
@@ -978,11 +1032,10 @@ struct hwi_context_memory {
struct be_eq_obj be_eq[MAX_CPUS];
struct be_queue_info be_cq[MAX_CPUS - 1];
- struct be_queue_info be_def_hdrq;
- struct be_queue_info be_def_dataq;
-
struct be_queue_info *be_wrbq;
- struct hwi_async_pdu_context *pasync_ctx;
+ struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
+ struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
+ struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
};
/* Logging related definitions */
@@ -992,6 +1045,7 @@ struct hwi_context_memory {
#define BEISCSI_LOG_EH 0x0008 /* Error Handler */
#define BEISCSI_LOG_IO 0x0010 /* IO Code Path */
#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
+#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */
#define beiscsi_log(phba, level, mask, fmt, arg...) \
do { \
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 245a9595a93a..b2fcac78feaa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -278,6 +278,18 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
return tag;
}
+/**
+ * mgmt_get_fw_config()- Get the FW config for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the FW config and resources available for the function.
+ * The resources are created based on the count received here.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)
{
@@ -291,31 +303,79 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
status = be_mbox_notify(ctrl);
if (!status) {
+ uint8_t ulp_num = 0;
struct be_fw_cfg *pfw_cfg;
pfw_cfg = req;
+
+ if (!is_chip_be2_be3r(phba)) {
+ phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+ phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_INIT,
+ "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+ phba->fw_config.eqid_count,
+ phba->fw_config.cqid_count);
+ }
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (pfw_cfg->ulp[ulp_num].ulp_mode &
+ BEISCSI_ULP_ISCSI_INI_MODE)
+ set_bit(ulp_num,
+ &phba->fw_config.ulp_supported);
+
phba->fw_config.phys_port = pfw_cfg->phys_port;
- phba->fw_config.iscsi_icd_start =
- pfw_cfg->ulp[0].icd_base;
- phba->fw_config.iscsi_icd_count =
- pfw_cfg->ulp[0].icd_count;
- phba->fw_config.iscsi_cid_start =
- pfw_cfg->ulp[0].sq_base;
- phba->fw_config.iscsi_cid_count =
- pfw_cfg->ulp[0].sq_count;
- if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : FW reported MAX CXNS as %d\t"
- "Max Supported = %d.\n",
- phba->fw_config.iscsi_cid_count,
- BE2_MAX_SESSIONS);
- phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+ phba->fw_config.iscsi_cid_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_base;
+ phba->fw_config.iscsi_cid_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_count;
+
+ phba->fw_config.iscsi_icd_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_base;
+ phba->fw_config.iscsi_icd_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_count;
+
+ phba->fw_config.iscsi_chain_start[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_base;
+ phba->fw_config.iscsi_chain_count[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_count;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Function loaded on ULP : %d\n"
+ "\tiscsi_cid_count : %d\n"
+ "\tiscsi_cid_start : %d\n"
+ "\t iscsi_icd_count : %d\n"
+ "\t iscsi_icd_start : %d\n",
+ ulp_num,
+ phba->fw_config.
+ iscsi_cid_count[ulp_num],
+ phba->fw_config.
+ iscsi_cid_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ phba->fw_config.
+ iscsi_icd_start[ulp_num]);
+ }
}
+
+ phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+ BEISCSI_FUNC_DUA_MODE);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : DUA Mode : 0x%x\n",
+ phba->fw_config.dual_ulp_aware);
+
} else {
- beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : Failed in mgmt_get_fw_config\n");
+ status = -EINVAL;
}
spin_unlock(&ctrl->mbox_lock);
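The per-ULP bookkeeping introduced here is an ordinary bitmap keyed by ULP number. A minimal standalone sketch of the pattern, in plain C with set_bit()/test_bit() modeled as raw bit operations (BEISCSI_ULP_COUNT and the INI-mode flag value are assumptions, not taken from this hunk):

	#include <stdio.h>

	#define BEISCSI_ULP_COUNT 2             /* assumed, matching the driver */
	#define BEISCSI_ULP_ISCSI_INI_MODE 0x10 /* assumed flag value */

	int main(void)
	{
		unsigned long ulp_supported = 0;
		unsigned int ulp_mode[BEISCSI_ULP_COUNT] = { 0x10, 0x00 };

		/* set_bit(): record each ULP the firmware reports as initiator-capable */
		for (int ulp = 0; ulp < BEISCSI_ULP_COUNT; ulp++)
			if (ulp_mode[ulp] & BEISCSI_ULP_ISCSI_INI_MODE)
				ulp_supported |= 1UL << ulp;

		/* test_bit(): later per-ULP loops only touch supported ULPs */
		for (int ulp = 0; ulp < BEISCSI_ULP_COUNT; ulp++)
			printf("ULP%d supported: %s\n", ulp,
			       (ulp_supported & (1UL << ulp)) ? "yes" : "no");
		return 0;
	}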
@@ -448,7 +508,16 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
return tag;
}
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
+/**
+ * mgmt_epfw_cleanup()- Inform FW to clean up data structures.
+ * @phba: pointer to dev priv structure
+ * @ulp_num: ULP number.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero Value
+ **/
+int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
@@ -462,9 +531,9 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
- req->chute = chute;
- req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba));
- req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba));
+ req->chute = (1 << ulp_num);
+ req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
+ req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
status = be_mcc_notify_wait(phba);
if (status)
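With the chute derived as (1 << ulp_num), the cleanup command now addresses exactly one ULP's default rings. A trivial standalone check of the one-hot encoding:

	#include <stdio.h>

	int main(void)
	{
		/* chute selects exactly one ULP's rings for cleanup */
		for (unsigned short ulp_num = 0; ulp_num < 2; ulp_num++)
			printf("ulp_num %u -> chute 0x%x\n", ulp_num, 1 << ulp_num);
		return 0; /* prints 0x1 then 0x2 */
	}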
@@ -585,6 +654,16 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
return tag;
}
+/**
+ * mgmt_open_connection()- Establish a TCP CXN
+ * @phba: ptr to the dev priv structure
+ * @dst_addr: Destination Address
+ * @beiscsi_ep: ptr to device endpoint struct
+ * @nonemb_cmd: ptr to memory allocated for command
+ *
+ * return
+ * Success: Tag number of the MBX Command issued
+ * Failure: Error code
+ **/
int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep,
@@ -602,14 +681,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
struct phys_addr template_address = { 0, 0 };
struct phys_addr *ptemplate_address;
unsigned int tag = 0;
- unsigned int i;
+ unsigned int i, ulp_num;
unsigned short cid = beiscsi_ep->ep_cid;
struct be_sge *sge;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
- def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba);
+
+ ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num;
+
+ def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num);
+ def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num);
ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
@@ -748,11 +830,14 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
if (rc) {
+ /* Check if the IOCTL needs to be re-issued */
+ if (rc == -EAGAIN)
+ return rc;
+
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
- rc = -EIO;
goto free_cmd;
}
@@ -861,7 +946,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
uint32_t boot_proto)
{
struct be_cmd_get_def_gateway_resp gtway_addr_set;
- struct be_cmd_get_if_info_resp if_info;
+ struct be_cmd_get_if_info_resp *if_info;
struct be_cmd_set_dhcp_req *dhcpreq;
struct be_cmd_rel_dhcp_req *reldhcp;
struct be_dma_mem nonemb_cmd;
@@ -872,16 +957,17 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
if (mgmt_get_all_if_id(phba))
return -EIO;
- memset(&if_info, 0, sizeof(if_info));
ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
BE2_IPV6 : BE2_IPV4 ;
rc = mgmt_get_if_info(phba, ip_type, &if_info);
- if (rc)
+ if (rc) {
+ kfree(if_info);
return rc;
+ }
if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
- if (if_info.dhcp_state) {
+ if (if_info->dhcp_state) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : DHCP Already Enabled\n");
return 0;
@@ -894,9 +980,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
IP_V6_LEN : IP_V4_LEN;
} else {
- if (if_info.dhcp_state) {
+ if (if_info->dhcp_state) {
- memset(&if_info, 0, sizeof(if_info));
+ memset(if_info, 0, sizeof(*if_info));
rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
sizeof(*reldhcp));
@@ -919,8 +1005,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
}
/* Delete the Static IP Set */
- if (if_info.ip_addr.addr[0]) {
- rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL,
+ if (if_info->ip_addr.addr[0]) {
+ rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
IP_ACTION_DEL);
if (rc)
return rc;
@@ -966,7 +1052,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
} else {
- return mgmt_static_ip_modify(phba, &if_info, ip_param,
+ return mgmt_static_ip_modify(phba, if_info, ip_param,
subnet_param, IP_ACTION_ADD);
}
@@ -1031,27 +1117,64 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
}
int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
- struct be_cmd_get_if_info_resp *if_info)
+ struct be_cmd_get_if_info_resp **if_info)
{
struct be_cmd_get_if_info_req *req;
struct be_dma_mem nonemb_cmd;
+ uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
int rc;
if (mgmt_get_all_if_id(phba))
return -EIO;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
- sizeof(*if_info));
- if (rc)
- return rc;
+ do {
+ rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+ ioctl_size);
+ if (rc)
+ return rc;
- req = nonemb_cmd.va;
- req->interface_hndl = phba->interface_handle;
- req->ip_type = ip_type;
+ req = nonemb_cmd.va;
+ req->interface_hndl = phba->interface_handle;
+ req->ip_type = ip_type;
+
+ /* Allocate memory for if_info */
+ *if_info = kzalloc(ioctl_size, GFP_KERNEL);
+ if (!*if_info) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BG_%d : Memory Allocation Failure\n");
+
+ /* Free the DMA memory for the IOCTL issuing */
+ pci_free_consistent(phba->ctrl.pdev,
+ nonemb_cmd.size,
+ nonemb_cmd.va,
+ nonemb_cmd.dma);
+ return -ENOMEM;
+ }
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info,
+ ioctl_size);
+
+ /* Check if the error is due to an insufficient buffer */
+ if (rc == -EAGAIN) {
+
+ /* Get the new memory size */
+ ioctl_size = ((struct be_cmd_resp_hdr *)
+ nonemb_cmd.va)->actual_resp_len;
+ ioctl_size += sizeof(struct be_cmd_req_hdr);
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info,
- sizeof(*if_info));
+ /* Free the previous allocated DMA memory */
+ pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ nonemb_cmd.va,
+ nonemb_cmd.dma);
+
+ /* Free the virtual memory */
+ kfree(*if_info);
+ } else
+ break;
+ } while (true);
+ return rc;
}
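The loop above implements a grow-and-retry protocol: if the firmware answers "insufficient buffer" (surfaced as -EAGAIN), the required length is read back from actual_resp_len and the buffers are reallocated at the larger size before reissuing. A standalone sketch of that protocol, with the firmware exchange simulated:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* pretend firmware: needs 300 bytes, reports it via *needed */
	static int issue_ioctl(void *buf, size_t size, size_t *needed)
	{
		*needed = 300;
		if (size < *needed)
			return -EAGAIN;         /* insufficient buffer */
		memset(buf, 0xab, *needed);     /* "response" payload */
		return 0;
	}

	int main(void)
	{
		size_t ioctl_size = 64, needed;
		void *buf;
		int rc;

		do {
			buf = calloc(1, ioctl_size);
			if (!buf)
				return ENOMEM;
			rc = issue_ioctl(buf, ioctl_size, &needed);
			if (rc == -EAGAIN) {
				ioctl_size = needed;    /* grow to reported size */
				free(buf);
				continue;
			}
			break;
		} while (1);

		printf("done, rc=%d size=%zu\n", rc, ioctl_size);
		free(buf);
		return 0;
	}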
int mgmt_get_nic_conf(struct beiscsi_hba *phba,
@@ -1281,7 +1404,7 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
}
/**
- * beiscsi_active_cid_disp()- Display Sessions Active
+ * beiscsi_active_session_disp()- Display Sessions Active
* @dev: ptr to device not used.
* @attr: device attribute, not used.
* @buf: contains formatted text Session Count
@@ -1290,14 +1413,56 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
* size of the formatted string
**/
ssize_t
-beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+ avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
+ total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ (total_cids - avlbl_cids));
+ } else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
+ }
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+ return len;
+}
+
+/**
+ * beiscsi_free_session_disp()- Display Available Sessions
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+ uint16_t ulp_num, len = 0;
+
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+ else
+ len += snprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
+ }
+
+ return len;
}
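Both show routines build their output with the same accumulate-with-snprintf idiom, bounding each write by the space remaining in the page. A standalone model of the idiom (a fixed buffer stands in for the PAGE_SIZE sysfs page):

	#include <stdio.h>

	int main(void)
	{
		char buf[4096]; /* stands in for the PAGE_SIZE sysfs buffer */
		int len = 0, total[2] = { 128, 0 }, avlbl[2] = { 100, 0 };

		for (int ulp = 0; ulp < 2; ulp++)
			len += snprintf(buf + len, sizeof(buf) - len,
					"ULP%d : %d\n", ulp,
					total[ulp] - avlbl[ulp]);

		fputs(buf, stdout);     /* show() would return len */
		return 0;
	}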
/**
@@ -1338,6 +1503,25 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
}
}
+/**
+ * beiscsi_phys_port_disp()- Display Physical Port Identifier
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text port identifier
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n",
+ phba->fw_config.phys_port);
+}
void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle,
@@ -1411,10 +1595,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
memset(pwrb, 0, sizeof(*pwrb));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- max_burst_length, pwrb, params->dw[offsetof
- (struct amap_beiscsi_offload_params,
- max_burst_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
max_burst_length, pwrb, params->dw[offsetof
(struct amap_beiscsi_offload_params,
@@ -1436,7 +1616,9 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
params->dw[offsetof(struct amap_beiscsi_offload_params,
first_burst_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
- max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN);
+ max_recv_dataseg_len, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_recv_data_segment_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
max_cxns, pwrb, BEISCSI_MAX_CXNS);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 04af7e74fe48..01b8c97284c0 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -294,7 +294,7 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
struct be_cmd_get_nic_conf_resp *mac);
int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
- struct be_cmd_get_if_info_resp *if_info);
+ struct be_cmd_get_if_info_resp **if_info);
int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
struct be_cmd_get_def_gateway_resp *gateway);
@@ -315,12 +315,19 @@ ssize_t beiscsi_drvr_ver_disp(struct device *dev,
ssize_t beiscsi_fw_ver_disp(struct device *dev,
struct device_attribute *attr, char *buf);
-ssize_t beiscsi_active_cid_disp(struct device *dev,
- struct device_attribute *attr, char *buf);
+ssize_t beiscsi_active_session_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
ssize_t beiscsi_adap_family_disp(struct device *dev,
struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_free_session_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_phys_port_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle,
struct be_mem_descriptor *mem_descr);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 9611195d6703..f8ca7becacca 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
-#define BFAD_FW_FILE_CB "cbfw-3.2.1.0.bin"
-#define BFAD_FW_FILE_CT "ctfw-3.2.1.0.bin"
-#define BFAD_FW_FILE_CT2 "ct2fw-3.2.1.0.bin"
+#define BFAD_FW_FILE_CB "cbfw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CT "ctfw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 9796284512a9..9967f9c14851 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -206,7 +206,7 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
spin_lock_irqsave(&bfad->bfad_lock, flags);
hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
if (!hal_io) {
- /* IO has been completed, retrun success */
+ /* IO has been completed, return success */
rc = SUCCESS;
goto out;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 08b22a901c25..1ebf3fb683e6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.14"
+#define BNX2FC_VERSION "2.4.1"
#define PFX "bnx2fc: "
@@ -105,7 +105,7 @@
#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
-#define BNX2FC_5771X_DB_PAGE_SIZE 128
+#define BNX2X_DB_SHIFT 3
#define BNX2FC_TASK_SIZE 128
#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 23f6eea3fa7a..9b948505d118 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Mar 08, 2013"
+#define DRV_MODULE_RELDATE "Sep 17, 2013"
static char version[] =
@@ -2004,6 +2004,24 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
+/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */
+static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
+{
+ struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+ if (interface->enabled == true) {
+ if (!ctlr->lp) {
+ pr_err(PFX "__bnx2fc_disable: lport not found\n");
+ return -ENODEV;
+ } else {
+ interface->enabled = false;
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
+ }
+ }
+ return 0;
+}
+
/**
* Deprecated: Use bnx2fc_enabled()
*/
@@ -2018,20 +2036,34 @@ static int bnx2fc_disable(struct net_device *netdev)
interface = bnx2fc_interface_lookup(netdev);
ctlr = bnx2fc_to_ctlr(interface);
- if (!interface || !ctlr->lp) {
+
+ if (!interface) {
rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
+ pr_err(PFX "bnx2fc_disable: interface not found\n");
} else {
- interface->enabled = false;
- fcoe_ctlr_link_down(ctlr);
- fcoe_clean_pending_queue(ctlr->lp);
+ rc = __bnx2fc_disable(ctlr);
}
-
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
return rc;
}
+static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
+{
+ struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+ if (interface->enabled == false) {
+ if (!ctlr->lp) {
+ pr_err(PFX "__bnx2fc_enable: lport not found\n");
+ return -ENODEV;
+ } else if (!bnx2fc_link_ok(ctlr->lp)) {
+ fcoe_ctlr_link_up(ctlr);
+ interface->enabled = true;
+ }
+ }
+ return 0;
+}
+
/**
* Deprecated: Use bnx2fc_enabled()
*/
@@ -2046,12 +2078,11 @@ static int bnx2fc_enable(struct net_device *netdev)
interface = bnx2fc_interface_lookup(netdev);
ctlr = bnx2fc_to_ctlr(interface);
- if (!interface || !ctlr->lp) {
+ if (!interface) {
rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
- } else if (!bnx2fc_link_ok(ctlr->lp)) {
- fcoe_ctlr_link_up(ctlr);
- interface->enabled = true;
+ pr_err(PFX "bnx2fc_enable: interface not found\n");
+ } else {
+ rc = __bnx2fc_enable(ctlr);
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -2072,14 +2103,12 @@ static int bnx2fc_enable(struct net_device *netdev)
static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
{
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
- struct fc_lport *lport = ctlr->lp;
- struct net_device *netdev = bnx2fc_netdev(lport);
switch (cdev->enabled) {
case FCOE_CTLR_ENABLED:
- return bnx2fc_enable(netdev);
+ return __bnx2fc_enable(ctlr);
case FCOE_CTLR_DISABLED:
- return bnx2fc_disable(netdev);
+ return __bnx2fc_disable(ctlr);
case FCOE_CTLR_UNUSED:
default:
return -ENOTSUPP;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index c0d035a8f8f9..46a37657307f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1421,8 +1421,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
- reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
- (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
+ reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
if (!tgt->ctx_base)
return -ENOMEM;
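The mapping now derives the doorbell offset from a shift rather than a hard-coded 128-byte page, i.e. (1 << 3) = 8 bytes per context, and drops the DPM trigger bias. A standalone check of the arithmetic:

	#include <stdio.h>

	#define BNX2X_DB_SHIFT 3

	int main(void)
	{
		unsigned int context_id = 0x12345;      /* sample CID */
		unsigned long reg_off =
			(1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);

		printf("reg_off = 0x%lx\n", reg_off);   /* 0x91a28 */
		return 0;
	}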
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 575142e92d9c..ed880891cb7c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1246,6 +1246,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
rc = bnx2fc_expl_logo(lport, io_req);
+ /* This only occurs when a task abort was requested while ABTS
+  * is in progress. Setting the IO_CLEANUP flag will skip the
+  * RRQ process in the case where the fw-generated SCSI_CMD cmpl
+  * resulted from the ABTS request rather than the CLEANUP
+  * request.
+  */
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
goto out;
}
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 25093a04123b..3d33767f2f2c 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index f2db5fe7bdc2..7052a839b0ea 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -581,8 +581,10 @@ struct iscsi_kwqe_init1 {
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
-#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
-#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
u16 cq_num_wqes;
#elif defined(__LITTLE_ENDIAN)
u16 cq_num_wqes;
@@ -593,8 +595,10 @@ struct iscsi_kwqe_init1 {
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
-#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
-#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
u8 cq_log_wqes_per_page;
#endif
#if defined(__BIG_ENDIAN)
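Bit 6 of the flags byte, formerly half of a two-bit reserved field, now enables TCP timestamps; bit 7 stays reserved. A standalone check of the new mask/shift pair (values copied from this hunk):

	#include <stdio.h>

	#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE       (0x1 << 6)
	#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
	#define ISCSI_KWQE_INIT1_RESERVED1                (0x1 << 7)

	int main(void)
	{
		unsigned char flags = 0;

		/* set via the shift... */
		flags |= 0x1 << ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT;

		/* ...test via the mask; the reserved bit stays clear */
		printf("ts=%d rsvd=%d\n",
		       !!(flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE),
		       !!(flags & ISCSI_KWQE_INIT1_RESERVED1));
		return 0; /* prints "ts=1 rsvd=0" */
	}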
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index f109e3b073c3..c73bbcb63c02 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -64,7 +64,7 @@
#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
-#define BNX2I_5771X_DBELL_PAGE_SIZE 128
+#define BNX2X_DB_SHIFT 3
/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
#define MAX_BD_LENGTH 65535
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index a28b03e5a5f6..5be718c241c4 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -2738,8 +2738,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
reg_base = pci_resource_start(ep->hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
- reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) +
- DPM_TRIGER_TYPE;
+ reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
goto arm_cq;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 50fef6963a81..34c294b42c84 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.7.2.2"
-#define DRV_MODULE_RELDATE "Apr 25, 2012"
+#define DRV_MODULE_VERSION "2.7.6.2"
+#define DRV_MODULE_RELDATE "Jun 06, 2013"
static char version[] =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -172,16 +172,14 @@ void bnx2i_start(void *handle)
struct bnx2i_hba *hba = handle;
int i = HZ;
- /*
- * We should never register devices that don't support iSCSI
- * (see bnx2i_init_one), so something is wrong if we try to
- * start a iSCSI adapter on hardware with 0 supported iSCSI
- * connections
+ /* On some bnx2x devices, it is possible that iSCSI is no
+ * longer supported after firmware is downloaded. In that
+ * case, the iscsi_init_msg will return failure.
*/
- BUG_ON(!hba->cnic->max_iscsi_conn);
bnx2i_send_fw_iscsi_init_msg(hba);
- while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) &&
+ !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 0056e47bd56e..fabeb88602ac 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
/*
* bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index c61cf7a43658..a0a3d9fe61fe 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2004 - 2012 Broadcom Corporation
+ * Copyright (c) 2004 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 80fa99b3d384..8135f04671af 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -658,11 +658,11 @@ static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
static inline void *cxgbi_alloc_big_mem(unsigned int size,
gfp_t gfp)
{
- void *p = kmalloc(size, gfp);
+ void *p = kzalloc(size, gfp | __GFP_NOWARN);
+
if (!p)
- p = vmalloc(size);
- if (p)
- memset(p, 0, size);
+ p = vzalloc(size);
+
return p;
}
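The rewrite keeps the same try-kmalloc-then-vmalloc fallback, but the zeroing allocators replace the explicit memset() and __GFP_NOWARN silences the expected large-allocation failure. A plain-C model of the control flow (try_small()/try_big() are stand-ins, not kernel API):

	#include <stdlib.h>

	static void *try_small(size_t size)
	{
		/* models kzalloc(size, gfp | __GFP_NOWARN): may fail quietly
		 * for large sizes */
		return size <= 4096 ? calloc(1, size) : NULL;
	}

	static void *try_big(size_t size)
	{
		return calloc(1, size);         /* models vzalloc(size) */
	}

	static void *alloc_big_mem(size_t size)
	{
		void *p = try_small(size);

		if (!p)
			p = try_big(size);      /* fallback path */
		return p;                       /* already zeroed either way */
	}

	int main(void)
	{
		void *p = alloc_big_mem(1 << 20); /* forces the fallback */

		free(p);
		return 0;
	}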
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 694e13c45dfd..42e8624a9b9a 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -308,6 +308,8 @@ struct AdapterCtlBlk {
struct timer_list waiting_timer;
struct timer_list selto_timer;
+ unsigned long last_reset;
+
u16 srb_count;
u8 sel_timeout;
@@ -860,9 +862,9 @@ static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
init_timer(&acb->waiting_timer);
acb->waiting_timer.function = waiting_timeout;
acb->waiting_timer.data = (unsigned long) acb;
- if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2))
+ if (time_before(jiffies + to, acb->last_reset - HZ / 2))
acb->waiting_timer.expires =
- acb->scsi_host->last_reset - HZ / 2 + 1;
+ acb->last_reset - HZ / 2 + 1;
else
acb->waiting_timer.expires = jiffies + to + 1;
add_timer(&acb->waiting_timer);
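These comparisons all rely on the kernel's wraparound-safe jiffies macros; moving last_reset from the Scsi_Host into the ACB does not change the arithmetic. A standalone model of time_before() (the macro shown matches the kernel's signed-difference definition):

	#include <stdio.h>

	#define time_before(a, b) ((long)((a) - (b)) < 0)

	int main(void)
	{
		unsigned long jiffies = (unsigned long)-10; /* near wraparound */
		unsigned long last_reset = jiffies + 30;    /* wraps past zero */

		/* still correct across the wrap: prints 1 */
		printf("%d\n", time_before(jiffies + 5, last_reset));
		return 0;
	}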
@@ -1319,7 +1321,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
udelay(500);
/* We may be in serious trouble. Wait some seconds */
- acb->scsi_host->last_reset =
+ acb->last_reset =
jiffies + 3 * HZ / 2 +
HZ * acb->eeprom.delay_time;
@@ -1462,9 +1464,9 @@ static void selto_timer(struct AdapterCtlBlk *acb)
acb->selto_timer.function = selection_timeout_missed;
acb->selto_timer.data = (unsigned long) acb;
if (time_before
- (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2))
+ (jiffies + HZ, acb->last_reset + HZ / 2))
acb->selto_timer.expires =
- acb->scsi_host->last_reset + HZ / 2 + 1;
+ acb->last_reset + HZ / 2 + 1;
else
acb->selto_timer.expires = jiffies + HZ + 1;
add_timer(&acb->selto_timer);
@@ -1535,7 +1537,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
}
/* Allow starting of SCSI commands half a second before we allow the mid-level
* to queue them again after a reset */
- if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) {
+ if (time_before(jiffies, acb->last_reset - HZ / 2)) {
dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
return 1;
}
@@ -3031,7 +3033,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
dprintkl(KERN_ERR, "disconnect: No such device\n");
udelay(500);
/* Suspend queue for a while */
- acb->scsi_host->last_reset =
+ acb->last_reset =
jiffies + HZ / 2 +
HZ * acb->eeprom.delay_time;
clear_fifo(acb, "disconnectEx");
@@ -3053,7 +3055,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
waiting_process_next(acb);
} else if (srb->state & SRB_ABORT_SENT) {
dcb->flag &= ~ABORT_DEV_;
- acb->scsi_host->last_reset = jiffies + HZ / 2 + 1;
+ acb->last_reset = jiffies + HZ / 2 + 1;
dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
waiting_process_next(acb);
@@ -3649,7 +3651,7 @@ static void scsi_reset_detect(struct AdapterCtlBlk *acb)
/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
udelay(500);
/* Maybe we locked up the bus? Then lets wait even longer ... */
- acb->scsi_host->last_reset =
+ acb->last_reset =
jiffies + 5 * HZ / 2 +
HZ * acb->eeprom.delay_time;
@@ -4426,7 +4428,7 @@ static void adapter_init_scsi_host(struct Scsi_Host *host)
host->dma_channel = -1;
host->unique_id = acb->io_port_base;
host->irq = acb->irq_level;
- host->last_reset = jiffies;
+ acb->last_reset = jiffies;
host->max_id = 16;
if (host->max_id - 1 == eeprom->scsi_id)
@@ -4484,7 +4486,7 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
/*spin_unlock_irq (&io_request_lock); */
udelay(500);
- acb->scsi_host->last_reset =
+ acb->last_reset =
jiffies + HZ / 2 +
HZ * acb->eeprom.delay_time;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 68adb8955d2d..5248c888552b 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -481,6 +481,11 @@ static int alua_check_sense(struct scsi_device *sdev,
* Power On, Reset, or Bus Device Reset, just retry.
*/
return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
+ /*
+ * Device internal reset
+ */
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
/*
* Mode Parameters Changed
@@ -517,12 +522,13 @@ static int alua_check_sense(struct scsi_device *sdev,
/*
* alua_rtpg - Evaluate REPORT TARGET GROUP STATES
* @sdev: the device to be evaluated.
+ * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state
*
* Evaluate the Target Port Group State.
* Returns SCSI_DH_DEV_OFFLINED if the path is
* found to be unusable.
*/
-static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)
{
struct scsi_sense_hdr sense_hdr;
int len, k, off, valid_states = 0;
@@ -594,7 +600,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
else
h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
- if (orig_transition_tmo != h->transition_tmo) {
+ if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
sdev_printk(KERN_INFO, sdev,
"%s: transition timeout set to %d seconds\n",
ALUA_DH_NAME, h->transition_tmo);
@@ -632,14 +638,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
switch (h->state) {
case TPGS_STATE_TRANSITIONING:
- if (time_before(jiffies, expiry)) {
- /* State transition, retry */
- interval += 2000;
- msleep(interval);
- goto retry;
+ if (wait_for_transition) {
+ if (time_before(jiffies, expiry)) {
+ /* State transition, retry */
+ interval += 2000;
+ msleep(interval);
+ goto retry;
+ }
+ err = SCSI_DH_RETRY;
+ } else {
+ err = SCSI_DH_OK;
}
+
/* Transitioning time exceeded, set port to standby */
- err = SCSI_DH_RETRY;
h->state = TPGS_STATE_STANDBY;
break;
case TPGS_STATE_OFFLINE:
@@ -673,7 +684,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
if (err != SCSI_DH_OK)
goto out;
- err = alua_rtpg(sdev, h);
+ err = alua_rtpg(sdev, h, 0);
if (err != SCSI_DH_OK)
goto out;
@@ -733,7 +744,7 @@ static int alua_activate(struct scsi_device *sdev,
int err = SCSI_DH_OK;
int stpg = 0;
- err = alua_rtpg(sdev, h);
+ err = alua_rtpg(sdev, h, 1);
if (err != SCSI_DH_OK)
goto out;
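When wait_for_transition is set, alua_rtpg() polls with a linearly growing msleep() interval until the deadline passes. A sketch of that bounded backoff loop, with jiffies/msleep() simulated by a millisecond counter (the 60 s expiry is an assumption based on ALUA_FAILOVER_TIMEOUT):

	#include <stdio.h>

	int main(void)
	{
		int elapsed = 0, interval = 0;
		const int expiry = 60000;       /* assumed failover timeout, ms */
		int transitioning = 1, tries = 0;

		while (transitioning) {
			if (elapsed >= expiry) {        /* deadline: give up */
				puts("retry later (SCSI_DH_RETRY)");
				break;
			}
			interval += 2000;               /* linear backoff */
			elapsed += interval;            /* stands in for msleep() */
			if (++tries == 4)
				transitioning = 0;      /* device settled */
		}
		if (!transitioning)
			printf("settled after %d tries, %d ms\n", tries, elapsed);
		return 0;
	}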
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 69c915aa77c2..4b9cf93f3fb6 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -786,6 +786,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1742"},
{"IBM", "1745"},
{"IBM", "1746"},
+ {"IBM", "1813"},
{"IBM", "1814"},
{"IBM", "1815"},
{"IBM", "1818"},
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 19e1b422260a..c0ae8fa57a3b 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -448,19 +448,8 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
}
rmb();
- /*
- * TODO: I need to block here if I am processing ioctl cmds
- * but if the outstanding cmds all finish before the ioctl,
- * the scsi-core will not know to start sending cmds to me again.
- * I need to a way to restart the scsi-cores queues or should I block
- * calling scsi_done on the outstanding cmds instead
- * for now we don't set the IOCTL state
- */
- if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
- pHba->host->last_reset = jiffies;
- pHba->host->resetting = 1;
- return 1;
- }
+ if ((pHba->state) & DPTI_STATE_RESET)
+ return SCSI_MLQUEUE_HOST_BUSY;
// TODO if the cmd->device is offline then I may need to issue a bus rescan
// followed by a get_lct to see if the device is there anymore
@@ -1811,21 +1800,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
}
do {
- if(pHba->host)
+ /*
+ * Stop any new commands from entering the
+ * controller while processing the ioctl
+ */
+ if (pHba->host) {
+ scsi_block_requests(pHba->host);
spin_lock_irqsave(pHba->host->host_lock, flags);
- // This state stops any new commands from enterring the
- // controller while processing the ioctl
-// pHba->state |= DPTI_STATE_IOCTL;
-// We can't set this now - The scsi subsystem sets host_blocked and
-// the queue empties and stops. We need a way to restart the queue
+ }
rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
if (rcode != 0)
printk("adpt_i2o_passthru: post wait failed %d %p\n",
rcode, reply);
-// pHba->state &= ~DPTI_STATE_IOCTL;
- if(pHba->host)
+ if (pHba->host) {
spin_unlock_irqrestore(pHba->host->host_lock, flags);
- } while(rcode == -ETIMEDOUT);
+ scsi_unblock_requests(pHba->host);
+ }
+ } while (rcode == -ETIMEDOUT);
if(rcode){
goto cleanup;
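The ioctl path now brackets the firmware post with scsi_block_requests()/scsi_unblock_requests() instead of the removed DPTI_STATE_IOCTL flag, so the midlayer restarts the queue by itself. A plain-C model of the bracket-and-retry shape (block()/unblock()/post_wait() are stand-ins, with the host lock elided):

	#include <errno.h>
	#include <stdio.h>

	static void block(void)    { puts("queue blocked");   } /* scsi_block_requests() */
	static void unblock(void)  { puts("queue unblocked"); } /* scsi_unblock_requests() */
	static int post_wait(void) { return 0; }                /* -ETIMEDOUT would loop */

	int main(void)
	{
		int rcode;

		do {
			block();                /* keep new commands out of the HBA */
			rcode = post_wait();    /* issue the passthru message */
			unblock();              /* let the midlayer queue again */
		} while (rcode == -ETIMEDOUT);

		return rcode;
	}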
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index beded716f93f..aeb046186c84 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -202,7 +202,6 @@ struct adpt_channel {
// HBA state flags
#define DPTI_STATE_RESET (0x01)
-#define DPTI_STATE_IOCTL (0x02)
typedef struct _adpt_hba {
struct _adpt_hba *next;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 356def44ce58..1663173cdb91 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -919,7 +919,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
find_pio_EISA(&gc);
find_pio_ISA(&gc);
- for (i = 0; i <= MAXIRQ; i++)
+ for (i = 0; i < MAXIRQ; i++)
if (reg_IRQ[i])
request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
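The one-character fix above is a classic off-by-one: with reg_IRQ sized MAXIRQ, index MAXIRQ is one past the end, so the loop bound must be "<". A standalone illustration (the array size matching MAXIRQ is an assumption based on the driver):

	#include <stdio.h>

	#define MAXIRQ 16

	int main(void)
	{
		int reg_IRQ[MAXIRQ] = { 0 };    /* valid indices 0 .. MAXIRQ-1 */
		int checked = 0;

		for (int i = 0; i < MAXIRQ; i++)  /* "<", not "<=": no overrun */
			if (!reg_IRQ[i])
				checked++;

		printf("checked %d slots\n", checked); /* 16 */
		return 0;
	}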
diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig
new file mode 100644
index 000000000000..78fdbfd9b4b7
--- /dev/null
+++ b/drivers/scsi/esas2r/Kconfig
@@ -0,0 +1,5 @@
+config SCSI_ESAS2R
+ tristate "ATTO Technology's ExpressSAS RAID adapter driver"
+ depends on PCI && SCSI
+ ---help---
+ This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.
diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile
new file mode 100644
index 000000000000..c77160b8c8bd
--- /dev/null
+++ b/drivers/scsi/esas2r/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_SCSI_ESAS2R) += esas2r.o
+
+esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
+ esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \
+ esas2r_vda.o esas2r_main.o
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
new file mode 100644
index 000000000000..4aca3d52c851
--- /dev/null
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -0,0 +1,1254 @@
+/* linux/drivers/scsi/esas2r/atioctl.h
+ * ATTO IOCTL Handling
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "atvda.h"
+
+#ifndef ATIOCTL_H
+#define ATIOCTL_H
+
+#define EXPRESS_IOCTL_SIGNATURE "Express"
+#define EXPRESS_IOCTL_SIGNATURE_SIZE 8
+
+/* structure definitions for IOCTls */
+
+struct __packed atto_express_ioctl_header {
+ u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE];
+ u8 return_code;
+
+#define IOCTL_SUCCESS 0
+#define IOCTL_ERR_INVCMD 101
+#define IOCTL_INIT_FAILED 102
+#define IOCTL_NOT_IMPLEMENTED 103
+#define IOCTL_BAD_CHANNEL 104
+#define IOCTL_TARGET_OVERRUN 105
+#define IOCTL_TARGET_NOT_ENABLED 106
+#define IOCTL_BAD_FLASH_IMGTYPE 107
+#define IOCTL_OUT_OF_RESOURCES 108
+#define IOCTL_GENERAL_ERROR 109
+#define IOCTL_INVALID_PARAM 110
+
+ u8 channel;
+ u8 retries;
+ u8 pad[5];
+};
+
+/*
+ * NOTE - if channel == 0xFF, the request is
+ * handled on the adapter it came in on.
+ */
+#define MAX_NODE_NAMES 256
+
+struct __packed atto_firmware_rw_request {
+ u8 function;
+ #define FUNC_FW_DOWNLOAD 0x09
+ #define FUNC_FW_UPLOAD 0x12
+
+ u8 img_type;
+ #define FW_IMG_FW 0x01
+ #define FW_IMG_BIOS 0x02
+ #define FW_IMG_NVR 0x03
+ #define FW_IMG_RAW 0x04
+ #define FW_IMG_FM_API 0x05
+ #define FW_IMG_FS_API 0x06
+
+ u8 pad[2];
+ u32 img_offset;
+ u32 img_size;
+ u8 image[0x80000];
+};
+
+struct __packed atto_param_rw_request {
+ u16 code;
+ char data_buffer[512];
+};
+
+#define MAX_CHANNEL 256
+
+struct __packed atto_channel_list {
+ u32 num_channels;
+ u8 channel[MAX_CHANNEL];
+};
+
+struct __packed atto_channel_info {
+ u8 major_rev;
+ u8 minor_rev;
+ u8 IRQ;
+ u8 revision_id;
+ u8 pci_bus;
+ u8 pci_dev_func;
+ u8 core_rev;
+ u8 host_no;
+ u16 device_id;
+ u16 vendor_id;
+ u16 ven_dev_id;
+ u8 pad[3];
+ u32 hbaapi_rev;
+};
+
+/*
+ * CSMI control codes
+ * class independent
+ */
+#define CSMI_CC_GET_DRVR_INFO 1
+#define CSMI_CC_GET_CNTLR_CFG 2
+#define CSMI_CC_GET_CNTLR_STS 3
+#define CSMI_CC_FW_DOWNLOAD 4
+
+/* RAID class */
+#define CSMI_CC_GET_RAID_INFO 10
+#define CSMI_CC_GET_RAID_CFG 11
+
+/* HBA class */
+#define CSMI_CC_GET_PHY_INFO 20
+#define CSMI_CC_SET_PHY_INFO 21
+#define CSMI_CC_GET_LINK_ERRORS 22
+#define CSMI_CC_SMP_PASSTHRU 23
+#define CSMI_CC_SSP_PASSTHRU 24
+#define CSMI_CC_STP_PASSTHRU 25
+#define CSMI_CC_GET_SATA_SIG 26
+#define CSMI_CC_GET_SCSI_ADDR 27
+#define CSMI_CC_GET_DEV_ADDR 28
+#define CSMI_CC_TASK_MGT 29
+#define CSMI_CC_GET_CONN_INFO 30
+
+/* PHY class */
+#define CSMI_CC_PHY_CTRL 60
+
+/*
+ * CSMI status codes
+ * class independent
+ */
+#define CSMI_STS_SUCCESS 0
+#define CSMI_STS_FAILED 1
+#define CSMI_STS_BAD_CTRL_CODE 2
+#define CSMI_STS_INV_PARAM 3
+#define CSMI_STS_WRITE_ATTEMPTED 4
+
+/* RAID class */
+#define CSMI_STS_INV_RAID_SET 1000
+
+/* HBA class */
+#define CSMI_STS_PHY_CHANGED CSMI_STS_SUCCESS
+#define CSMI_STS_PHY_UNCHANGEABLE 2000
+#define CSMI_STS_INV_LINK_RATE 2001
+#define CSMI_STS_INV_PHY 2002
+#define CSMI_STS_INV_PHY_FOR_PORT 2003
+#define CSMI_STS_PHY_UNSELECTABLE 2004
+#define CSMI_STS_SELECT_PHY_OR_PORT 2005
+#define CSMI_STS_INV_PORT 2006
+#define CSMI_STS_PORT_UNSELECTABLE 2007
+#define CSMI_STS_CONNECTION_FAILED 2008
+#define CSMI_STS_NO_SATA_DEV 2009
+#define CSMI_STS_NO_SATA_SIGNATURE 2010
+#define CSMI_STS_SCSI_EMULATION 2011
+#define CSMI_STS_NOT_AN_END_DEV 2012
+#define CSMI_STS_NO_SCSI_ADDR 2013
+#define CSMI_STS_NO_DEV_ADDR 2014
+
+/* CSMI class independent structures */
+struct atto_csmi_get_driver_info {
+ char name[81];
+ char description[81];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 build_rev;
+ u16 release_rev;
+ u16 csmi_major_rev;
+ u16 csmi_minor_rev;
+ #define CSMI_MAJOR_REV_0_81 0
+ #define CSMI_MINOR_REV_0_81 81
+
+ #define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81
+ #define CSMI_MINOR_REV CSMI_MINOR_REV_0_81
+};
+
+struct atto_csmi_get_pci_bus_addr {
+ u8 bus_num;
+ u8 device_num;
+ u8 function_num;
+ u8 reserved;
+};
+
+struct atto_csmi_get_cntlr_cfg {
+ u32 base_io_addr;
+
+ struct {
+ u32 base_memaddr_lo;
+ u32 base_memaddr_hi;
+ };
+
+ u32 board_id;
+ u16 slot_num;
+ #define CSMI_SLOT_NUM_UNKNOWN 0xFFFF
+
+ u8 cntlr_class;
+ #define CSMI_CNTLR_CLASS_HBA 5
+
+ u8 io_bus_type;
+ #define CSMI_BUS_TYPE_PCI 3
+ #define CSMI_BUS_TYPE_PCMCIA 4
+
+ union {
+ struct atto_csmi_get_pci_bus_addr pci_addr;
+ u8 reserved[32];
+ };
+
+ char serial_num[81];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 build_rev;
+ u16 release_rev;
+ u16 bios_major_rev;
+ u16 bios_minor_rev;
+ u16 bios_build_rev;
+ u16 bios_release_rev;
+ u32 cntlr_flags;
+ #define CSMI_CNTLRF_SAS_HBA 0x00000001
+ #define CSMI_CNTLRF_SAS_RAID 0x00000002
+ #define CSMI_CNTLRF_SATA_HBA 0x00000004
+ #define CSMI_CNTLRF_SATA_RAID 0x00000008
+ #define CSMI_CNTLRF_FWD_SUPPORT 0x00010000
+ #define CSMI_CNTLRF_FWD_ONLINE 0x00020000
+ #define CSMI_CNTLRF_FWD_SRESET 0x00040000
+ #define CSMI_CNTLRF_FWD_HRESET 0x00080000
+ #define CSMI_CNTLRF_FWD_RROM 0x00100000
+
+ u16 rrom_major_rev;
+ u16 rrom_minor_rev;
+ u16 rrom_build_rev;
+ u16 rrom_release_rev;
+ u16 rrom_biosmajor_rev;
+ u16 rrom_biosminor_rev;
+ u16 rrom_biosbuild_rev;
+ u16 rrom_biosrelease_rev;
+ u8 reserved2[7];
+};
+
+struct atto_csmi_get_cntlr_sts {
+ u32 status;
+ #define CSMI_CNTLR_STS_GOOD 1
+ #define CSMI_CNTLR_STS_FAILED 2
+ #define CSMI_CNTLR_STS_OFFLINE 3
+ #define CSMI_CNTLR_STS_POWEROFF 4
+
+ u32 offline_reason;
+ #define CSMI_OFFLINE_NO_REASON 0
+ #define CSMI_OFFLINE_INITIALIZING 1
+ #define CSMI_OFFLINE_BUS_DEGRADED 2
+ #define CSMI_OFFLINE_BUS_FAILURE 3
+
+ u8 reserved[28];
+};
+
+struct atto_csmi_fw_download {
+ u32 buffer_len;
+ u32 download_flags;
+ #define CSMI_FWDF_VALIDATE 0x00000001
+ #define CSMI_FWDF_SOFT_RESET 0x00000002
+ #define CSMI_FWDF_HARD_RESET 0x00000004
+
+ u8 reserved[32];
+ u16 status;
+ #define CSMI_FWD_STS_SUCCESS 0
+ #define CSMI_FWD_STS_FAILED 1
+ #define CSMI_FWD_STS_USING_RROM 2
+ #define CSMI_FWD_STS_REJECT 3
+ #define CSMI_FWD_STS_DOWNREV 4
+
+ u16 severity;
+ #define CSMI_FWD_SEV_INFO 0
+ #define CSMI_FWD_SEV_WARNING 1
+ #define CSMI_FWD_SEV_ERROR 2
+ #define CSMI_FWD_SEV_FATAL 3
+
+};
+
+/* CSMI RAID class structures */
+struct atto_csmi_get_raid_info {
+ u32 num_raid_sets;
+ u32 max_drivesper_set;
+ u8 reserved[92];
+};
+
+struct atto_csmi_raid_drives {
+ char model[40];
+ char firmware[8];
+ char serial_num[40];
+ u8 sas_addr[8];
+ u8 lun[8];
+ u8 drive_sts;
+ #define CSMI_DRV_STS_OK 0
+ #define CSMI_DRV_STS_REBUILDING 1
+ #define CSMI_DRV_STS_FAILED 2
+ #define CSMI_DRV_STS_DEGRADED 3
+
+ u8 drive_usage;
+ #define CSMI_DRV_USE_NOT_USED 0
+ #define CSMI_DRV_USE_MEMBER 1
+ #define CSMI_DRV_USE_SPARE 2
+
+ u8 reserved[30]; /* spec says 22 */
+};
+
+struct atto_csmi_get_raid_cfg {
+ u32 raid_set_index;
+ u32 capacity;
+ u32 stripe_size;
+ u8 raid_type;
+ u8 status;
+ u8 information;
+ u8 drive_cnt;
+ u8 reserved[20];
+
+ struct atto_csmi_raid_drives drives[1];
+};
+
+/* CSMI HBA class structures */
+struct atto_csmi_phy_entity {
+ u8 ident_frame[0x1C];
+ u8 port_id;
+ u8 neg_link_rate;
+ u8 min_link_rate;
+ u8 max_link_rate;
+ u8 phy_change_cnt;
+ u8 auto_discover;
+ #define CSMI_DISC_NOT_SUPPORTED 0x00
+ #define CSMI_DISC_NOT_STARTED 0x01
+ #define CSMI_DISC_IN_PROGRESS 0x02
+ #define CSMI_DISC_COMPLETE 0x03
+ #define CSMI_DISC_ERROR 0x04
+
+ u8 reserved[2];
+ u8 attach_ident_frame[0x1C];
+};
+
+struct atto_csmi_get_phy_info {
+ u8 number_of_phys;
+ u8 reserved[3];
+ struct atto_csmi_phy_entity
+ phy[32];
+};
+
+struct atto_csmi_set_phy_info {
+ u8 phy_id;
+ u8 neg_link_rate;
+ #define CSMI_NEG_RATE_NEGOTIATE 0x00
+ #define CSMI_NEG_RATE_PHY_DIS 0x01
+
+ u8 prog_minlink_rate;
+ u8 prog_maxlink_rate;
+ u8 signal_class;
+ #define CSMI_SIG_CLASS_UNKNOWN 0x00
+ #define CSMI_SIG_CLASS_DIRECT 0x01
+ #define CSMI_SIG_CLASS_SERVER 0x02
+ #define CSMI_SIG_CLASS_ENCLOSURE 0x03
+
+ u8 reserved[3];
+};
+
+struct atto_csmi_get_link_errors {
+ u8 phy_id;
+ u8 reset_cnts;
+ #define CSMI_RESET_CNTS_NO 0x00
+ #define CSMI_RESET_CNTS_YES 0x01
+
+ u8 reserved[2];
+ u32 inv_dw_cnt;
+ u32 disp_err_cnt;
+ u32 loss_ofdw_sync_cnt;
+ u32 phy_reseterr_cnt;
+
+ /*
+ * The following field has been added by ATTO for ease of
+ * implementation of additional statistics. Drivers must validate
+ * the length of the IOCTL payload prior to filling them in so CSMI
+ * compliant applications function correctly.
+ */
+
+ u32 crc_err_cnt;
+};
+
+struct atto_csmi_smp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u32 req_len;
+ u8 smp_req[1020];
+ u8 conn_sts;
+ u8 reserved2[3];
+ u32 rsp_len;
+ u8 smp_rsp[1020];
+};
+
+struct atto_csmi_ssp_passthru_sts {
+ u8 conn_sts;
+ u8 reserved[3];
+ u8 data_present;
+ u8 status;
+ u16 rsp_length;
+ u8 rsp[256];
+ u32 data_bytes;
+};
+
+struct atto_csmi_ssp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u8 lun[8];
+ u8 cdb_len;
+ u8 add_cdb_len;
+ u8 reserved2[2];
+ u8 cdb[16];
+ u32 flags;
+ #define CSMI_SSPF_DD_READ 0x00000001
+ #define CSMI_SSPF_DD_WRITE 0x00000002
+ #define CSMI_SSPF_DD_UNSPECIFIED 0x00000004
+ #define CSMI_SSPF_TA_SIMPLE 0x00000000
+ #define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010
+ #define CSMI_SSPF_TA_ORDERED 0x00000020
+ #define CSMI_SSPF_TA_ACA 0x00000040
+
+ u8 add_cdb[24];
+ u32 data_len;
+
+ struct atto_csmi_ssp_passthru_sts sts;
+};
+
+struct atto_csmi_stp_passthru_sts {
+ u8 conn_sts;
+ u8 reserved[3];
+ u8 sts_fis[20];
+ u32 scr[16];
+ u32 data_bytes;
+};
+
+struct atto_csmi_stp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u8 reserved2[4];
+ u8 command_fis[20];
+ u32 flags;
+ #define CSMI_STPF_DD_READ 0x00000001
+ #define CSMI_STPF_DD_WRITE 0x00000002
+ #define CSMI_STPF_DD_UNSPECIFIED 0x00000004
+ #define CSMI_STPF_PIO 0x00000010
+ #define CSMI_STPF_DMA 0x00000020
+ #define CSMI_STPF_PACKET 0x00000040
+ #define CSMI_STPF_DMA_QUEUED 0x00000080
+ #define CSMI_STPF_EXECUTE_DIAG 0x00000100
+ #define CSMI_STPF_RESET_DEVICE 0x00000200
+
+ u32 data_len;
+
+ struct atto_csmi_stp_passthru_sts sts;
+};
+
+struct atto_csmi_get_sata_sig {
+ u8 phy_id;
+ u8 reserved[3];
+ u8 reg_dth_fis[20];
+};
+
+struct atto_csmi_get_scsi_addr {
+ u8 sas_addr[8];
+ u8 sas_lun[8];
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+};
+
+struct atto_csmi_get_dev_addr {
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+ u8 sas_addr[8];
+ u8 sas_lun[8];
+};
+
+struct atto_csmi_task_mgmt {
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+ u32 flags;
+ #define CSMI_TMF_TASK_IU 0x00000001
+ #define CSMI_TMF_HARD_RST 0x00000002
+ #define CSMI_TMF_SUPPRESS_RSLT 0x00000004
+
+ u32 queue_tag;
+ u32 reserved;
+ u8 task_mgt_func;
+ u8 reserved2[7];
+ u32 information;
+ #define CSMI_TM_INFO_TEST 1
+ #define CSMI_TM_INFO_EXCEEDED 2
+ #define CSMI_TM_INFO_DEMAND 3
+ #define CSMI_TM_INFO_TRIGGER 4
+
+ struct atto_csmi_ssp_passthru_sts sts;
+
+};
+
+struct atto_csmi_get_conn_info {
+ u32 pinout;
+ #define CSMI_CON_UNKNOWN 0x00000001
+ #define CSMI_CON_SFF_8482 0x00000002
+ #define CSMI_CON_SFF_8470_LANE_1 0x00000100
+ #define CSMI_CON_SFF_8470_LANE_2 0x00000200
+ #define CSMI_CON_SFF_8470_LANE_3 0x00000400
+ #define CSMI_CON_SFF_8470_LANE_4 0x00000800
+ #define CSMI_CON_SFF_8484_LANE_1 0x00010000
+ #define CSMI_CON_SFF_8484_LANE_2 0x00020000
+ #define CSMI_CON_SFF_8484_LANE_3 0x00040000
+ #define CSMI_CON_SFF_8484_LANE_4 0x00080000
+
+ u8 connector[16];
+ u8 location;
+ #define CSMI_CON_INTERNAL 0x02
+ #define CSMI_CON_EXTERNAL 0x04
+ #define CSMI_CON_SWITCHABLE 0x08
+ #define CSMI_CON_AUTO 0x10
+
+ u8 reserved[15];
+};
+
+/* CSMI PHY class structures */
+struct atto_csmi_character {
+ u8 type_flags;
+ #define CSMI_CTF_POS_DISP 0x01
+ #define CSMI_CTF_NEG_DISP 0x02
+ #define CSMI_CTF_CTRL_CHAR 0x04
+
+ u8 value;
+};
+
+struct atto_csmi_pc_ctrl {
+ u8 type;
+ #define CSMI_PC_TYPE_UNDEFINED 0x00
+ #define CSMI_PC_TYPE_SATA 0x01
+ #define CSMI_PC_TYPE_SAS 0x02
+ u8 rate;
+ u8 reserved[6];
+ u32 vendor_unique[8];
+ u32 tx_flags;
+ #define CSMI_PC_TXF_PREEMP_DIS 0x00000001
+
+ signed char tx_amplitude;
+ signed char tx_preemphasis;
+ signed char tx_slew_rate;
+ signed char tx_reserved[13];
+ u8 tx_vendor_unique[64];
+ u32 rx_flags;
+ #define CSMI_PC_RXF_EQ_DIS 0x00000001
+
+ signed char rx_threshold;
+ signed char rx_equalization_gain;
+ signed char rx_reserved[14];
+ u8 rx_vendor_unique[64];
+ u32 pattern_flags;
+ #define CSMI_PC_PATF_FIXED 0x00000001
+ #define CSMI_PC_PATF_DIS_SCR 0x00000002
+ #define CSMI_PC_PATF_DIS_ALIGN 0x00000004
+ #define CSMI_PC_PATF_DIS_SSC 0x00000008
+
+ u8 fixed_pattern;
+ #define CSMI_PC_FP_CJPAT 0x00000001
+ #define CSMI_PC_FP_ALIGN 0x00000002
+
+ u8 user_pattern_len;
+ u8 pattern_reserved[6];
+
+ struct atto_csmi_character user_pattern_buffer[16];
+};
+
+struct atto_csmi_phy_ctrl {
+ u32 function;
+ #define CSMI_PC_FUNC_GET_SETUP 0x00000100
+
+ u8 phy_id;
+ u16 len_of_cntl;
+ u8 num_of_cntls;
+ u8 reserved[4];
+ u32 link_flags;
+ #define CSMI_PHY_ACTIVATE_CTRL 0x00000001
+ #define CSMI_PHY_UPD_SPINUP_RATE 0x00000002
+ #define CSMI_PHY_AUTO_COMWAKE 0x00000004
+
+ u8 spinup_rate;
+ u8 link_reserved[7];
+ u32 vendor_unique[8];
+
+ struct atto_csmi_pc_ctrl control[1];
+};
+
+union atto_ioctl_csmi {
+ struct atto_csmi_get_driver_info drvr_info;
+ struct atto_csmi_get_cntlr_cfg cntlr_cfg;
+ struct atto_csmi_get_cntlr_sts cntlr_sts;
+ struct atto_csmi_fw_download fw_dwnld;
+ struct atto_csmi_get_raid_info raid_info;
+ struct atto_csmi_get_raid_cfg raid_cfg;
+ struct atto_csmi_get_phy_info get_phy_info;
+ struct atto_csmi_set_phy_info set_phy_info;
+ struct atto_csmi_get_link_errors link_errs;
+ struct atto_csmi_smp_passthru smp_pass_thru;
+ struct atto_csmi_ssp_passthru ssp_pass_thru;
+ struct atto_csmi_stp_passthru stp_pass_thru;
+ struct atto_csmi_task_mgmt tsk_mgt;
+ struct atto_csmi_get_sata_sig sata_sig;
+ struct atto_csmi_get_scsi_addr scsi_addr;
+ struct atto_csmi_get_dev_addr dev_addr;
+ struct atto_csmi_get_conn_info conn_info[32];
+ struct atto_csmi_phy_ctrl phy_ctrl;
+};
+
+struct atto_csmi {
+ u32 control_code;
+ u32 status;
+ union atto_ioctl_csmi data;
+};
+
+struct atto_module_info {
+ void *adapter;
+ void *pci_dev;
+ void *scsi_host;
+ unsigned short host_no;
+ union {
+ struct {
+ u64 node_name;
+ u64 port_name;
+ };
+ u64 sas_addr;
+ };
+};
+
+#define ATTO_FUNC_GET_ADAP_INFO 0x00
+#define ATTO_VER_GET_ADAP_INFO0 0
+#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0
+
+struct __packed atto_hba_get_adapter_info {
+
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u16 ss_vendor_id;
+ u16 ss_device_id;
+ u8 class_code[3];
+ u8 rev_id;
+ u8 bus_num;
+ u8 dev_num;
+ u8 func_num;
+ u8 link_width_max;
+ u8 link_width_curr;
+ #define ATTO_GAI_PCILW_UNKNOWN 0x00
+
+ u8 link_speed_max;
+ u8 link_speed_curr;
+ #define ATTO_GAI_PCILS_UNKNOWN 0x00
+ #define ATTO_GAI_PCILS_GEN1 0x01
+ #define ATTO_GAI_PCILS_GEN2 0x02
+ #define ATTO_GAI_PCILS_GEN3 0x03
+
+ u8 interrupt_mode;
+ #define ATTO_GAI_PCIIM_UNKNOWN 0x00
+ #define ATTO_GAI_PCIIM_LEGACY 0x01
+ #define ATTO_GAI_PCIIM_MSI 0x02
+ #define ATTO_GAI_PCIIM_MSIX 0x03
+
+ u8 msi_vector_cnt;
+ u8 reserved[19];
+ } pci;
+
+ u8 adap_type;
+ #define ATTO_GAI_AT_EPCIU320 0x00
+ #define ATTO_GAI_AT_ESASRAID 0x01
+ #define ATTO_GAI_AT_ESASRAID2 0x02
+ #define ATTO_GAI_AT_ESASHBA 0x03
+ #define ATTO_GAI_AT_ESASHBA2 0x04
+ #define ATTO_GAI_AT_CELERITY 0x05
+ #define ATTO_GAI_AT_CELERITY8 0x06
+ #define ATTO_GAI_AT_FASTFRAME 0x07
+ #define ATTO_GAI_AT_ESASHBA3 0x08
+ #define ATTO_GAI_AT_CELERITY16 0x09
+ #define ATTO_GAI_AT_TLSASHBA 0x0A
+ #define ATTO_GAI_AT_ESASHBA4 0x0B
+
+ u8 adap_flags;
+ #define ATTO_GAI_AF_DEGRADED 0x01
+ #define ATTO_GAI_AF_SPT_SUPP 0x02
+ #define ATTO_GAI_AF_DEVADDR_SUPP 0x04
+ #define ATTO_GAI_AF_PHYCTRL_SUPP 0x08
+ #define ATTO_GAI_AF_TEST_SUPP 0x10
+ #define ATTO_GAI_AF_DIAG_SUPP 0x20
+ #define ATTO_GAI_AF_VIRT_SES 0x40
+ #define ATTO_GAI_AF_CONN_CTRL 0x80
+
+ u8 num_ports;
+ u8 num_phys;
+ u8 drvr_rev_major;
+ u8 drvr_rev_minor;
+ u8 drvr_revsub_minor;
+ u8 drvr_rev_build;
+ char drvr_rev_ascii[16];
+ char drvr_name[32];
+ char firmware_rev[16];
+ char flash_rev[16];
+ char model_name_short[16];
+ char model_name[32];
+ u32 num_targets;
+ u32 num_targsper_bus;
+ u32 num_lunsper_targ;
+ u8 num_busses;
+ u8 num_connectors;
+ u8 adap_flags2;
+ #define ATTO_GAI_AF2_FCOE_SUPP 0x01
+ #define ATTO_GAI_AF2_NIC_SUPP 0x02
+ #define ATTO_GAI_AF2_LOCATE_SUPP 0x04
+ #define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08
+ #define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10
+ #define ATTO_GAI_AF2_NPIV_SUPP 0x20
+ #define ATTO_GAI_AF2_MP_SUPP 0x40
+
+ u8 num_temp_sensors;
+ u32 num_targets_backend;
+ u32 tunnel_flags;
+ #define ATTO_GAI_TF_MEM_RW 0x00000001
+ #define ATTO_GAI_TF_TRACE 0x00000002
+ #define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004
+ #define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008
+ #define ATTO_GAI_TF_PHY_CTRL 0x00000010
+ #define ATTO_GAI_TF_CONN_CTRL 0x00000020
+ #define ATTO_GAI_TF_GET_DEV_INFO 0x00000040
+
+ u8 reserved3[0x138];
+};
+
+#define ATTO_FUNC_GET_ADAP_ADDR 0x01
+#define ATTO_VER_GET_ADAP_ADDR0 0
+#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0
+
+struct __packed atto_hba_get_adapter_address {
+
+ u8 addr_type;
+ #define ATTO_GAA_AT_PORT 0x00
+ #define ATTO_GAA_AT_NODE 0x01
+ #define ATTO_GAA_AT_CURR_MAC 0x02
+ #define ATTO_GAA_AT_PERM_MAC 0x03
+ #define ATTO_GAA_AT_VNIC 0x04
+
+ u8 port_id;
+ u16 addr_len;
+ u8 address[256];
+};
+
+#define ATTO_FUNC_MEM_RW 0x02
+#define ATTO_VER_MEM_RW0 0
+#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0
+
+struct __packed atto_hba_memory_read_write {
+ u8 mem_func;
+ u8 mem_type;
+ union {
+ u8 pci_index;
+ u8 i2c_dev;
+ };
+ u8 i2c_status;
+ u32 length;
+ u64 address;
+ u8 reserved[48];
+
+};
+
+#define ATTO_FUNC_TRACE 0x03
+#define ATTO_VER_TRACE0 0
+#define ATTO_VER_TRACE1 1
+#define ATTO_VER_TRACE ATTO_VER_TRACE1
+
+struct __packed atto_hba_trace {
+ u8 trace_func;
+ #define ATTO_TRC_TF_GET_INFO 0x00
+ #define ATTO_TRC_TF_ENABLE 0x01
+ #define ATTO_TRC_TF_DISABLE 0x02
+ #define ATTO_TRC_TF_SET_MASK 0x03
+ #define ATTO_TRC_TF_UPLOAD 0x04
+ #define ATTO_TRC_TF_RESET 0x05
+
+ u8 trace_type;
+ #define ATTO_TRC_TT_DRIVER 0x00
+ #define ATTO_TRC_TT_FWCOREDUMP 0x01
+
+ u8 reserved[2];
+ u32 current_offset;
+ u32 total_length;
+ u32 trace_mask;
+ u8 reserved2[48];
+};
+
+#define ATTO_FUNC_SCSI_PASS_THRU 0x04
+#define ATTO_VER_SCSI_PASS_THRU0 0
+#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0
+
+struct __packed atto_hba_scsi_pass_thru {
+ u8 cdb[32];
+ u8 cdb_length;
+ u8 req_status;
+ #define ATTO_SPT_RS_SUCCESS 0x00
+ #define ATTO_SPT_RS_FAILED 0x01
+ #define ATTO_SPT_RS_OVERRUN 0x02
+ #define ATTO_SPT_RS_UNDERRUN 0x03
+ #define ATTO_SPT_RS_NO_DEVICE 0x04
+ #define ATTO_SPT_RS_NO_LUN 0x05
+ #define ATTO_SPT_RS_TIMEOUT 0x06
+ #define ATTO_SPT_RS_BUS_RESET 0x07
+ #define ATTO_SPT_RS_ABORTED 0x08
+ #define ATTO_SPT_RS_BUSY 0x09
+ #define ATTO_SPT_RS_DEGRADED 0x0A
+
+ u8 scsi_status;
+ u8 sense_length;
+ u32 flags;
+ #define ATTO_SPTF_DATA_IN 0x00000001
+ #define ATTO_SPTF_DATA_OUT 0x00000002
+ #define ATTO_SPTF_SIMPLE_Q 0x00000004
+ #define ATTO_SPTF_HEAD_OF_Q 0x00000008
+ #define ATTO_SPTF_ORDERED_Q 0x00000010
+
+ u32 timeout;
+ u32 target_id;
+ u8 lun[8];
+ u32 residual_length;
+ u8 sense_data[0xFC];
+ u8 reserved[0x28];
+};
+
+#define ATTO_FUNC_GET_DEV_ADDR 0x05
+#define ATTO_VER_GET_DEV_ADDR0 0
+#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0
+
+struct __packed atto_hba_get_device_address {
+ u8 addr_type;
+ #define ATTO_GDA_AT_PORT 0x00
+ #define ATTO_GDA_AT_NODE 0x01
+ #define ATTO_GDA_AT_MAC 0x02
+ #define ATTO_GDA_AT_PORTID 0x03
+ #define ATTO_GDA_AT_UNIQUE 0x04
+
+ u8 reserved;
+ u16 addr_len;
+ u32 target_id;
+ u8 address[256];
+};
+
+/* The following functions are supported by firmware but do not have any
+ * associated driver structures
+ */
+#define ATTO_FUNC_PHY_CTRL 0x06
+#define ATTO_FUNC_CONN_CTRL 0x0C
+#define ATTO_FUNC_ADAP_CTRL 0x0E
+#define ATTO_VER_ADAP_CTRL0 0
+#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0
+
+struct __packed atto_hba_adap_ctrl {
+ u8 adap_func;
+ #define ATTO_AC_AF_HARD_RST 0x00
+ #define ATTO_AC_AF_GET_STATE 0x01
+ #define ATTO_AC_AF_GET_TEMP 0x02
+
+ u8 adap_state;
+ #define ATTO_AC_AS_UNKNOWN 0x00
+ #define ATTO_AC_AS_OK 0x01
+ #define ATTO_AC_AS_RST_SCHED 0x02
+ #define ATTO_AC_AS_RST_IN_PROG 0x03
+ #define ATTO_AC_AS_RST_DISC 0x04
+ #define ATTO_AC_AS_DEGRADED 0x05
+ #define ATTO_AC_AS_DISABLED 0x06
+ #define ATTO_AC_AS_TEMP 0x07
+
+ u8 reserved[2];
+
+ union {
+ struct {
+ u8 temp_sensor;
+ u8 temp_state;
+
+ #define ATTO_AC_TS_UNSUPP 0x00
+ #define ATTO_AC_TS_UNKNOWN 0x01
+ #define ATTO_AC_TS_INIT_FAILED 0x02
+ #define ATTO_AC_TS_NORMAL 0x03
+ #define ATTO_AC_TS_OUT_OF_RANGE 0x04
+ #define ATTO_AC_TS_FAULT 0x05
+
+ signed short temp_value;
+ signed short temp_lower_lim;
+ signed short temp_upper_lim;
+ char temp_desc[32];
+ u8 reserved2[20];
+ };
+ };
+};
+
+#define ATTO_FUNC_GET_DEV_INFO 0x0F
+#define ATTO_VER_GET_DEV_INFO0 0
+#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0
+
+struct __packed atto_hba_sas_device_info {
+
+ #define ATTO_SDI_MAX_PHYS_WIDE_PORT 16
+
+ u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */
+ #define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV
+ u32 exp_target_id;
+ u32 sas_port_mask;
+ u8 sas_level;
+ #define ATTO_SDI_SAS_LVL_INV 0xFF
+
+ u8 slot_num;
+ #define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV
+
+ u8 dev_type;
+ #define ATTO_SDI_DT_END_DEVICE 0
+ #define ATTO_SDI_DT_EXPANDER 1
+ #define ATTO_SDI_DT_PORT_MULT 2
+
+ u8 ini_flags;
+ u8 tgt_flags;
+ u8 link_rate; /* SMP_RATE_XXX */
+ u8 loc_flags;
+ #define ATTO_SDI_LF_DIRECT 0x01
+ #define ATTO_SDI_LF_EXPANDER 0x02
+ #define ATTO_SDI_LF_PORT_MULT 0x04
+ u8 pm_port;
+ u8 reserved[0x60];
+};
+
+union atto_hba_device_info {
+ struct atto_hba_sas_device_info sas_dev_info;
+};
+
+struct __packed atto_hba_get_device_info {
+ u32 target_id;
+ u8 info_type;
+ #define ATTO_GDI_IT_UNKNOWN 0x00
+ #define ATTO_GDI_IT_SAS 0x01
+ #define ATTO_GDI_IT_FC 0x02
+ #define ATTO_GDI_IT_FCOE 0x03
+
+ u8 reserved[11];
+ union atto_hba_device_info dev_info;
+};
+
+struct atto_ioctl {
+ u8 version;
+ u8 function; /* ATTO_FUNC_XXX */
+ u8 status;
+#define ATTO_STS_SUCCESS 0x00
+#define ATTO_STS_FAILED 0x01
+#define ATTO_STS_INV_VERSION 0x02
+#define ATTO_STS_OUT_OF_RSRC 0x03
+#define ATTO_STS_INV_FUNC 0x04
+#define ATTO_STS_UNSUPPORTED 0x05
+#define ATTO_STS_INV_ADAPTER 0x06
+#define ATTO_STS_INV_DRVR_VER 0x07
+#define ATTO_STS_INV_PARAM 0x08
+#define ATTO_STS_TIMEOUT 0x09
+#define ATTO_STS_NOT_APPL 0x0A
+#define ATTO_STS_DEGRADED 0x0B
+
+ u8 flags;
+ #define HBAF_TUNNEL 0x01
+
+ u32 data_length;
+ u8 reserved2[56];
+
+ union {
+ u8 byte[1];
+ struct atto_hba_get_adapter_info get_adap_info;
+ struct atto_hba_get_adapter_address get_adap_addr;
+ struct atto_hba_scsi_pass_thru scsi_pass_thru;
+ struct atto_hba_get_device_address get_dev_addr;
+ struct atto_hba_adap_ctrl adap_ctrl;
+ struct atto_hba_get_device_info get_dev_info;
+ struct atto_hba_trace trace;
+ } data;
+
+};
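+
+/* illustrative sketch, not part of the interface: filling struct atto_ioctl
+ * for an adapter info query using only the definitions above. the
+ * issue_hba_ioctl() transport call is hypothetical.
+ *
+ *	memset(ioc, 0, sizeof(*ioc));
+ *	ioc->version = ATTO_VER_GET_ADAP_INFO;
+ *	ioc->function = ATTO_FUNC_GET_ADAP_INFO;
+ *	ioc->data_length = sizeof(ioc->data.get_adap_info);
+ *
+ *	issue_hba_ioctl(ioc);
+ *
+ *	if (ioc->status == ATTO_STS_SUCCESS)
+ *		ports = ioc->data.get_adap_info.num_ports;
+ */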
+
+struct __packed atto_ioctl_vda_scsi_cmd {
+
+ #define ATTO_VDA_SCSI_VER0 0
+ #define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0
+
+ u8 cdb[16];
+ u32 flags;
+ u32 data_length;
+ u32 residual_length;
+ u16 target_id;
+ u8 sense_len;
+ u8 scsi_stat;
+ u8 reserved[8];
+ u8 sense_data[80];
+};
+
+struct __packed atto_ioctl_vda_flash_cmd {
+
+ #define ATTO_VDA_FLASH_VER0 0
+ #define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0
+
+ u32 flash_addr;
+ u32 data_length;
+ u8 sub_func;
+ u8 reserved[15];
+
+ union {
+ struct {
+ u32 flash_size;
+ u32 page_size;
+ u8 prod_info[32];
+ } info;
+
+ struct {
+ char file_name[16]; /* 8.3 fname, NULL term, wc=* */
+ u32 file_size;
+ } file;
+ } data;
+
+};
+
+struct __packed atto_ioctl_vda_diag_cmd {
+
+ #define ATTO_VDA_DIAG_VER0 0
+ #define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0
+
+ u64 local_addr;
+ u32 data_length;
+ u8 sub_func;
+ u8 flags;
+ u8 reserved[3];
+};
+
+struct __packed atto_ioctl_vda_cli_cmd {
+
+ #define ATTO_VDA_CLI_VER0 0
+ #define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0
+
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_ioctl_vda_smp_cmd {
+
+ #define ATTO_VDA_SMP_VER0 0
+ #define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0
+
+ u64 dest;
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_ioctl_vda_cfg_cmd {
+
+ #define ATTO_VDA_CFG_VER0 0
+ #define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0
+
+ u32 data_length;
+ u8 cfg_func;
+ u8 reserved[11];
+
+ union {
+ u8 bytes[112];
+ struct atto_vda_cfg_init init;
+ } data;
+
+};
+
+struct __packed atto_ioctl_vda_mgt_cmd {
+
+ #define ATTO_VDA_MGT_VER0 0
+ #define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0
+
+ u8 mgt_func;
+ u8 scan_generation;
+ u16 dev_index;
+ u32 data_length;
+ u8 reserved[8];
+ union {
+ u8 bytes[112];
+ struct atto_vda_devinfo dev_info;
+ struct atto_vda_grp_info grp_info;
+ struct atto_vdapart_info part_info;
+ struct atto_vda_dh_info dh_info;
+ struct atto_vda_metrics_info metrics_info;
+ struct atto_vda_schedule_info sched_info;
+ struct atto_vda_n_vcache_info nvcache_info;
+ struct atto_vda_buzzer_info buzzer_info;
+ struct atto_vda_adapter_info adapter_info;
+ struct atto_vda_temp_info temp_info;
+ struct atto_vda_fan_info fan_info;
+ } data;
+};
+
+struct __packed atto_ioctl_vda_gsv_cmd {
+
+ #define ATTO_VDA_GSV_VER0 0
+ #define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0
+
+ u8 rsp_len;
+ u8 reserved[7];
+ u8 version_info[1];
+ #define ATTO_VDA_VER_UNSUPPORTED 0xFF
+
+};
+
+struct __packed atto_ioctl_vda {
+ u8 version;
+ u8 function; /* VDA_FUNC_XXXX */
+ u8 status; /* ATTO_STS_XXX */
+ u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */
+ u32 data_length;
+ u8 reserved[8];
+
+ union {
+ struct atto_ioctl_vda_scsi_cmd scsi;
+ struct atto_ioctl_vda_flash_cmd flash;
+ struct atto_ioctl_vda_diag_cmd diag;
+ struct atto_ioctl_vda_cli_cmd cli;
+ struct atto_ioctl_vda_smp_cmd smp;
+ struct atto_ioctl_vda_cfg_cmd cfg;
+ struct atto_ioctl_vda_mgt_cmd mgt;
+ struct atto_ioctl_vda_gsv_cmd gsv;
+ u8 cmd_info[256];
+ } cmd;
+
+ union {
+ u8 data[1];
+ struct atto_vda_devinfo2 dev_info2;
+ } data;
+
+};
+
+struct __packed atto_ioctl_smp {
+ u8 version;
+ #define ATTO_SMP_VERSION0 0
+ #define ATTO_SMP_VERSION1 1
+ #define ATTO_SMP_VERSION2 2
+ #define ATTO_SMP_VERSION ATTO_SMP_VERSION2
+
+ u8 function;
+#define ATTO_SMP_FUNC_DISC_SMP 0x00
+#define ATTO_SMP_FUNC_DISC_TARG 0x01
+#define ATTO_SMP_FUNC_SEND_CMD 0x02
+#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03
+#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04
+#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05
+
+ u8 status; /* ATTO_STS_XXX */
+ u8 smp_status; /* if status == ATTO_STS_SUCCESS */
+ #define ATTO_SMP_STS_SUCCESS 0x00
+ #define ATTO_SMP_STS_FAILURE 0x01
+ #define ATTO_SMP_STS_RESCAN 0x02
+ #define ATTO_SMP_STS_NOT_FOUND 0x03
+
+ u16 target_id;
+ u8 phy_id;
+ u8 dev_index;
+ u64 smp_sas_addr;
+ u64 targ_sas_addr;
+ u32 req_length;
+ u32 rsp_length;
+ u8 flags;
+ #define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */
+
+ u8 reserved[31];
+
+ union {
+ u8 byte[1];
+ u32 dword[1];
+ } data;
+
+};
+
+struct __packed atto_express_ioctl {
+ struct atto_express_ioctl_header header;
+
+ union {
+ struct atto_firmware_rw_request fwrw;
+ struct atto_param_rw_request prw;
+ struct atto_channel_list chanlist;
+ struct atto_channel_info chaninfo;
+ struct atto_ioctl ioctl_hba;
+ struct atto_module_info modinfo;
+ struct atto_ioctl_vda ioctl_vda;
+ struct atto_ioctl_smp ioctl_smp;
+ struct atto_csmi csmi;
+
+ } data;
+};
+
+/* The payload struct associated with each control code is named in the
+ * comment after its definition */
+#define EXPRESS_IOCTL_MIN 0x4500
+#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */
+#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */
+#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */
+#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */
+#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */
+#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */
+#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */
+#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */
+#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */
+#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */
+#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */
+#define EXPRESS_CSMI 0x450B /* CSMI */
+#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */
+#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */
+#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */
+#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */
+#define EXPRESS_IOCTL_MAX 0x450F
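+
+/* illustrative sketch, not driver code: dispatching a received control code
+ * to the union member of struct atto_express_ioctl that carries its payload,
+ * per the list above. the handlers are hypothetical.
+ *
+ *	switch (code) {
+ *	case EXPRESS_IOCTL_RW_FIRMWARE:
+ *		handle_fwrw(&eioc->data.fwrw);
+ *		break;
+ *	case EXPRESS_IOCTL_HBA:
+ *		handle_hba(&eioc->data.ioctl_hba);
+ *		break;
+ *	case EXPRESS_CSMI:
+ *		handle_csmi(&eioc->data.csmi);
+ *		break;
+ *	default:
+ *		err = -EINVAL;
+ *		break;
+ *	}
+ */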
+
+#endif
diff --git a/drivers/scsi/esas2r/atvda.h b/drivers/scsi/esas2r/atvda.h
new file mode 100644
index 000000000000..5fc1f991d24e
--- /dev/null
+++ b/drivers/scsi/esas2r/atvda.h
@@ -0,0 +1,1319 @@
+/* linux/drivers/scsi/esas2r/atvda.h
+ * ATTO VDA interface definitions
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+
+#ifndef ATVDA_H
+#define ATVDA_H
+
+struct __packed atto_dev_addr {
+ u64 dev_port;
+ u64 hba_port;
+ u8 lun;
+ u8 flags;
+ #define VDA_DEVADDRF_SATA 0x01
+ #define VDA_DEVADDRF_SSD 0x02
+ u8 link_speed; /* VDALINKSPEED_xxx */
+ u8 pad[1];
+};
+
+/* dev_addr2 was added for 64-bit alignment */
+
+struct __packed atto_dev_addr2 {
+ u64 dev_port;
+ u64 hba_port;
+ u8 lun;
+ u8 flags;
+ u8 link_speed;
+ u8 pad[5];
+};
+
+struct __packed atto_vda_sge {
+ u32 length;
+ u64 address;
+};
+
+
+/* VDA request function codes */
+
+#define VDA_FUNC_SCSI 0x00
+#define VDA_FUNC_FLASH 0x01
+#define VDA_FUNC_DIAG 0x02
+#define VDA_FUNC_AE 0x03
+#define VDA_FUNC_CLI 0x04
+#define VDA_FUNC_IOCTL 0x05
+#define VDA_FUNC_CFG 0x06
+#define VDA_FUNC_MGT 0x07
+#define VDA_FUNC_GSV 0x08
+
+
+/* VDA request status values. for the host driver's convenience, values for
+ * SCSI requests start at zero. other requests may use these values as well. */
+
+#define RS_SUCCESS 0x00 /*! successful completion */
+#define RS_INV_FUNC 0x01 /*! invalid command function */
+#define RS_BUSY 0x02 /*! insufficient resources */
+#define RS_SEL 0x03 /*! no target at target_id */
+#define RS_NO_LUN 0x04 /*! invalid LUN */
+#define RS_TIMEOUT 0x05 /*! request timeout */
+#define RS_OVERRUN 0x06 /*! data overrun */
+#define RS_UNDERRUN 0x07 /*! data underrun */
+#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */
+#define RS_ABORTED 0x0A /*! command aborted */
+#define RS_RESID_MISM 0x0B /*! residual length incorrect */
+#define RS_TM_FAILED 0x0C /*! task management failed */
+#define RS_RESET 0x0D /*! aborted due to bus reset */
+#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */
+#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */
+#define RS_UNSUPPORTED 0x10 /*! unsupported request */
+#define RS_SEL2 0x70 /*! internal generated RS_SEL */
+#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */
+#define RS_MGT_BASE 0x80 /*! base of VDA management errors */
+#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00)
+#define RS_DEV_INVALID (RS_MGT_BASE + 0x01)
+#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02)
+#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03)
+#define RS_DEV_LOST (RS_MGT_BASE + 0x04)
+#define RS_SCAN_GEN (RS_MGT_BASE + 0x05)
+#define RS_GRP_INVALID (RS_MGT_BASE + 0x08)
+#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09)
+#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A)
+#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B)
+#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C)
+#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D)
+#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E)
+#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F)
+#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10)
+#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11)
+#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12)
+#define RS_CFG_SAVE (RS_MGT_BASE + 0x14)
+#define RS_PART_LAST (RS_MGT_BASE + 0x18)
+#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19)
+#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A)
+#define RS_PART_TARGET (RS_MGT_BASE + 0x1B)
+#define RS_PART_LUN (RS_MGT_BASE + 0x1C)
+#define RS_PART_DUP (RS_MGT_BASE + 0x1D)
+#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E)
+#define RS_PART_MAX (RS_MGT_BASE + 0x1F)
+#define RS_PART_CAP (RS_MGT_BASE + 0x20)
+#define RS_PART_STATE (RS_MGT_BASE + 0x21)
+#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22)
+#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23)
+#define RS_HS_ERROR (RS_MGT_BASE + 0x24)
+#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25)
+#define RS_BAD_PARAM (RS_MGT_BASE + 0x26)
+#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27)
+#define RS_FLS_BASE 0xB0 /*! base of VDA flash errors */
+#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00)
+#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01)
+#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02)
+#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03)
+#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04)
+#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05)
+#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06)
+#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07)
+#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08)
+#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */
+#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0)
+#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1)
+#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2)
+#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3)
+#define RS_DEGRADED 0xFB /*! degraded mode */
+#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */
+#define RS_VDA_INTERNAL 0xFD /*! catch-all */
+#define RS_PENDING 0xFE /*! pending, not started */
+#define RS_STARTED 0xFF /*! started */
+
+
+/* flash request subfunctions. these are used in both the IOCTL and the
+ * driver-firmware interface (VDA_FUNC_FLASH). */
+
+#define VDA_FLASH_BEGINW 0x00
+#define VDA_FLASH_READ 0x01
+#define VDA_FLASH_WRITE 0x02
+#define VDA_FLASH_COMMIT 0x03
+#define VDA_FLASH_CANCEL 0x04
+#define VDA_FLASH_INFO 0x05
+#define VDA_FLASH_FREAD 0x06
+#define VDA_FLASH_FWRITE 0x07
+#define VDA_FLASH_FINFO 0x08
+
+
+/* IOCTL request subfunctions. these identify the payload type for
+ * VDA_FUNC_IOCTL.
+ */
+
+#define VDA_IOCTL_HBA 0x00
+#define VDA_IOCTL_CSMI 0x01
+#define VDA_IOCTL_SMP 0x02
+
+struct __packed atto_vda_devinfo {
+ struct atto_dev_addr dev_addr;
+ u8 vendor_id[8];
+ u8 product_id[16];
+ u8 revision[4];
+ u64 capacity;
+ u32 block_size;
+ u8 dev_type;
+
+ union {
+ u8 dev_status;
+ #define VDADEVSTAT_INVALID 0x00
+ #define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID
+ #define VDADEVSTAT_ASSIGNED 0x01
+ #define VDADEVSTAT_SPARE 0x02
+ #define VDADEVSTAT_UNAVAIL 0x03
+ #define VDADEVSTAT_PT_MAINT 0x04
+ #define VDADEVSTAT_LCLSPARE 0x05
+ #define VDADEVSTAT_UNUSEABLE 0x06
+ #define VDADEVSTAT_AVAIL 0xFF
+
+ u8 op_ctrl;
+ #define VDA_DEV_OP_CTRL_START 0x01
+ #define VDA_DEV_OP_CTRL_HALT 0x02
+ #define VDA_DEV_OP_CTRL_RESUME 0x03
+ #define VDA_DEV_OP_CTRL_CANCEL 0x04
+ };
+
+ u8 member_state;
+ #define VDAMBRSTATE_ONLINE 0x00
+ #define VDAMBRSTATE_DEGRADED 0x01
+ #define VDAMBRSTATE_UNAVAIL 0x02
+ #define VDAMBRSTATE_FAULTED 0x03
+ #define VDAMBRSTATE_MISREAD 0x04
+ #define VDAMBRSTATE_INCOMPAT 0x05
+
+ u8 operation;
+ #define VDAOP_NONE 0x00
+ #define VDAOP_REBUILD 0x01
+ #define VDAOP_ERASE 0x02
+ #define VDAOP_PATTERN 0x03
+ #define VDAOP_CONVERSION 0x04
+ #define VDAOP_FULL_INIT 0x05
+ #define VDAOP_QUICK_INIT 0x06
+ #define VDAOP_SECT_SCAN 0x07
+ #define VDAOP_SECT_SCAN_PARITY 0x08
+ #define VDAOP_SECT_SCAN_PARITY_FIX 0x09
+ #define VDAOP_RECOV_REBUILD 0x0A
+
+ u8 op_status;
+ #define VDAOPSTAT_OK 0x00
+ #define VDAOPSTAT_FAULTED 0x01
+ #define VDAOPSTAT_HALTED 0x02
+ #define VDAOPSTAT_INT 0x03
+
+ u8 progress; /* 0 - 100% */
+ u16 ses_dev_index;
+ #define VDASESDI_INVALID 0xFFFF
+
+ u8 serial_no[32];
+
+ union {
+ u16 target_id;
+ #define VDATGTID_INVALID 0xFFFF
+
+ u16 features_mask;
+ };
+
+ u16 lun;
+ u16 features;
+ #define VDADEVFEAT_ENC_SERV 0x0001
+ #define VDADEVFEAT_IDENT 0x0002
+ #define VDADEVFEAT_DH_SUPP 0x0004
+ #define VDADEVFEAT_PHYS_ID 0x0008
+
+ u8 ses_element_id;
+ u8 link_speed;
+ #define VDALINKSPEED_UNKNOWN 0x00
+ #define VDALINKSPEED_1GB 0x01
+ #define VDALINKSPEED_1_5GB 0x02
+ #define VDALINKSPEED_2GB 0x03
+ #define VDALINKSPEED_3GB 0x04
+ #define VDALINKSPEED_4GB 0x05
+ #define VDALINKSPEED_6GB 0x06
+ #define VDALINKSPEED_8GB 0x07
+
+ u16 phys_target_id;
+ u8 reserved[2];
+};
+
+
+/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo. it
+ * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore,
+ * the entire structure is DMAed between the firmware and host buffer and
+ * the data will always be in little endian format.
+ */
+
+struct __packed atto_vda_devinfo2 {
+ struct atto_dev_addr dev_addr;
+ u8 vendor_id[8];
+ u8 product_id[16];
+ u8 revision[4];
+ u64 capacity;
+ u32 block_size;
+ u8 dev_type;
+ u8 dev_status;
+ u8 member_state;
+ u8 operation;
+ u8 op_status;
+ u8 progress;
+ u16 ses_dev_index;
+ u8 serial_no[32];
+ union {
+ u16 target_id;
+ u16 features_mask;
+ };
+
+ u16 lun;
+ u16 features;
+ u8 ses_element_id;
+ u8 link_speed;
+ u16 phys_target_id;
+ u8 reserved[2];
+
+/* This is where fields specific to struct atto_vda_devinfo2 begin. Note
+ * that the structure version starts at one, so applications that union this
+ * structure with struct atto_vda_devinfo can differentiate the two if
+ * desired (see the sketch after this structure).
+ */
+
+ u8 version;
+ #define VDADEVINFO_VERSION0 0x00
+ #define VDADEVINFO_VERSION1 0x01
+ #define VDADEVINFO_VERSION2 0x02
+ #define VDADEVINFO_VERSION3 0x03
+ #define VDADEVINFO_VERSION VDADEVINFO_VERSION3
+
+ u8 reserved2[3];
+
+ /* sector scanning fields */
+
+ u32 ss_curr_errors;
+ u64 ss_curr_scanned;
+ u32 ss_curr_recvrd;
+ u32 ss_scan_length;
+ u32 ss_total_errors;
+ u32 ss_total_recvrd;
+ u32 ss_num_scans;
+
+ /* grp_name was added in version 2 of this structure. */
+
+ char grp_name[15];
+ u8 reserved3[4];
+
+ /* dev_addr_list was added in version 3 of this structure. */
+
+ u8 num_dev_addr;
+ struct atto_dev_addr2 dev_addr_list[8];
+};
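+
+/* illustrative sketch restating the note above: an application holding a
+ * buffer that may contain either structure can test the version byte, which
+ * is assumed never to be zero for struct atto_vda_devinfo2. the consumers
+ * named here are hypothetical.
+ *
+ *	union {
+ *		struct atto_vda_devinfo v1;
+ *		struct atto_vda_devinfo2 v2;
+ *	} *info = buf;
+ *
+ *	if (info->v2.version != VDADEVINFO_VERSION0)
+ *		use_devinfo2(&info->v2);
+ *	else
+ *		use_devinfo(&info->v1);
+ */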
+
+
+struct __packed atto_vda_grp_info {
+ u8 grp_index;
+ #define VDA_MAX_RAID_GROUPS 32
+
+ char grp_name[15];
+ u64 capacity;
+ u32 block_size;
+ u32 interleave;
+ u8 type;
+ #define VDA_GRP_TYPE_RAID0 0
+ #define VDA_GRP_TYPE_RAID1 1
+ #define VDA_GRP_TYPE_RAID4 4
+ #define VDA_GRP_TYPE_RAID5 5
+ #define VDA_GRP_TYPE_RAID6 6
+ #define VDA_GRP_TYPE_RAID10 10
+ #define VDA_GRP_TYPE_RAID40 40
+ #define VDA_GRP_TYPE_RAID50 50
+ #define VDA_GRP_TYPE_RAID60 60
+ #define VDA_GRP_TYPE_DVRAID_HS 252
+ #define VDA_GRP_TYPE_DVRAID_NOHS 253
+ #define VDA_GRP_TYPE_JBOD 254
+ #define VDA_GRP_TYPE_SPARE 255
+
+ union {
+ u8 status;
+ #define VDA_GRP_STAT_INVALID 0x00
+ #define VDA_GRP_STAT_NEW 0x01
+ #define VDA_GRP_STAT_WAITING 0x02
+ #define VDA_GRP_STAT_ONLINE 0x03
+ #define VDA_GRP_STAT_DEGRADED 0x04
+ #define VDA_GRP_STAT_OFFLINE 0x05
+ #define VDA_GRP_STAT_DELETED 0x06
+ #define VDA_GRP_STAT_RECOV_BASIC 0x07
+ #define VDA_GRP_STAT_RECOV_EXTREME 0x08
+
+ u8 op_ctrl;
+ #define VDA_GRP_OP_CTRL_START 0x01
+ #define VDA_GRP_OP_CTRL_HALT 0x02
+ #define VDA_GRP_OP_CTRL_RESUME 0x03
+ #define VDA_GRP_OP_CTRL_CANCEL 0x04
+ };
+
+ u8 rebuild_state;
+ #define VDA_RBLD_NONE 0x00
+ #define VDA_RBLD_REBUILD 0x01
+ #define VDA_RBLD_ERASE 0x02
+ #define VDA_RBLD_PATTERN 0x03
+ #define VDA_RBLD_CONV 0x04
+ #define VDA_RBLD_FULL_INIT 0x05
+ #define VDA_RBLD_QUICK_INIT 0x06
+ #define VDA_RBLD_SECT_SCAN 0x07
+ #define VDA_RBLD_SECT_SCAN_PARITY 0x08
+ #define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09
+ #define VDA_RBLD_RECOV_REBUILD 0x0A
+ #define VDA_RBLD_RECOV_BASIC 0x0B
+ #define VDA_RBLD_RECOV_EXTREME 0x0C
+
+ u8 span_depth;
+ u8 progress;
+ u8 mirror_width;
+ u8 stripe_width;
+ u8 member_cnt;
+
+ union {
+ u16 members[32];
+ #define VDA_MEMBER_MISSING 0xFFFF
+ #define VDA_MEMBER_NEW 0xFFFE
+ u16 features_mask;
+ };
+
+ u16 features;
+ #define VDA_GRP_FEAT_HOTSWAP 0x0001
+ #define VDA_GRP_FEAT_SPDRD_MASK 0x0006
+ #define VDA_GRP_FEAT_SPDRD_DIS 0x0000
+ #define VDA_GRP_FEAT_SPDRD_ENB 0x0002
+ #define VDA_GRP_FEAT_SPDRD_AUTO 0x0004
+ #define VDA_GRP_FEAT_IDENT 0x0008
+ #define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030
+ #define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010
+ #define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020
+ #define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030
+ #define VDA_GRP_FEAT_WRITE_CACHE 0x0040
+ #define VDA_GRP_FEAT_RBLD_RESUME 0x0080
+ #define VDA_GRP_FEAT_SECT_RESUME 0x0100
+ #define VDA_GRP_FEAT_INIT_RESUME 0x0200
+ #define VDA_GRP_FEAT_SSD 0x0400
+ #define VDA_GRP_FEAT_BOOT_DEV 0x0800
+
+ /*
+ * for backward compatibility, a prefetch value of zero means the
+ * setting is ignored/unsupported. the firmware-supported values
+ * 0-6 are therefore incremented to 1-7 (see the decoding sketch
+ * after this structure).
+ */
+
+ u8 prefetch;
+ u8 op_status;
+ #define VDAGRPOPSTAT_MASK 0x0F
+ #define VDAGRPOPSTAT_INVALID 0x00
+ #define VDAGRPOPSTAT_OK 0x01
+ #define VDAGRPOPSTAT_FAULTED 0x02
+ #define VDAGRPOPSTAT_HALTED 0x03
+ #define VDAGRPOPSTAT_INT 0x04
+ #define VDAGRPOPPROC_MASK 0xF0
+ #define VDAGRPOPPROC_STARTABLE 0x10
+ #define VDAGRPOPPROC_CANCELABLE 0x20
+ #define VDAGRPOPPROC_RESUMABLE 0x40
+ #define VDAGRPOPPROC_HALTABLE 0x80
+ u8 over_provision;
+ u8 reserved[3];
+
+};
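+
+/* illustrative sketch: decoding the biased prefetch field per the comment
+ * above, where 0 means the setting is unreported and 1-7 encode the
+ * firmware values 0-6.
+ *
+ *	if (grp_info->prefetch != 0)
+ *		fw_prefetch = grp_info->prefetch - 1;
+ */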
+
+
+struct __packed atto_vdapart_info {
+ u8 part_no;
+ #define VDA_MAX_PARTITIONS 128
+
+ char grp_name[15];
+ u64 part_size;
+ u64 start_lba;
+ u32 block_size;
+ u16 target_id;
+ u8 LUN;
+ char serial_no[41];
+ u8 features;
+ #define VDAPI_FEAT_WRITE_CACHE 0x01
+
+ u8 reserved[7];
+};
+
+
+struct __packed atto_vda_dh_info {
+ u8 req_type;
+ #define VDADH_RQTYPE_CACHE 0x01
+ #define VDADH_RQTYPE_FETCH 0x02
+ #define VDADH_RQTYPE_SET_STAT 0x03
+ #define VDADH_RQTYPE_GET_STAT 0x04
+
+ u8 req_qual;
+ #define VDADH_RQQUAL_SMART 0x01
+ #define VDADH_RQQUAL_MEDDEF 0x02
+ #define VDADH_RQQUAL_INFOEXC 0x04
+
+ u8 num_smart_attribs;
+ u8 status;
+ #define VDADH_STAT_DISABLE 0x00
+ #define VDADH_STAT_ENABLE 0x01
+
+ u32 med_defect_cnt;
+ u32 info_exc_cnt;
+ u8 smart_status;
+ #define VDADH_SMARTSTAT_OK 0x00
+ #define VDADH_SMARTSTAT_ERR 0x01
+
+ u8 reserved[35];
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_dh_smart {
+ u8 attrib_id;
+ u8 current_val;
+ u8 worst;
+ u8 threshold;
+ u8 raw_data[6];
+ u8 raw_attrib_status;
+ #define VDADHSM_RAWSTAT_PREFAIL_WARRANTY 0x01
+ #define VDADHSM_RAWSTAT_ONLINE_COLLECTION 0x02
+ #define VDADHSM_RAWSTAT_PERFORMANCE_ATTR 0x04
+ #define VDADHSM_RAWSTAT_ERROR_RATE_ATTR 0x08
+ #define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR 0x10
+ #define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR 0x20
+
+ u8 calc_attrib_status;
+ #define VDADHSM_CALCSTAT_UNKNOWN 0x00
+ #define VDADHSM_CALCSTAT_GOOD 0x01
+ #define VDADHSM_CALCSTAT_PREFAIL 0x02
+ #define VDADHSM_CALCSTAT_OLDAGE 0x03
+
+ u8 reserved[4];
+};
+
+
+struct __packed atto_vda_metrics_info {
+ u8 data_version;
+ #define VDAMET_VERSION0 0x00
+ #define VDAMET_VERSION VDAMET_VERSION0
+
+ u8 metrics_action;
+ #define VDAMET_METACT_NONE 0x00
+ #define VDAMET_METACT_START 0x01
+ #define VDAMET_METACT_STOP 0x02
+ #define VDAMET_METACT_RETRIEVE 0x03
+ #define VDAMET_METACT_CLEAR 0x04
+
+ u8 test_action;
+ #define VDAMET_TSTACT_NONE 0x00
+ #define VDAMET_TSTACT_STRT_INIT 0x01
+ #define VDAMET_TSTACT_STRT_READ 0x02
+ #define VDAMET_TSTACT_STRT_VERIFY 0x03
+ #define VDAMET_TSTACT_STRT_INIT_VERIFY 0x04
+ #define VDAMET_TSTACT_STOP 0x05
+
+ u8 num_dev_indexes;
+ #define VDAMET_ALL_DEVICES 0xFF
+
+ u16 dev_indexes[32];
+ u8 reserved[12];
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_metrics_data {
+ u16 dev_index;
+ u16 length;
+ #define VDAMD_LEN_LAST 0x8000
+ #define VDAMD_LEN_MASK 0x0FFF
+
+ u32 flags;
+ #define VDAMDF_RUN 0x00000007
+ #define VDAMDF_RUN_READ 0x00000001
+ #define VDAMDF_RUN_WRITE 0x00000002
+ #define VDAMDF_RUN_ALL 0x00000004
+ #define VDAMDF_READ 0x00000010
+ #define VDAMDF_WRITE 0x00000020
+ #define VDAMDF_ALL 0x00000040
+ #define VDAMDF_DRIVETEST 0x40000000
+ #define VDAMDF_NEW 0x80000000
+
+ u64 total_read_data;
+ u64 total_write_data;
+ u64 total_read_io;
+ u64 total_write_io;
+ u64 read_start_time;
+ u64 read_stop_time;
+ u64 write_start_time;
+ u64 write_stop_time;
+ u64 read_maxio_time;
+ u64 write_maxio_time;
+ u64 read_totalio_time;
+ u64 write_totalio_time;
+ u64 read_total_errs;
+ u64 write_total_errs;
+ u64 read_recvrd_errs;
+ u64 write_recvrd_errs;
+ u64 miscompares;
+};
+
+
+struct __packed atto_vda_schedule_info {
+ u8 schedule_type;
+ #define VDASI_SCHTYPE_ONETIME 0x01
+ #define VDASI_SCHTYPE_DAILY 0x02
+ #define VDASI_SCHTYPE_WEEKLY 0x03
+
+ u8 operation;
+ #define VDASI_OP_NONE 0x00
+ #define VDASI_OP_CREATE 0x01
+ #define VDASI_OP_CANCEL 0x02
+
+ u8 hour;
+ u8 minute;
+ u8 day;
+ #define VDASI_DAY_NONE 0x00
+
+ u8 progress;
+ #define VDASI_PROG_NONE 0xFF
+
+ u8 event_type;
+ #define VDASI_EVTTYPE_SECT_SCAN 0x01
+ #define VDASI_EVTTYPE_SECT_SCAN_PARITY 0x02
+ #define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX 0x03
+
+ u8 recurrences;
+ #define VDASI_RECUR_FOREVER 0x00
+
+ u32 id;
+ #define VDASI_ID_NONE 0x00
+
+ char grp_name[15];
+ u8 reserved[85];
+};
+
+
+struct __packed atto_vda_n_vcache_info {
+ u8 super_cap_status;
+ #define VDANVCI_SUPERCAP_NOT_PRESENT 0x00
+ #define VDANVCI_SUPERCAP_FULLY_CHARGED 0x01
+ #define VDANVCI_SUPERCAP_NOT_CHARGED 0x02
+
+ u8 nvcache_module_status;
+ #define VDANVCI_NVCACHEMODULE_NOT_PRESENT 0x00
+ #define VDANVCI_NVCACHEMODULE_PRESENT 0x01
+
+ u8 protection_mode;
+ #define VDANVCI_PROTMODE_HI_PROTECT 0x00
+ #define VDANVCI_PROTMODE_HI_PERFORM 0x01
+
+ u8 reserved[109];
+};
+
+
+struct __packed atto_vda_buzzer_info {
+ u8 status;
+ #define VDABUZZI_BUZZER_OFF 0x00
+ #define VDABUZZI_BUZZER_ON 0x01
+ #define VDABUZZI_BUZZER_LAST 0x02
+
+ u8 reserved[3];
+ u32 duration;
+ #define VDABUZZI_DURATION_INDEFINITE 0xffffffff
+
+ u8 reserved2[104];
+};
+
+
+struct __packed atto_vda_adapter_info {
+ u8 version;
+ #define VDAADAPINFO_VERSION0 0x00
+ #define VDAADAPINFO_VERSION VDAADAPINFO_VERSION0
+
+ u8 reserved;
+ signed short utc_offset;
+ u32 utc_time;
+ u32 features;
+ #define VDA_ADAP_FEAT_IDENT 0x0001
+ #define VDA_ADAP_FEAT_BUZZ_ERR 0x0002
+ #define VDA_ADAP_FEAT_UTC_TIME 0x0004
+
+ u32 valid_features;
+ char active_config[33];
+ u8 temp_count;
+ u8 fan_count;
+ u8 reserved3[61];
+};
+
+
+struct __packed atto_vda_temp_info {
+ u8 temp_index;
+ u8 max_op_temp;
+ u8 min_op_temp;
+ u8 op_temp_warn;
+ u8 temperature;
+ u8 type;
+ #define VDA_TEMP_TYPE_CPU 1
+
+ u8 reserved[106];
+};
+
+
+struct __packed atto_vda_fan_info {
+ u8 fan_index;
+ u8 status;
+ #define VDA_FAN_STAT_UNKNOWN 0
+ #define VDA_FAN_STAT_NORMAL 1
+ #define VDA_FAN_STAT_FAIL 2
+
+ u16 crit_threshold;
+ u16 warn_threshold;
+ u16 speed;
+ u8 reserved[104];
+};
+
+
+/* VDA management commands */
+
+#define VDAMGT_DEV_SCAN 0x00
+#define VDAMGT_DEV_INFO 0x01
+#define VDAMGT_DEV_CLEAN 0x02
+#define VDAMGT_DEV_IDENTIFY 0x03
+#define VDAMGT_DEV_IDENTSTOP 0x04
+#define VDAMGT_DEV_PT_INFO 0x05
+#define VDAMGT_DEV_FEATURES 0x06
+#define VDAMGT_DEV_PT_FEATURES 0x07
+#define VDAMGT_DEV_HEALTH_REQ 0x08
+#define VDAMGT_DEV_METRICS 0x09
+#define VDAMGT_DEV_INFO2 0x0A
+#define VDAMGT_DEV_OPERATION 0x0B
+#define VDAMGT_DEV_INFO2_BYADDR 0x0C
+#define VDAMGT_GRP_INFO 0x10
+#define VDAMGT_GRP_CREATE 0x11
+#define VDAMGT_GRP_DELETE 0x12
+#define VDAMGT_ADD_STORAGE 0x13
+#define VDAMGT_MEMBER_ADD 0x14
+#define VDAMGT_GRP_COMMIT 0x15
+#define VDAMGT_GRP_REBUILD 0x16
+#define VDAMGT_GRP_COMMIT_INIT 0x17
+#define VDAMGT_QUICK_RAID 0x18
+#define VDAMGT_GRP_FEATURES 0x19
+#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP 0x1A
+#define VDAMGT_QUICK_RAID_INIT_AUTOMAP 0x1B
+#define VDAMGT_GRP_OPERATION 0x1C
+#define VDAMGT_CFG_SAVE 0x20
+#define VDAMGT_LAST_ERROR 0x21
+#define VDAMGT_ADAP_INFO 0x22
+#define VDAMGT_ADAP_FEATURES 0x23
+#define VDAMGT_TEMP_INFO 0x24
+#define VDAMGT_FAN_INFO 0x25
+#define VDAMGT_PART_INFO 0x30
+#define VDAMGT_PART_MAP 0x31
+#define VDAMGT_PART_UNMAP 0x32
+#define VDAMGT_PART_AUTOMAP 0x33
+#define VDAMGT_PART_SPLIT 0x34
+#define VDAMGT_PART_MERGE 0x35
+#define VDAMGT_SPARE_LIST 0x40
+#define VDAMGT_SPARE_ADD 0x41
+#define VDAMGT_SPARE_REMOVE 0x42
+#define VDAMGT_LOCAL_SPARE_ADD 0x43
+#define VDAMGT_SCHEDULE_EVENT 0x50
+#define VDAMGT_SCHEDULE_INFO 0x51
+#define VDAMGT_NVCACHE_INFO 0x60
+#define VDAMGT_NVCACHE_SET 0x61
+#define VDAMGT_BUZZER_INFO 0x70
+#define VDAMGT_BUZZER_SET 0x71
+
+
+struct __packed atto_vda_ae_hdr {
+ u8 bylength;
+ u8 byflags;
+ #define VDAAE_HDRF_EVENT_ACK 0x01
+
+ u8 byversion;
+ #define VDAAE_HDR_VER_0 0
+
+ u8 bytype;
+ #define VDAAE_HDR_TYPE_RAID 1
+ #define VDAAE_HDR_TYPE_LU 2
+ #define VDAAE_HDR_TYPE_DISK 3
+ #define VDAAE_HDR_TYPE_RESET 4
+ #define VDAAE_HDR_TYPE_LOG_INFO 5
+ #define VDAAE_HDR_TYPE_LOG_WARN 6
+ #define VDAAE_HDR_TYPE_LOG_CRIT 7
+ #define VDAAE_HDR_TYPE_LOG_FAIL 8
+ #define VDAAE_HDR_TYPE_NVC 9
+ #define VDAAE_HDR_TYPE_TLG_INFO 10
+ #define VDAAE_HDR_TYPE_TLG_WARN 11
+ #define VDAAE_HDR_TYPE_TLG_CRIT 12
+ #define VDAAE_HDR_TYPE_PWRMGT 13
+ #define VDAAE_HDR_TYPE_MUTE 14
+ #define VDAAE_HDR_TYPE_DEV 15
+};
+
+
+struct __packed atto_vda_ae_raid {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwflags;
+ #define VDAAE_GROUP_STATE 0x00000001
+ #define VDAAE_RBLD_STATE 0x00000002
+ #define VDAAE_RBLD_PROG 0x00000004
+ #define VDAAE_MEMBER_CHG 0x00000008
+ #define VDAAE_PART_CHG 0x00000010
+ #define VDAAE_MEM_STATE_CHG 0x00000020
+
+ u8 bygroup_state;
+ #define VDAAE_RAID_INVALID 0
+ #define VDAAE_RAID_NEW 1
+ #define VDAAE_RAID_WAITING 2
+ #define VDAAE_RAID_ONLINE 3
+ #define VDAAE_RAID_DEGRADED 4
+ #define VDAAE_RAID_OFFLINE 5
+ #define VDAAE_RAID_DELETED 6
+ #define VDAAE_RAID_BASIC 7
+ #define VDAAE_RAID_EXTREME 8
+ #define VDAAE_RAID_UNKNOWN 9
+
+ u8 byrebuild_state;
+ #define VDAAE_RBLD_NONE 0
+ #define VDAAE_RBLD_REBUILD 1
+ #define VDAAE_RBLD_ERASE 2
+ #define VDAAE_RBLD_PATTERN 3
+ #define VDAAE_RBLD_CONV 4
+ #define VDAAE_RBLD_FULL_INIT 5
+ #define VDAAE_RBLD_QUICK_INIT 6
+ #define VDAAE_RBLD_SECT_SCAN 7
+ #define VDAAE_RBLD_SECT_SCAN_PARITY 8
+ #define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9
+ #define VDAAE_RBLD_RECOV_REBUILD 10
+ #define VDAAE_RBLD_UNKNOWN 11
+
+ u8 byrebuild_progress;
+ u8 op_status;
+ #define VDAAE_GRPOPSTAT_MASK 0x0F
+ #define VDAAE_GRPOPSTAT_INVALID 0x00
+ #define VDAAE_GRPOPSTAT_OK 0x01
+ #define VDAAE_GRPOPSTAT_FAULTED 0x02
+ #define VDAAE_GRPOPSTAT_HALTED 0x03
+ #define VDAAE_GRPOPSTAT_INT 0x04
+ #define VDAAE_GRPOPPROC_MASK 0xF0
+ #define VDAAE_GRPOPPROC_STARTABLE 0x10
+ #define VDAAE_GRPOPPROC_CANCELABLE 0x20
+ #define VDAAE_GRPOPPROC_RESUMABLE 0x40
+ #define VDAAE_GRPOPPROC_HALTABLE 0x80
+ char acname[15];
+ u8 byreserved;
+ u8 byreserved2[0x80 - 0x1C];
+};
+
+
+struct __packed atto_vda_ae_lu_tgt_lun {
+ u16 wtarget_id;
+ u8 bylun;
+ u8 byreserved;
+};
+
+
+struct __packed atto_vda_ae_lu_tgt_lun_raid {
+ u16 wtarget_id;
+ u8 bylun;
+ u8 byreserved;
+ u32 dwinterleave;
+ u32 dwblock_size;
+};
+
+
+struct __packed atto_vda_ae_lu {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwevent;
+ #define VDAAE_LU_DISC 0x00000001
+ #define VDAAE_LU_LOST 0x00000002
+ #define VDAAE_LU_STATE 0x00000004
+ #define VDAAE_LU_PASSTHROUGH 0x10000000
+ #define VDAAE_LU_PHYS_ID 0x20000000
+
+ u8 bystate;
+ #define VDAAE_LU_UNDEFINED 0
+ #define VDAAE_LU_NOT_PRESENT 1
+ #define VDAAE_LU_OFFLINE 2
+ #define VDAAE_LU_ONLINE 3
+ #define VDAAE_LU_DEGRADED 4
+ #define VDAAE_LU_FACTORY_DISABLED 5
+ #define VDAAE_LU_DELETED 6
+ #define VDAAE_LU_BUSSCAN 7
+ #define VDAAE_LU_UNKNOWN 8
+
+ u8 byreserved;
+ u16 wphys_target_id;
+
+ union {
+ struct atto_vda_ae_lu_tgt_lun tgtlun;
+ struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid;
+ } id;
+};
+
+
+struct __packed atto_vda_ae_disk {
+ struct atto_vda_ae_hdr hdr;
+};
+
+
+#define VDAAE_LOG_STRSZ 64
+
+struct __packed atto_vda_ae_log {
+ struct atto_vda_ae_hdr hdr;
+ char aclog_ascii[VDAAE_LOG_STRSZ];
+};
+
+
+#define VDAAE_TLG_STRSZ 56
+
+struct __packed atto_vda_ae_timestamp_log {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwtimestamp;
+ char aclog_ascii[VDAAE_TLG_STRSZ];
+};
+
+
+struct __packed atto_vda_ae_nvc {
+ struct atto_vda_ae_hdr hdr;
+};
+
+
+struct __packed atto_vda_ae_dev {
+ struct atto_vda_ae_hdr hdr;
+ struct atto_dev_addr devaddr;
+};
+
+
+union atto_vda_ae {
+ struct atto_vda_ae_hdr hdr;
+ struct atto_vda_ae_disk disk;
+ struct atto_vda_ae_lu lu;
+ struct atto_vda_ae_raid raid;
+ struct atto_vda_ae_log log;
+ struct atto_vda_ae_timestamp_log tslog;
+ struct atto_vda_ae_nvc nvcache;
+ struct atto_vda_ae_dev dev;
+};
+
+
+struct __packed atto_vda_date_and_time {
+ u8 flags;
+ #define VDA_DT_DAY_MASK 0x07
+ #define VDA_DT_DAY_NONE 0x00
+ #define VDA_DT_DAY_SUN 0x01
+ #define VDA_DT_DAY_MON 0x02
+ #define VDA_DT_DAY_TUE 0x03
+ #define VDA_DT_DAY_WED 0x04
+ #define VDA_DT_DAY_THU 0x05
+ #define VDA_DT_DAY_FRI 0x06
+ #define VDA_DT_DAY_SAT 0x07
+ #define VDA_DT_PM 0x40
+ #define VDA_DT_MILITARY 0x80
+
+ u8 seconds;
+ u8 minutes;
+ u8 hours;
+ u8 day;
+ u8 month;
+ u16 year;
+};
+
+#define SGE_LEN_LIMIT 0x003FFFFF /*! mask of segment length */
+#define SGE_LEN_MAX 0x003FF000 /*! maximum segment length */
+#define SGE_LAST 0x01000000 /*! last entry */
+#define SGE_ADDR_64 0x04000000 /*! 64-bit addressing flag */
+#define SGE_CHAIN 0x80000000 /*! chain descriptor flag */
+#define SGE_CHAIN_LEN 0x0000FFFF /*! mask of length in chain entries */
+#define SGE_CHAIN_SZ 0x00FF0000 /*! mask of size of chained buffer */
+
+
+struct __packed atto_vda_cfg_init {
+ struct atto_vda_date_and_time date_time;
+ u32 sgl_page_size;
+ u32 vda_version;
+ u32 fw_version;
+ u32 fw_build;
+ u32 fw_release;
+ u32 epoch_time;
+ u32 ioctl_tunnel;
+ #define VDA_ITF_MEM_RW 0x00000001
+ #define VDA_ITF_TRACE 0x00000002
+ #define VDA_ITF_SCSI_PASS_THRU 0x00000004
+ #define VDA_ITF_GET_DEV_ADDR 0x00000008
+ #define VDA_ITF_PHY_CTRL 0x00000010
+ #define VDA_ITF_CONN_CTRL 0x00000020
+ #define VDA_ITF_GET_DEV_INFO 0x00000040
+
+ u32 num_targets_backend;
+ u8 reserved[0x48];
+};
+
+
+/* configuration commands */
+
+#define VDA_CFG_INIT 0x00
+#define VDA_CFG_GET_INIT 0x01
+#define VDA_CFG_GET_INIT2 0x02
+
+
+/*! physical region descriptor (PRD) aka scatter/gather entry */
+
+struct __packed atto_physical_region_description {
+ u64 address;
+ u32 ctl_len;
+ #define PRD_LEN_LIMIT 0x003FFFFF
+ #define PRD_LEN_MAX 0x003FF000
+ #define PRD_NXT_PRD_CNT 0x0000007F
+ #define PRD_CHAIN 0x01000000
+ #define PRD_DATA 0x00000000
+ #define PRD_INT_SEL 0xF0000000
+ #define PRD_INT_SEL_F0 0x00000000
+ #define PRD_INT_SEL_F1 0x40000000
+ #define PRD_INT_SEL_F2 0x80000000
+ #define PRD_INT_SEL_F3 0xc0000000
+ #define PRD_INT_SEL_SRAM 0x10000000
+ #define PRD_INT_SEL_PBSR 0x20000000
+
+};
+
+/* Request types. NOTE that ALL requests have the same layout for the first
+ * few bytes.
+ */
+struct __packed atto_vda_req_header {
+ u32 length;
+ u8 function;
+ u8 variable1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+};
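+
+/* illustrative sketch: because every request starts with this header, a
+ * completion path can dispatch on the function byte alone. the handlers are
+ * hypothetical.
+ *
+ *	struct atto_vda_req_header *hdr = req_buf;
+ *
+ *	switch (hdr->function) {
+ *	case VDA_FUNC_SCSI:
+ *		complete_scsi_req(hdr);
+ *		break;
+ *	case VDA_FUNC_MGT:
+ *		complete_mgt_req(hdr);
+ *		break;
+ *	}
+ */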
+
+
+#define FCP_CDB_SIZE 16
+
+struct __packed atto_vda_scsi_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_SCSI */
+ u8 sense_len;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 flags;
+ #define FCP_CMND_LUN_MASK 0x000000FF
+ #define FCP_CMND_TA_MASK 0x00000700
+ #define FCP_CMND_TA_SIMPL_Q 0x00000000
+ #define FCP_CMND_TA_HEAD_Q 0x00000100
+ #define FCP_CMND_TA_ORDRD_Q 0x00000200
+ #define FCP_CMND_TA_ACA 0x00000400
+ #define FCP_CMND_PRI_MASK 0x00007800
+ #define FCP_CMND_TM_MASK 0x00FF0000
+ #define FCP_CMND_ATS 0x00020000
+ #define FCP_CMND_CTS 0x00040000
+ #define FCP_CMND_LRS 0x00100000
+ #define FCP_CMND_TRS 0x00200000
+ #define FCP_CMND_CLA 0x00400000
+ #define FCP_CMND_TRM 0x00800000
+ #define FCP_CMND_DATA_DIR 0x03000000
+ #define FCP_CMND_WRD 0x01000000
+ #define FCP_CMND_RDD 0x02000000
+
+ u8 cdb[FCP_CDB_SIZE];
+ union {
+ struct __packed {
+ u64 ppsense_buf;
+ u16 target_id;
+ u8 iblk_cnt_prd;
+ u8 reserved;
+ };
+
+ struct atto_physical_region_description sense_buff_prd;
+ };
+
+ union {
+ struct atto_vda_sge sge[1];
+
+ u32 abort_handle;
+ u32 dwords[245];
+ struct atto_physical_region_description prd[1];
+ } u;
+};
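+
+/* illustrative sketch, assuming little-endian request fields: the minimum
+ * setup for a read-type SCSI request. handle and SGE setup are adapter
+ * specific and omitted.
+ *
+ *	memset(rq, 0, sizeof(*rq));
+ *	rq->length = cpu_to_le32(sizeof(*rq));
+ *	rq->function = VDA_FUNC_SCSI;
+ *	rq->flags = cpu_to_le32(FCP_CMND_RDD | FCP_CMND_TA_SIMPL_Q);
+ *	rq->target_id = cpu_to_le16(tid);
+ *	memcpy(rq->cdb, cdb, FCP_CDB_SIZE);
+ */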
+
+
+struct __packed atto_vda_flash_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_FLASH */
+ u8 sub_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 flash_addr;
+ u8 checksum;
+ u8 rsvd[3];
+
+ union {
+ struct {
+ char file_name[16]; /* 8.3 fname, NULL term, wc=* */
+ struct atto_vda_sge sge[1];
+ } file;
+
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[2];
+ } data;
+};
+
+
+struct __packed atto_vda_diag_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_DIAG */
+ u8 sub_func;
+ #define VDA_DIAG_STATUS 0x00
+ #define VDA_DIAG_RESET 0x01
+ #define VDA_DIAG_PAUSE 0x02
+ #define VDA_DIAG_RESUME 0x03
+ #define VDA_DIAG_READ 0x04
+ #define VDA_DIAG_WRITE 0x05
+
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 rsvd;
+ u64 local_addr;
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_ae_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_AE */
+ u8 reserved1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[1];
+ };
+};
+
+
+struct __packed atto_vda_cli_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_CLI */
+ u8 reserved1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 cmd_rsp_len;
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_ioctl_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_IOCTL */
+ u8 sub_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ struct atto_vda_sge reserved_sge;
+ struct atto_physical_region_description reserved_prde;
+ };
+
+ union {
+ struct {
+ u32 ctrl_code;
+ u16 target_id;
+ u8 lun;
+ u8 reserved;
+ } csmi;
+ };
+
+ union {
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[1];
+ };
+};
+
+
+struct __packed atto_vda_cfg_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_CFG */
+ u8 sub_func;
+ u8 rsvd1;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ u8 bytes[116];
+ struct atto_vda_cfg_init init;
+ struct atto_vda_sge sge;
+ struct atto_physical_region_description prde;
+ } data;
+};
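+
+/* illustrative sketch: a VDA_CFG_GET_INIT request needs only the fixed
+ * header fields; firmware returns struct atto_vda_cfg_init in data.init.
+ * endianness handling is assumed to match the SCSI request sketch above.
+ *
+ *	memset(cfg, 0, sizeof(*cfg));
+ *	cfg->length = cpu_to_le32(sizeof(*cfg));
+ *	cfg->function = VDA_FUNC_CFG;
+ *	cfg->sub_func = VDA_CFG_GET_INIT;
+ */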
+
+
+struct __packed atto_vda_mgmt_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_MGT */
+ u8 mgt_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u8 scan_generation;
+ u8 payld_sglst_offset;
+ u16 dev_index;
+ u32 payld_length;
+ u32 pad;
+ union {
+ struct atto_vda_sge sge[2];
+ struct atto_physical_region_description prde[2];
+ };
+ struct atto_vda_sge payld_sge[1];
+};
+
+
+union atto_vda_req {
+ struct atto_vda_scsi_req scsi;
+ struct atto_vda_flash_req flash;
+ struct atto_vda_diag_req diag;
+ struct atto_vda_ae_req ae;
+ struct atto_vda_cli_req cli;
+ struct atto_vda_ioctl_req ioctl;
+ struct atto_vda_cfg_req cfg;
+ struct atto_vda_mgmt_req mgt;
+ u8 bytes[1024];
+};
+
+/* Outbound response structures */
+
+struct __packed atto_vda_scsi_rsp {
+ u8 scsi_stat;
+ u8 sense_len;
+ u8 rsvd[2];
+ u32 residual_length;
+};
+
+struct __packed atto_vda_flash_rsp {
+ u32 file_size;
+};
+
+struct __packed atto_vda_ae_rsp {
+ u32 length;
+};
+
+struct __packed atto_vda_cli_rsp {
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_vda_ioctl_rsp {
+ union {
+ struct {
+ u32 csmi_status;
+ u16 target_id;
+ u8 lun;
+ u8 reserved;
+ } csmi;
+ };
+};
+
+struct __packed atto_vda_cfg_rsp {
+ u16 vda_version;
+ u16 fw_release;
+ u32 fw_build;
+};
+
+struct __packed atto_vda_mgmt_rsp {
+ u32 length;
+ u16 dev_index;
+ u8 scan_generation;
+};
+
+union atto_vda_func_rsp {
+ struct atto_vda_scsi_rsp scsi_rsp;
+ struct atto_vda_flash_rsp flash_rsp;
+ struct atto_vda_ae_rsp ae_rsp;
+ struct atto_vda_cli_rsp cli_rsp;
+ struct atto_vda_ioctl_rsp ioctl_rsp;
+ struct atto_vda_cfg_rsp cfg_rsp;
+ struct atto_vda_mgmt_rsp mgt_rsp;
+ u32 dwords[2];
+};
+
+struct __packed atto_vda_ob_rsp {
+ u32 handle;
+ u8 req_stat;
+ u8 rsvd[3];
+
+ union atto_vda_func_rsp func_rsp;
+};
+
+struct __packed atto_vda_ae_data {
+ u8 event_data[256];
+};
+
+struct __packed atto_vda_mgmt_data {
+ union {
+ u8 bytes[112];
+ struct atto_vda_devinfo dev_info;
+ struct atto_vda_grp_info grp_info;
+ struct atto_vdapart_info part_info;
+ struct atto_vda_dh_info dev_health_info;
+ struct atto_vda_metrics_info metrics_info;
+ struct atto_vda_schedule_info sched_info;
+ struct atto_vda_n_vcache_info nvcache_info;
+ struct atto_vda_buzzer_info buzzer_info;
+ } data;
+};
+
+union atto_vda_rsp_data {
+ struct atto_vda_ae_data ae_data;
+ struct atto_vda_mgmt_data mgt_data;
+ u8 sense_data[252];
+ #define SENSE_DATA_SZ 252
+ u8 bytes[256];
+};
+
+#endif
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
new file mode 100644
index 000000000000..3fd305d6b67d
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -0,0 +1,1431 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r.h
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+
+#include "esas2r_log.h"
+#include "atioctl.h"
+#include "atvda.h"
+
+#ifndef ESAS2R_H
+#define ESAS2R_H
+
+/* Global Variables */
+extern struct esas2r_adapter *esas2r_adapters[];
+extern u8 *esas2r_buffered_ioctl;
+extern dma_addr_t esas2r_buffered_ioctl_addr;
+extern u32 esas2r_buffered_ioctl_size;
+extern struct pci_dev *esas2r_buffered_ioctl_pcid;
+#define SGL_PG_SZ_MIN 64
+#define SGL_PG_SZ_MAX 1024
+extern int sgl_page_size;
+#define NUM_SGL_MIN 8
+#define NUM_SGL_MAX 2048
+extern int num_sg_lists;
+#define NUM_REQ_MIN 4
+#define NUM_REQ_MAX 256
+extern int num_requests;
+#define NUM_AE_MIN 2
+#define NUM_AE_MAX 8
+extern int num_ae_requests;
+extern int cmd_per_lun;
+extern int can_queue;
+extern int esas2r_max_sectors;
+extern int sg_tablesize;
+extern int interrupt_mode;
+extern int num_io_requests;
+
+/* Macro definitions */
+#define ESAS2R_MAX_ID 255
+#define MAX_ADAPTERS 32
+#define ESAS2R_DRVR_NAME "esas2r"
+#define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter"
+#define ESAS2R_MAX_DEVICES 32
+#define ATTONODE_NAME "ATTONode"
+#define ESAS2R_MAJOR_REV 1
+#define ESAS2R_MINOR_REV 00
+#define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \
+ DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV)
+#define ESAS2R_COPYRIGHT_YEARS "2001-2013"
+#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384
+#define ESAS2R_DEFAULT_CMD_PER_LUN 64
+#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024
+#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num)
+#define NUM_TO_STR(num) #num
+
+#define ESAS2R_SGL_ALIGN 16
+#define ESAS2R_LIST_ALIGN 16
+#define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA
+#define ESAS2R_DATA_BUF_LEN 256
+#define ESAS2R_DEFAULT_TMO 5000
+#define ESAS2R_DISC_BUF_LEN 512
+#define ESAS2R_FWCOREDUMP_SZ 0x80000
+#define ESAS2R_NUM_PHYS 8
+#define ESAS2R_TARG_ID_INV 0xFFFF
+#define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK
+#define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK
+#define ESAS2R_INT_DIS_MASK 0
+#define ESAS2R_MAX_TARGETS 256
+#define ESAS2R_KOBJ_NAME_LEN 20
+
+/* u16 (WORD) component macros */
+#define LOBYTE(w) ((u8)(u16)(w))
+#define HIBYTE(w) ((u8)(((u16)(w)) >> 8))
+#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8)))
+
+/* u32 (DWORD) component macros */
+#define LOWORD(d) ((u16)(u32)(d))
+#define HIWORD(d) ((u16)(((u32)(d)) >> 16))
+#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16)))
+
+/* macro to get the lowest nonzero bit of a value */
+#define LOBIT(x) ((x) & (0 - (x)))
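+
+/* worked examples: MAKEWORD(0x34, 0x12) == 0x1234, HIBYTE(0x1234) == 0x12,
+ * MAKEDWORD(0x5678, 0x1234) == 0x12345678, LOBIT(0x0018) == 0x0008.
+ */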
+
+/* These functions are provided to access the chip's control registers.
+ * The register is specified by its byte offset from the register base
+ * for the adapter.
+ */
+#define esas2r_read_register_dword(a, reg) \
+ readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG)
+
+#define esas2r_write_register_dword(a, reg, data) \
+ writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG))
+
+#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r)
+
+/* This function is provided to access the chip's data window. The
+ * register is specified by its byte offset from the window base
+ * for the adapter.
+ */
+#define esas2r_read_data_byte(a, reg) \
+ readb((void __iomem *)a->data_window + (reg))
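+
+/* illustrative sketch: register access through the macros above. the
+ * doorbell register and its bits are defined later in this header; acking
+ * the doorbell by writing the bit back is an assumption of this sketch.
+ *
+ *	u32 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ *
+ *	if (doorbell & DRBL_FW_RESET)
+ *		esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ *					    DRBL_FW_RESET);
+ */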
+
+/* ATTO vendor and device Ids */
+#define ATTO_VENDOR_ID 0x117C
+#define ATTO_DID_INTEL_IOP348 0x002C
+#define ATTO_DID_MV_88RC9580 0x0049
+#define ATTO_DID_MV_88RC9580TS 0x0066
+#define ATTO_DID_MV_88RC9580TSE 0x0067
+#define ATTO_DID_MV_88RC9580TL 0x0068
+
+/* ATTO subsystem device Ids */
+#define ATTO_SSDID_TBT 0x4000
+#define ATTO_TSSC_3808 0x4066
+#define ATTO_TSSC_3808E 0x4067
+#define ATTO_TLSH_1068 0x4068
+#define ATTO_ESAS_R680 0x0049
+#define ATTO_ESAS_R608 0x004A
+#define ATTO_ESAS_R60F 0x004B
+#define ATTO_ESAS_R6F0 0x004C
+#define ATTO_ESAS_R644 0x004D
+#define ATTO_ESAS_R648 0x004E
+
+/*
+ * flash definitions & structures
+ * define the code types
+ */
+#define FBT_CPYR 0xAA00
+#define FBT_SETUP 0xAA02
+#define FBT_FLASH_VER 0xAA04
+
+/* offsets to various locations in flash */
+#define FLS_OFFSET_BOOT (u32)(0x00700000)
+#define FLS_OFFSET_NVR (u32)(0x007C0000)
+#define FLS_OFFSET_CPYR FLS_OFFSET_NVR
+#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT)
+#define FLS_BLOCK_SIZE (u32)(0x00020000)
+#define FI_NVR_2KB 0x0800
+#define FI_NVR_8KB 0x2000
+#define FM_BUF_SZ 0x800
+
+/*
+ * marvell frey (88R9580) register definitions
+ * chip revision identifiers
+ */
+#define MVR_FREY_B2 0xB2
+
+/*
+ * memory window definitions. window 0 is the data window with definitions
+ * of MW_DATA_XXX. window 1 is the register window with definitions of
+ * MW_REG_XXX.
+ */
+#define MW_REG_WINDOW_SIZE (u32)(0x00040000)
+#define MW_REG_OFFSET_HWREG (u32)(0x00000000)
+#define MW_REG_OFFSET_PCI (u32)(0x00008000)
+#define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG)
+#define MW_DATA_WINDOW_SIZE (u32)(0x00020000)
+#define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000)
+#define MW_DATA_ADDR_SRAM (u32)(0xF4000000)
+#define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000)
+
+/*
+ * the following registers are for the communication
+ * list interface (AKA message unit (MU))
+ */
+#define MU_IN_LIST_ADDR_LO (u32)(0x00004000)
+#define MU_IN_LIST_ADDR_HI (u32)(0x00004004)
+
+#define MU_IN_LIST_WRITE (u32)(0x00004018)
+ #define MU_ILW_TOGGLE (u32)(0x00004000)
+
+#define MU_IN_LIST_READ (u32)(0x0000401C)
+ #define MU_ILR_TOGGLE (u32)(0x00004000)
+ #define MU_ILIC_LIST (u32)(0x0000000F)
+ #define MU_ILIC_LIST_F0 (u32)(0x00000000)
+ #define MU_ILIC_DEST (u32)(0x00000F00)
+ #define MU_ILIC_DEST_DDR (u32)(0x00000200)
+#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028)
+
+#define MU_IN_LIST_CONFIG (u32)(0x0000402C)
+ #define MU_ILC_ENABLE (u32)(0x00000001)
+ #define MU_ILC_ENTRY_MASK (u32)(0x000000F0)
+ #define MU_ILC_ENTRY_4_DW (u32)(0x00000020)
+ #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000)
+ #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000)
+ #define MU_ILC_NUMBER_SHIFT 16
+
+#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050)
+#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054)
+
+#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058)
+#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C)
+
+#define MU_OUT_LIST_WRITE (u32)(0x00004068)
+ #define MU_OLW_TOGGLE (u32)(0x00004000)
+
+#define MU_OUT_LIST_COPY (u32)(0x0000406C)
+ #define MU_OLC_TOGGLE (u32)(0x00004000)
+ #define MU_OLC_WRT_PTR (u32)(0x00003FFF)
+
+#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078)
+ #define MU_OLIC_LIST (u32)(0x0000000F)
+ #define MU_OLIC_LIST_F0 (u32)(0x00000000)
+ #define MU_OLIC_SOURCE (u32)(0x00000F00)
+ #define MU_OLIC_SOURCE_DDR (u32)(0x00000200)
+
+#define MU_OUT_LIST_CONFIG (u32)(0x0000407C)
+ #define MU_OLC_ENABLE (u32)(0x00000001)
+ #define MU_OLC_ENTRY_MASK (u32)(0x000000F0)
+ #define MU_OLC_ENTRY_4_DW (u32)(0x00000020)
+ #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000)
+ #define MU_OLC_NUMBER_SHIFT 16
+
+#define MU_OUT_LIST_INT_STAT (u32)(0x00004088)
+ #define MU_OLIS_INT (u32)(0x00000001)
+
+#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C)
+ #define MU_OLIS_MASK (u32)(0x00000001)
+
+/*
+ * the maximum size of the communication lists is two greater than the
+ * maximum number of VDA requests. the extra entries prevent queue overflow.
+ */
+#define ESAS2R_MAX_NUM_REQS 256
+#define ESAS2R_NUM_EXTRA 2
+#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA)
+
+/*
+ * the following registers are for the CPU interface
+ */
+#define MU_CTL_STATUS_IN (u32)(0x00010108)
+ #define MU_CTL_IN_FULL_RST (u32)(0x00000020)
+#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130)
+ #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000)
+#define MU_DOORBELL_IN (u32)(0x00010460)
+ #define DRBL_RESET_BUS (u32)(0x00000002)
+ #define DRBL_PAUSE_AE (u32)(0x00000004)
+ #define DRBL_RESUME_AE (u32)(0x00000008)
+ #define DRBL_MSG_IFC_DOWN (u32)(0x00000010)
+ #define DRBL_FLASH_REQ (u32)(0x00000020)
+ #define DRBL_FLASH_DONE (u32)(0x00000040)
+ #define DRBL_FORCE_INT (u32)(0x00000080)
+ #define DRBL_MSG_IFC_INIT (u32)(0x00000100)
+ #define DRBL_POWER_DOWN (u32)(0x00000200)
+ #define DRBL_DRV_VER_1 (u32)(0x00010000)
+ #define DRBL_DRV_VER DRBL_DRV_VER_1
+#define MU_DOORBELL_IN_ENB (u32)(0x00010464)
+#define MU_DOORBELL_OUT (u32)(0x00010480)
+ #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000)
+ #define DRBL_UNUSED_HANDLER (u32)(0x00100000)
+ #define DRBL_UNDEF_INSTR (u32)(0x00200000)
+ #define DRBL_PREFETCH_ABORT (u32)(0x00300000)
+ #define DRBL_DATA_ABORT (u32)(0x00400000)
+ #define DRBL_JUMP_TO_ZERO (u32)(0x00500000)
+ #define DRBL_FW_RESET (u32)(0x00080000)
+ #define DRBL_FW_VER_MSK (u32)(0x00070000)
+ #define DRBL_FW_VER_0 (u32)(0x00000000)
+ #define DRBL_FW_VER_1 (u32)(0x00010000)
+ #define DRBL_FW_VER DRBL_FW_VER_1
+#define MU_DOORBELL_OUT_ENB (u32)(0x00010484)
+ #define DRBL_ENB_MASK (u32)(0x00F803FF)
+#define MU_INT_STATUS_OUT (u32)(0x00010200)
+ #define MU_INTSTAT_POST_OUT (u32)(0x00000010)
+ #define MU_INTSTAT_DRBL_IN (u32)(0x00000100)
+ #define MU_INTSTAT_DRBL (u32)(0x00001000)
+ #define MU_INTSTAT_MASK (u32)(0x00001010)
+#define MU_INT_MASK_OUT (u32)(0x0001020C)
+
+/* PCI express registers accessed via window 1 */
+#define MVR_PCI_WIN1_REMAP (u32)(0x00008438)
+ #define MVRPW1R_ENABLE (u32)(0x00000001)
+
+
+/* structures */
+
+/* inbound list dynamic source entry */
+struct esas2r_inbound_list_source_entry {
+ u64 address;
+ u32 length;
+ #define HWILSE_INTERFACE_F0 0x00000000
+ u32 reserved;
+};
+
+/* PCI data structure in expansion ROM images */
+struct __packed esas2r_boot_header {
+ char signature[4];
+ u16 vendor_id;
+ u16 device_id;
+ u16 VPD;
+ u16 struct_length;
+ u8 struct_revision;
+ u8 class_code[3];
+ u16 image_length;
+ u16 code_revision;
+ u8 code_type;
+ #define CODE_TYPE_PC 0
+ #define CODE_TYPE_OPEN 1
+ #define CODE_TYPE_EFI 3
+ u8 indicator;
+ #define INDICATOR_LAST 0x80
+ u8 reserved[2];
+};
+
+struct __packed esas2r_boot_image {
+ u16 signature;
+ u8 reserved[22];
+ u16 header_offset;
+ u16 pnp_offset;
+};
+
+struct __packed esas2r_pc_image {
+ u16 signature;
+ u8 length;
+ u8 entry_point[3];
+ u8 checksum;
+ u16 image_end;
+ u16 min_size;
+ u8 rom_flags;
+ u8 reserved[12];
+ u16 header_offset;
+ u16 pnp_offset;
+ struct esas2r_boot_header boot_image;
+};
+
+struct __packed esas2r_efi_image {
+ u16 signature;
+ u16 length;
+ u32 efi_signature;
+ #define EFI_ROM_SIG 0x00000EF1
+ u16 image_type;
+ #define EFI_IMAGE_APP 10
+ #define EFI_IMAGE_BSD 11
+ #define EFI_IMAGE_RTD 12
+ u16 machine_type;
+ #define EFI_MACHINE_IA32 0x014c
+ #define EFI_MACHINE_IA64 0x0200
+ #define EFI_MACHINE_X64 0x8664
+ #define EFI_MACHINE_EBC 0x0EBC
+ u16 compression;
+ #define EFI_UNCOMPRESSED 0x0000
+ #define EFI_COMPRESSED 0x0001
+ u8 reserved[8];
+ u16 efi_offset;
+ u16 header_offset;
+ u16 reserved2;
+ struct esas2r_boot_header boot_image;
+};
+
+struct esas2r_adapter;
+struct esas2r_sg_context;
+struct esas2r_request;
+
+typedef void (*RQCALLBK) (struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+
+struct esas2r_component_header {
+ u8 img_type;
+ #define CH_IT_FW 0x00
+ #define CH_IT_NVR 0x01
+ #define CH_IT_BIOS 0x02
+ #define CH_IT_MAC 0x03
+ #define CH_IT_CFG 0x04
+ #define CH_IT_EFI 0x05
+ u8 status;
+ #define CH_STAT_PENDING 0xff
+ #define CH_STAT_FAILED 0x00
+ #define CH_STAT_SUCCESS 0x01
+ #define CH_STAT_RETRY 0x02
+ #define CH_STAT_INVALID 0x03
+ u8 pad[2];
+ u32 version;
+ u32 length;
+ u32 image_offset;
+};
+
+#define FI_REL_VER_SZ 16
+
+struct esas2r_flash_img_v0 {
+ u8 fi_version;
+ #define FI_VERSION_0 00
+ u8 status;
+ u8 adap_typ;
+ u8 action;
+ u32 length;
+ u16 checksum;
+ u16 driver_error;
+ u16 flags;
+ u16 num_comps;
+ #define FI_NUM_COMPS_V0 5
+ u8 rel_version[FI_REL_VER_SZ];
+ struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0];
+ u8 scratch_buf[FM_BUF_SZ];
+};
+
+struct esas2r_flash_img {
+ u8 fi_version;
+ #define FI_VERSION_1 01
+ u8 status;
+ #define FI_STAT_SUCCESS 0x00
+ #define FI_STAT_FAILED 0x01
+ #define FI_STAT_REBOOT 0x02
+ #define FI_STAT_ADAPTYP 0x03
+ #define FI_STAT_INVALID 0x04
+ #define FI_STAT_CHKSUM 0x05
+ #define FI_STAT_LENGTH 0x06
+ #define FI_STAT_UNKNOWN 0x07
+ #define FI_STAT_IMG_VER 0x08
+ #define FI_STAT_BUSY 0x09
+ #define FI_STAT_DUAL 0x0A
+ #define FI_STAT_MISSING 0x0B
+ #define FI_STAT_UNSUPP 0x0C
+ #define FI_STAT_ERASE 0x0D
+ #define FI_STAT_FLASH 0x0E
+ #define FI_STAT_DEGRADED 0x0F
+ u8 adap_typ;
+ #define FI_AT_UNKNWN 0xFF
+ #define FI_AT_SUN_LAKE 0x0B
+ #define FI_AT_MV_9580 0x0F
+ u8 action;
+ #define FI_ACT_DOWN 0x00
+ #define FI_ACT_UP 0x01
+ #define FI_ACT_UPSZ 0x02
+ #define FI_ACT_MAX 0x02
+ #define FI_ACT_DOWN1 0x80
+ u32 length;
+ u16 checksum;
+ u16 driver_error;
+ u16 flags;
+ #define FI_FLG_NVR_DEF 0x0001
+ u16 num_comps;
+ #define FI_NUM_COMPS_V1 6
+ u8 rel_version[FI_REL_VER_SZ];
+ struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1];
+ u8 scratch_buf[FM_BUF_SZ];
+};
+
+/* definitions for flash script (FS) commands */
+struct esas2r_ioctlfs_command {
+ u8 command;
+ #define ESAS2R_FS_CMD_ERASE 0
+ #define ESAS2R_FS_CMD_READ 1
+ #define ESAS2R_FS_CMD_BEGINW 2
+ #define ESAS2R_FS_CMD_WRITE 3
+ #define ESAS2R_FS_CMD_COMMIT 4
+ #define ESAS2R_FS_CMD_CANCEL 5
+ u8 checksum;
+ u8 reserved[2];
+ u32 flash_addr;
+ u32 length;
+ u32 image_offset;
+};
+
+struct esas2r_ioctl_fs {
+ u8 version;
+ #define ESAS2R_FS_VER 0
+ u8 status;
+ u8 driver_error;
+ u8 adap_type;
+ #define ESAS2R_FS_AT_ESASRAID2 3
+ #define ESAS2R_FS_AT_TSSASRAID2 4
+ #define ESAS2R_FS_AT_TSSASRAID2E 5
+ #define ESAS2R_FS_AT_TLSASHBA 6
+ u8 driver_ver;
+ u8 reserved[11];
+ struct esas2r_ioctlfs_command command;
+ u8 data[1];
+};
+
+struct esas2r_sas_nvram {
+ u8 signature[4];
+ u8 version;
+ #define SASNVR_VERSION_0 0x00
+ #define SASNVR_VERSION SASNVR_VERSION_0
+ u8 checksum;
+ #define SASNVR_CKSUM_SEED 0x5A
+ u8 max_lun_for_target;
+ u8 pci_latency;
+ #define SASNVR_PCILAT_DIS 0x00
+ #define SASNVR_PCILAT_MIN 0x10
+ #define SASNVR_PCILAT_MAX 0xF8
+ u8 options1;
+ #define SASNVR1_BOOT_DRVR 0x01
+ #define SASNVR1_BOOT_SCAN 0x02
+ #define SASNVR1_DIS_PCI_MWI 0x04
+ #define SASNVR1_FORCE_ORD_Q 0x08
+ #define SASNVR1_CACHELINE_0 0x10
+ #define SASNVR1_DIS_DEVSORT 0x20
+ #define SASNVR1_PWR_MGT_EN 0x40
+ #define SASNVR1_WIDEPORT 0x80
+ u8 options2;
+ #define SASNVR2_SINGLE_BUS 0x01
+ #define SASNVR2_SLOT_BIND 0x02
+ #define SASNVR2_EXP_PROG 0x04
+ #define SASNVR2_CMDTHR_LUN 0x08
+ #define SASNVR2_HEARTBEAT 0x10
+ #define SASNVR2_INT_CONNECT 0x20
+ #define SASNVR2_SW_MUX_CTRL 0x40
+ #define SASNVR2_DISABLE_NCQ 0x80
+ u8 int_coalescing;
+ #define SASNVR_COAL_DIS 0x00
+ #define SASNVR_COAL_LOW 0x01
+ #define SASNVR_COAL_MED 0x02
+ #define SASNVR_COAL_HI 0x03
+ u8 cmd_throttle;
+ #define SASNVR_CMDTHR_NONE 0x00
+ u8 dev_wait_time;
+ u8 dev_wait_count;
+ u8 spin_up_delay;
+ #define SASNVR_SPINUP_MAX 0x14
+ u8 ssp_align_rate;
+ u8 sas_addr[8];
+ u8 phy_speed[16];
+ #define SASNVR_SPEED_AUTO 0x00
+ #define SASNVR_SPEED_1_5GB 0x01
+ #define SASNVR_SPEED_3GB 0x02
+ #define SASNVR_SPEED_6GB 0x03
+ #define SASNVR_SPEED_12GB 0x04
+ u8 phy_mux[16];
+ #define SASNVR_MUX_DISABLED 0x00
+ #define SASNVR_MUX_1_5GB 0x01
+ #define SASNVR_MUX_3GB 0x02
+ #define SASNVR_MUX_6GB 0x03
+ u8 phy_flags[16];
+ #define SASNVR_PHF_DISABLED 0x01
+ #define SASNVR_PHF_RD_ONLY 0x02
+ u8 sort_type;
+ #define SASNVR_SORT_SAS_ADDR 0x00
+ #define SASNVR_SORT_H308_CONN 0x01
+ #define SASNVR_SORT_PHY_ID 0x02
+ #define SASNVR_SORT_SLOT_ID 0x03
+ u8 dpm_reqcmd_lmt;
+ u8 dpm_stndby_time;
+ u8 dpm_active_time;
+ u8 phy_target_id[16];
+ #define SASNVR_PTI_DISABLED 0xFF
+ u8 virt_ses_mode;
+ #define SASNVR_VSMH_DISABLED 0x00
+ u8 read_write_mode;
+ #define SASNVR_RWM_DEFAULT 0x00
+ u8 link_down_to;
+ u8 reserved[0xA1];
+};
+
+typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr);
+
+struct esas2r_sg_context {
+ struct esas2r_adapter *adapter;
+ struct esas2r_request *first_req;
+ u32 length;
+ u8 *cur_offset;
+ PGETPHYSADDR get_phys_addr;
+ union {
+ struct {
+ struct atto_vda_sge *curr;
+ struct atto_vda_sge *last;
+ struct atto_vda_sge *limit;
+ struct atto_vda_sge *chain;
+ } a64;
+ struct {
+ struct atto_physical_region_description *curr;
+ struct atto_physical_region_description *chain;
+ u32 sgl_max_cnt;
+ u32 sge_cnt;
+ } prd;
+ } sge;
+ struct scatterlist *cur_sgel;
+ u8 *exp_offset;
+ int num_sgel;
+ int sgel_count;
+};
+
+struct esas2r_target {
+ u8 flags;
+ #define TF_PASS_THRU 0x01
+ #define TF_USED 0x02
+ u8 new_target_state;
+ u8 target_state;
+ u8 buffered_target_state;
+#define TS_NOT_PRESENT 0x00
+#define TS_PRESENT 0x05
+#define TS_LUN_CHANGE 0x06
+#define TS_INVALID 0xFF
+ u32 block_size;
+ u32 inter_block;
+ u32 inter_byte;
+ u16 virt_targ_id;
+ u16 phys_targ_id;
+ u8 identifier_len;
+ u64 sas_addr;
+ u8 identifier[60];
+ struct atto_vda_ae_lu lu_event;
+};
+
+struct esas2r_request {
+ struct list_head comp_list;
+ struct list_head req_list;
+ union atto_vda_req *vrq;
+ struct esas2r_mem_desc *vrq_md;
+ union {
+ void *data_buf;
+ union atto_vda_rsp_data *vda_rsp_data;
+ };
+ u8 *sense_buf;
+ struct list_head sg_table_head;
+ struct esas2r_mem_desc *sg_table;
+ u32 timeout;
+ #define RQ_TIMEOUT_S1 0xFFFFFFFF
+ #define RQ_TIMEOUT_S2 0xFFFFFFFE
+ #define RQ_MAX_TIMEOUT 0xFFFFFFFD
+ u16 target_id;
+ u8 req_type;
+ #define RT_INI_REQ 0x01
+ #define RT_DISC_REQ 0x02
+ u8 sense_len;
+ union atto_vda_func_rsp func_rsp;
+ RQCALLBK comp_cb;
+ RQCALLBK interrupt_cb;
+ void *interrupt_cx;
+ u8 flags;
+ #define RF_1ST_IBLK_BASE 0x04
+ #define RF_FAILURE_OK 0x08
+ u8 req_stat;
+ u16 vda_req_sz;
+ #define RQ_SIZE_DEFAULT 0
+ u64 lba;
+ RQCALLBK aux_req_cb;
+ void *aux_req_cx;
+ u32 blk_len;
+ u32 max_blk_len;
+ union {
+ struct scsi_cmnd *cmd;
+ u8 *task_management_status_ptr;
+ };
+};
+
+struct esas2r_flash_context {
+ struct esas2r_flash_img *fi;
+ RQCALLBK interrupt_cb;
+ u8 *sgc_offset;
+ u8 *scratch;
+ u32 fi_hdr_len;
+ u8 task;
+ #define FMTSK_ERASE_BOOT 0
+ #define FMTSK_WRTBIOS 1
+ #define FMTSK_READBIOS 2
+ #define FMTSK_WRTMAC 3
+ #define FMTSK_READMAC 4
+ #define FMTSK_WRTEFI 5
+ #define FMTSK_READEFI 6
+ #define FMTSK_WRTCFG 7
+ #define FMTSK_READCFG 8
+ u8 func;
+ u16 num_comps;
+ u32 cmp_len;
+ u32 flsh_addr;
+ u32 curr_len;
+ u8 comp_typ;
+ struct esas2r_sg_context sgc;
+};
+
+struct esas2r_disc_context {
+ u8 disc_evt;
+ #define DCDE_DEV_CHANGE 0x01
+ #define DCDE_DEV_SCAN 0x02
+ u8 state;
+ #define DCS_DEV_RMV 0x00
+ #define DCS_DEV_ADD 0x01
+ #define DCS_BLOCK_DEV_SCAN 0x02
+ #define DCS_RAID_GRP_INFO 0x03
+ #define DCS_PART_INFO 0x04
+ #define DCS_PT_DEV_INFO 0x05
+ #define DCS_PT_DEV_ADDR 0x06
+ #define DCS_DISC_DONE 0xFF
+ u16 flags;
+ #define DCF_DEV_CHANGE 0x0001
+ #define DCF_DEV_SCAN 0x0002
+ #define DCF_POLLED 0x8000
+ u32 interleave;
+ u32 block_size;
+ u16 dev_ix;
+ u8 part_num;
+ u8 raid_grp_ix;
+ char raid_grp_name[16];
+ struct esas2r_target *curr_targ;
+ u16 curr_virt_id;
+ u16 curr_phys_id;
+ u8 scan_gen;
+ u8 dev_addr_type;
+ u64 sas_addr;
+};
+
+struct esas2r_mem_desc {
+ struct list_head next_desc;
+ void *virt_addr;
+ u64 phys_addr;
+ void *pad;
+ void *esas2r_data;
+ u32 esas2r_param;
+ u32 size;
+};
+
+enum fw_event_type {
+ fw_event_null,
+ fw_event_lun_change,
+ fw_event_present,
+ fw_event_not_present,
+ fw_event_vda_ae
+};
+
+struct esas2r_vda_ae {
+ u32 signature;
+#define ESAS2R_VDA_EVENT_SIG 0x4154544F
+ u8 bus_number;
+ u8 devfn;
+ u8 pad[2];
+ union atto_vda_ae vda_ae;
+};
+
+struct esas2r_fw_event_work {
+ struct list_head list;
+ struct delayed_work work;
+ struct esas2r_adapter *a;
+ enum fw_event_type type;
+ u8 data[sizeof(struct esas2r_vda_ae)];
+};
+
+enum state {
+ FW_INVALID_ST,
+ FW_STATUS_ST,
+ FW_COMMAND_ST
+};
+
+struct esas2r_firmware {
+ enum state state;
+ struct esas2r_flash_img header;
+ u8 *data;
+ u64 phys;
+ int orig_len;
+ void *header_buff;
+ u64 header_buff_phys;
+};
+
+struct esas2r_adapter {
+ struct esas2r_target targetdb[ESAS2R_MAX_TARGETS];
+ struct esas2r_target *targetdb_end;
+ unsigned char *regs;
+ unsigned char *data_window;
+ long flags;
+ #define AF_PORT_CHANGE 0
+ #define AF_CHPRST_NEEDED 1
+ #define AF_CHPRST_PENDING 2
+ #define AF_CHPRST_DETECTED 3
+ #define AF_BUSRST_NEEDED 4
+ #define AF_BUSRST_PENDING 5
+ #define AF_BUSRST_DETECTED 6
+ #define AF_DISABLED 7
+ #define AF_FLASH_LOCK 8
+ #define AF_OS_RESET 9
+ #define AF_FLASHING 10
+ #define AF_POWER_MGT 11
+ #define AF_NVR_VALID 12
+ #define AF_DEGRADED_MODE 13
+ #define AF_DISC_PENDING 14
+ #define AF_TASKLET_SCHEDULED 15
+ #define AF_HEARTBEAT 16
+ #define AF_HEARTBEAT_ENB 17
+ #define AF_NOT_PRESENT 18
+ #define AF_CHPRST_STARTED 19
+ #define AF_FIRST_INIT 20
+ #define AF_POWER_DOWN 21
+ #define AF_DISC_IN_PROG 22
+ #define AF_COMM_LIST_TOGGLE 23
+ #define AF_LEGACY_SGE_MODE 24
+ #define AF_DISC_POLLED 25
+ long flags2;
+ #define AF2_SERIAL_FLASH 0
+ #define AF2_DEV_SCAN 1
+ #define AF2_DEV_CNT_OK 2
+ #define AF2_COREDUMP_AVAIL 3
+ #define AF2_COREDUMP_SAVED 4
+ #define AF2_VDA_POWER_DOWN 5
+ #define AF2_THUNDERLINK 6
+ #define AF2_THUNDERBOLT 7
+ #define AF2_INIT_DONE 8
+ #define AF2_INT_PENDING 9
+ #define AF2_TIMER_TICK 10
+ #define AF2_IRQ_CLAIMED 11
+ #define AF2_MSI_ENABLED 12
+ atomic_t disable_cnt;
+ atomic_t dis_ints_cnt;
+ u32 int_stat;
+ u32 int_mask;
+ u32 volatile *outbound_copy;
+ struct list_head avail_request;
+ spinlock_t request_lock;
+ spinlock_t sg_list_lock;
+ spinlock_t queue_lock;
+ spinlock_t mem_lock;
+ struct list_head free_sg_list_head;
+ struct esas2r_mem_desc *sg_list_mds;
+ struct list_head active_list;
+ struct list_head defer_list;
+ struct esas2r_request **req_table;
+ union {
+ u16 prev_dev_cnt;
+ u32 heartbeat_time;
+ #define ESAS2R_HEARTBEAT_TIME (3000)
+ };
+ u32 chip_uptime;
+ #define ESAS2R_CHP_UPTIME_MAX (60000)
+ #define ESAS2R_CHP_UPTIME_CNT (20000)
+ u64 uncached_phys;
+ u8 *uncached;
+ struct esas2r_sas_nvram *nvram;
+ struct esas2r_request general_req;
+ u8 init_msg;
+ #define ESAS2R_INIT_MSG_START 1
+ #define ESAS2R_INIT_MSG_INIT 2
+ #define ESAS2R_INIT_MSG_GET_INIT 3
+ #define ESAS2R_INIT_MSG_REINIT 4
+ u16 cmd_ref_no;
+ u32 fw_version;
+ u32 fw_build;
+ u32 chip_init_time;
+ #define ESAS2R_CHPRST_TIME (180000)
+ #define ESAS2R_CHPRST_WAIT_TIME (2000)
+ u32 last_tick_time;
+ u32 window_base;
+ RQBUILDSGL build_sgl;
+ struct esas2r_request *first_ae_req;
+ u32 list_size;
+ u32 last_write;
+ u32 last_read;
+ u16 max_vdareq_size;
+ u16 disc_wait_cnt;
+ struct esas2r_mem_desc inbound_list_md;
+ struct esas2r_mem_desc outbound_list_md;
+ struct esas2r_disc_context disc_ctx;
+ u8 *disc_buffer;
+ u32 disc_start_time;
+ u32 disc_wait_time;
+ u32 flash_ver;
+ char flash_rev[16];
+ char fw_rev[16];
+ char image_type[16];
+ struct esas2r_flash_context flash_context;
+ u32 num_targets_backend;
+ u32 ioctl_tunnel;
+ struct tasklet_struct tasklet;
+ struct pci_dev *pcid;
+ struct Scsi_Host *host;
+ unsigned int index;
+ char name[32];
+ struct timer_list timer;
+ struct esas2r_firmware firmware;
+ wait_queue_head_t nvram_waiter;
+ int nvram_command_done;
+ wait_queue_head_t fm_api_waiter;
+ int fm_api_command_done;
+ wait_queue_head_t vda_waiter;
+ int vda_command_done;
+ u8 *vda_buffer;
+ u64 ppvda_buffer;
+#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data))
+#define VDA_MAX_BUFFER_SIZE (0x40000 + VDA_BUFFER_HEADER_SZ)
+ wait_queue_head_t fs_api_waiter;
+ int fs_api_command_done;
+ u64 ppfs_api_buffer;
+ u8 *fs_api_buffer;
+ u32 fs_api_buffer_size;
+ wait_queue_head_t buffered_ioctl_waiter;
+ int buffered_ioctl_done;
+ int uncached_size;
+ struct workqueue_struct *fw_event_q;
+ struct list_head fw_event_list;
+ spinlock_t fw_event_lock;
+ u8 fw_events_off; /* if '1', then ignore events */
+ char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
+	/*
+	 * intr_mode stores the interrupt mode currently being used by this
+	 * adapter.  It starts out as the interrupt_mode module parameter
+	 * but is downgraded if the requested mode cannot be enabled.
+	 */
+ int intr_mode;
+#define INTR_MODE_LEGACY 0
+#define INTR_MODE_MSI 1
+#define INTR_MODE_MSIX 2
+ struct esas2r_sg_context fm_api_sgc;
+ u8 *save_offset;
+ struct list_head vrq_mds_head;
+ struct esas2r_mem_desc *vrq_mds;
+ int num_vrqs;
+ struct semaphore fm_api_semaphore;
+ struct semaphore fs_api_semaphore;
+ struct semaphore nvram_semaphore;
+ struct atto_ioctl *local_atto_ioctl;
+ u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
+ unsigned int sysfs_fw_created:1;
+ unsigned int sysfs_fs_created:1;
+ unsigned int sysfs_vda_created:1;
+ unsigned int sysfs_hw_created:1;
+ unsigned int sysfs_live_nvram_created:1;
+ unsigned int sysfs_default_nvram_created:1;
+};
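+
+/*
+ * Note that the AF_* and AF2_* values above are bit numbers, not masks:
+ * flags and flags2 are manipulated with the atomic bitops
+ * (test_bit()/set_bit()/clear_bit()), as in the inline helpers below.
+ */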
+
+/*
+ * Function Declarations
+ * SCSI functions
+ */
+int esas2r_release(struct Scsi_Host *);
+const char *esas2r_info(struct Scsi_Host *);
+int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *data);
+int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
+int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+u8 handle_hba_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl *ioctl_hba);
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
+int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
+int esas2r_slave_alloc(struct scsi_device *dev);
+int esas2r_slave_configure(struct scsi_device *dev);
+void esas2r_slave_destroy(struct scsi_device *dev);
+int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
+int esas2r_change_queue_type(struct scsi_device *dev, int type);
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+/* SCSI error handler (eh) functions */
+int esas2r_eh_abort(struct scsi_cmnd *cmd);
+int esas2r_device_reset(struct scsi_cmnd *cmd);
+int esas2r_host_reset(struct scsi_cmnd *cmd);
+int esas2r_bus_reset(struct scsi_cmnd *cmd);
+int esas2r_target_reset(struct scsi_cmnd *cmd);
+
+/* Internal functions */
+int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
+ int index);
+int esas2r_cleanup(struct Scsi_Host *host);
+int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+void esas2r_adapter_tasklet(unsigned long context);
+irqreturn_t esas2r_interrupt(int irq, void *dev_id);
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
+void esas2r_kickoff_timer(struct esas2r_adapter *a);
+int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
+int esas2r_resume(struct pci_dev *pcid);
+void esas2r_fw_event_off(struct esas2r_adapter *a);
+void esas2r_fw_event_on(struct esas2r_adapter *a);
+bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *nvram);
+void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
+ struct esas2r_sas_nvram *nvram);
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_reset_detected(struct esas2r_adapter *a);
+void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id,
+ u8 state);
+int esas2r_req_status_to_error(u8 req_stat);
+void esas2r_kill_adapter(int i);
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a);
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a);
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+ void **uncached_area);
+bool esas2r_check_adapter(struct esas2r_adapter *a);
+bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll);
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+ struct esas2r_request *rqaux, u8 task_mgt_func);
+void esas2r_do_tasklet_tasks(struct esas2r_adapter *a);
+void esas2r_adapter_interrupt(struct esas2r_adapter *a);
+void esas2r_do_deferred_processes(struct esas2r_adapter *a);
+void esas2r_reset_bus(struct esas2r_adapter *a);
+void esas2r_reset_adapter(struct esas2r_adapter *a);
+void esas2r_timer_tick(struct esas2r_adapter *a);
+const char *esas2r_get_model_name(struct esas2r_adapter *a);
+const char *esas2r_get_model_name_short(struct esas2r_adapter *a);
+u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time,
+ u32 *delay);
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 cksum,
+ u32 addr,
+ u32 length);
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 scan_gen,
+ u16 dev_index,
+ u32 length,
+ void *data);
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u32 cmd_rsp_len);
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u8 sub_func);
+void esas2r_build_cfg_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u32 length,
+ void *data);
+void esas2r_power_down(struct esas2r_adapter *a);
+bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll);
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo);
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_ioctl_fs *fs,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc);
+bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
+ u32 size);
+bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
+ u32 size);
+bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
+ struct esas2r_request *rq, struct esas2r_sg_context *sgc);
+void esas2r_force_interrupt(struct esas2r_adapter *a);
+void esas2r_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_process_adapter_reset(struct esas2r_adapter *a);
+void esas2r_complete_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_dummy_complete(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
+void esas2r_start_vda_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+bool esas2r_read_flash_rev(struct esas2r_adapter *a);
+bool esas2r_read_image_type(struct esas2r_adapter *a);
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a);
+bool esas2r_nvram_validate(struct esas2r_adapter *a);
+void esas2r_nvram_set_defaults(struct esas2r_adapter *a);
+bool esas2r_print_flash_rev(struct esas2r_adapter *a);
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt);
+bool esas2r_init_msgs(struct esas2r_adapter *a);
+bool esas2r_is_adapter_present(struct esas2r_adapter *a);
+void esas2r_nuxi_mgt_data(u8 function, void *data);
+void esas2r_nuxi_cfg_data(u8 function, void *data);
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae);
+void esas2r_reset_chip(struct esas2r_adapter *a);
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_polled_interrupt(struct esas2r_adapter *a);
+bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
+ u8 status);
+bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+void esas2r_targ_db_initialize(struct esas2r_adapter *a);
+void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify);
+void esas2r_targ_db_report_changes(struct esas2r_adapter *a);
+struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc);
+struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc,
+ u8 *ident,
+ u8 ident_len);
+void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
+struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
+ u64 *sas_addr);
+struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
+ void *identifier,
+ u8 ident_len);
+u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id);
+struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
+ u16 virt_id);
+u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a);
+void esas2r_disc_initialize(struct esas2r_adapter *a);
+void esas2r_disc_start_waiting(struct esas2r_adapter *a);
+void esas2r_disc_check_for_work(struct esas2r_adapter *a);
+void esas2r_disc_check_complete(struct esas2r_adapter *a);
+void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt);
+bool esas2r_disc_start_port(struct esas2r_adapter *a);
+void esas2r_disc_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str);
+bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl_vda *vi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc);
+void esas2r_queue_fw_event(struct esas2r_adapter *a,
+ enum fw_event_type type,
+ void *data,
+ int data_sz);
+
+/* Inline functions */
+
+/* Allocate a chip scatter/gather list entry */
+static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+ struct list_head *sgl;
+ struct esas2r_mem_desc *result = NULL;
+
+ spin_lock_irqsave(&a->sg_list_lock, flags);
+ if (likely(!list_empty(&a->free_sg_list_head))) {
+ sgl = a->free_sg_list_head.next;
+ result = list_entry(sgl, struct esas2r_mem_desc, next_desc);
+ list_del_init(sgl);
+ }
+ spin_unlock_irqrestore(&a->sg_list_lock, flags);
+
+ return result;
+}
+
+/* Initialize a scatter/gather context */
+static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc,
+ struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct atto_vda_sge *first)
+{
+ sgc->adapter = a;
+ sgc->first_req = rq;
+
+ /*
+ * set the limit pointer such that an SGE pointer above this value
+ * would be the first one to overflow the SGL.
+ */
+	sgc->sge.a64.limit = (struct atto_vda_sge *)
+			     ((u8 *)rq->vrq
+			      + (sizeof(union atto_vda_req) / 8)
+			      - sizeof(struct atto_vda_sge));
+ if (first) {
+ sgc->sge.a64.last =
+ sgc->sge.a64.curr = first;
+		rq->vrq->scsi.sg_list_offset =
+			(u8)((u8 *)first - (u8 *)rq->vrq);
+ } else {
+ sgc->sge.a64.last =
+ sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
+ rq->vrq->scsi.sg_list_offset =
+ (u8)offsetof(struct atto_vda_scsi_req, u.sge);
+ }
+ sgc->sge.a64.chain = NULL;
+}
+
+static inline void esas2r_rq_init_request(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+{
+ union atto_vda_req *vrq = rq->vrq;
+
+ INIT_LIST_HEAD(&rq->sg_table_head);
+ rq->data_buf = (void *)(vrq + 1);
+ rq->interrupt_cb = NULL;
+ rq->comp_cb = esas2r_complete_request_cb;
+ rq->flags = 0;
+ rq->timeout = 0;
+ rq->req_stat = RS_PENDING;
+ rq->req_type = RT_INI_REQ;
+
+ /* clear the outbound response */
+ rq->func_rsp.dwords[0] = 0;
+ rq->func_rsp.dwords[1] = 0;
+
+	/*
+	 * Clear the size of the VDA request.  esas2r_build_sg_list() only
+	 * allows the size of a request to grow, and some management requests
+	 * go through it twice with a smaller size the second time.  If this
+	 * field is never modified, it is later set to the size of the entire
+	 * VDA request.
+	 */
+ rq->vda_req_sz = RQ_SIZE_DEFAULT;
+
+ /* req_table entry should be NULL at this point - if not, halt */
+
+ if (a->req_table[LOWORD(vrq->scsi.handle)])
+ esas2r_bugon();
+
+ /* fill in the table for this handle so we can get back to the
+ * request.
+ */
+ a->req_table[LOWORD(vrq->scsi.handle)] = rq;
+
+	/*
+	 * Add a reference number to the handle to make it unique (until it
+	 * wraps, of course) while preserving the least significant word.
+	 */
+ vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle;
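+
+	/*
+	 * e.g. a handle of 0x002A0005 refers to req_table slot 5 and
+	 * carries reference number 0x002A.
+	 */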
+
+	/*
+	 * The following formats a SCSI request.  The caller can override it
+	 * as necessary; clear_vda_request can be called to clear the VDA
+	 * request for another type of request.
+	 */
+ vrq->scsi.function = VDA_FUNC_SCSI;
+ vrq->scsi.sense_len = SENSE_DATA_SZ;
+
+ /* clear out sg_list_offset and chain_offset */
+ vrq->scsi.sg_list_offset = 0;
+ vrq->scsi.chain_offset = 0;
+ vrq->scsi.flags = 0;
+ vrq->scsi.reserved = 0;
+
+ /* set the sense buffer to be the data payload buffer */
+ vrq->scsi.ppsense_buf
+ = cpu_to_le64(rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+}
+
+static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ if (list_empty(&rq->sg_table_head))
+ return;
+
+ spin_lock_irqsave(&a->sg_list_lock, flags);
+ list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
+ spin_unlock_irqrestore(&a->sg_list_lock, flags);
+}
+
+static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+{
+ esas2r_rq_free_sg_lists(rq, a);
+ a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
+ rq->data_buf = NULL;
+}
+
+static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
+{
+	return test_bit(AF_BUSRST_NEEDED, &a->flags) ||
+	       test_bit(AF_BUSRST_DETECTED, &a->flags) ||
+	       test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+	       test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+	       test_bit(AF_PORT_CHANGE, &a->flags);
+}
+
+/*
+ * Build the scatter/gather list for an I/O request according to the
+ * specifications placed in the esas2r_sg_context.  The caller must
+ * initialize the struct esas2r_sg_context prior to the initial call by
+ * calling esas2r_sgc_init().
+ */
+static inline bool esas2r_build_sg_list(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
+ return true;
+
+ return (*a->build_sgl)(a, sgc);
+}
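+
+/*
+ * A usage sketch modeled on the discovery code in esas2r_disc.c; the
+ * my_get_phys_addr callback is a placeholder and error handling is
+ * elided:
+ *
+ *	struct esas2r_sg_context sgc;
+ *
+ *	sgc.cur_offset = NULL;
+ *	sgc.get_phys_addr = my_get_phys_addr;
+ *	sgc.length = len;
+ *	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
+ *	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
+ *	if (!esas2r_build_sg_list(a, rq, &sgc))
+ *		esas2r_rq_destroy_request(rq, a);
+ */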
+
+static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a)
+{
+ if (atomic_inc_return(&a->dis_ints_cnt) == 1)
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT,
+ ESAS2R_INT_DIS_MASK);
+}
+
+static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
+{
+ if (atomic_dec_return(&a->dis_ints_cnt) == 0)
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT,
+ ESAS2R_INT_ENB_MASK);
+}
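+
+/*
+ * The two helpers above nest: dis_ints_cnt acts as a reference count,
+ * so only the first disable masks chip interrupts and only the final
+ * matching enable unmasks them.
+ */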
+
+/* Schedule a TASKLET to perform non-interrupt tasks that may require delays
+ * or long completion times.
+ */
+static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
+{
+ /* make sure we don't schedule twice */
+ if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags))
+ tasklet_hi_schedule(&a->tasklet);
+}
+
+static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
+{
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ (a->nvram->options2 & SASNVR2_HEARTBEAT))
+ set_bit(AF_HEARTBEAT_ENB, &a->flags);
+ else
+ clear_bit(AF_HEARTBEAT_ENB, &a->flags);
+}
+
+static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
+{
+ clear_bit(AF_HEARTBEAT_ENB, &a->flags);
+ clear_bit(AF_HEARTBEAT, &a->flags);
+}
+
+/* Set the initial state for resetting the adapter on the next pass through
+ * esas2r_do_deferred.
+ */
+static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
+{
+ esas2r_disable_heartbeat(a);
+
+ set_bit(AF_CHPRST_NEEDED, &a->flags);
+ set_bit(AF_CHPRST_PENDING, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
+}
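+
+/*
+ * Setting AF_CHPRST_NEEDED above also makes esas2r_is_tasklet_pending()
+ * return true, so the deferred reset is picked up the next time the
+ * driver checks for tasklet work.
+ */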
+
+/* See if an interrupt is pending on the adapter. */
+static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a)
+{
+ u32 intstat;
+
+ if (a->int_mask == 0)
+ return false;
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if ((intstat & a->int_mask) == 0)
+ return false;
+
+ esas2r_disable_chip_interrupts(a);
+
+ a->int_stat = intstat;
+ a->int_mask = 0;
+
+ return true;
+}
+
+static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
+ struct esas2r_adapter *a)
+{
+ return (u16)(uintptr_t)(t - a->targetdb);
+}
+
+/* Build and start an asynchronous event request */
+static inline void esas2r_start_ae_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+ esas2r_build_ae_req(a, rq);
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+ esas2r_start_vda_request(a, rq);
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
+ struct list_head *comp_list)
+{
+ struct esas2r_request *rq;
+ struct list_head *element, *next;
+
+ list_for_each_safe(element, next, comp_list) {
+ rq = list_entry(element, struct esas2r_request, comp_list);
+ list_del_init(element);
+ esas2r_complete_request(a, rq);
+ }
+}
+
+/* sysfs handlers */
+extern struct bin_attribute bin_attr_fw;
+extern struct bin_attribute bin_attr_fs;
+extern struct bin_attribute bin_attr_vda;
+extern struct bin_attribute bin_attr_hw;
+extern struct bin_attribute bin_attr_live_nvram;
+extern struct bin_attribute bin_attr_default_nvram;
+
+#endif /* ESAS2R_H */
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
new file mode 100644
index 000000000000..1c079f4300a5
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -0,0 +1,1184 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_disc.c
+ * esas2r device discovery routines
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+/* Miscellaneous internal discovery routines */
+static void esas2r_disc_abort(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_continue(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
+static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
+static bool esas2r_disc_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
+/* Internal discovery routines that process the states */
+static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_part_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
+void esas2r_disc_initialize(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *nvr = a->nvram;
+
+ esas2r_trace_enter();
+
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
+ clear_bit(AF2_DEV_SCAN, &a->flags2);
+ clear_bit(AF2_DEV_CNT_OK, &a->flags2);
+
+ a->disc_start_time = jiffies_to_msecs(jiffies);
+ a->disc_wait_time = nvr->dev_wait_time * 1000;
+ a->disc_wait_cnt = nvr->dev_wait_count;
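+	/*
+	 * dev_wait_time is stored in seconds in NVRAM, so e.g. the default
+	 * of 3 becomes a 3000 ms discovery wait above.
+	 */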
+
+ if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
+ a->disc_wait_cnt = ESAS2R_MAX_TARGETS;
+
+	/*
+	 * If we are doing chip reset or power management processing, always
+	 * wait for devices.  Use the NVRAM device count if it is greater
+	 * than the previously discovered device count.
+	 */
+
+ esas2r_hdebug("starting discovery...");
+
+ a->general_req.interrupt_cx = NULL;
+
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+ test_bit(AF_POWER_MGT, &a->flags)) {
+ if (a->prev_dev_cnt == 0) {
+ /* Don't bother waiting if there is nothing to wait
+ * for.
+ */
+ a->disc_wait_time = 0;
+ } else {
+ /*
+ * Set the device wait count to what was previously
+ * found. We don't care if the user only configured
+ * a time because we know the exact count to wait for.
+ * There is no need to honor the user's wishes to
+ * always wait the full time.
+ */
+ a->disc_wait_cnt = a->prev_dev_cnt;
+
+			/*
+			 * Bump the minimum wait time to 15 seconds since the
+			 * default is 3; system boot or the boot driver
+			 * usually buys us more time.
+			 */
+ if (a->disc_wait_time < 15000)
+ a->disc_wait_time = 15000;
+ }
+ }
+
+ esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
+ esas2r_trace("disc wait time: %d", a->disc_wait_time);
+
+ if (a->disc_wait_time == 0)
+ esas2r_disc_check_complete(a);
+
+ esas2r_trace_exit();
+}
+
+void esas2r_disc_start_waiting(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (a->disc_ctx.disc_evt)
+ esas2r_disc_start_port(a);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+}
+
+void esas2r_disc_check_for_work(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+
+ /* service any pending interrupts first */
+
+ esas2r_polled_interrupt(a);
+
+	/*
+	 * Interrupt processing may have queued up a discovery event; go see
+	 * if we have one to start.  We could not start it in the ISR since
+	 * polled discovery would cause a deadlock.
+	 */
+
+ esas2r_disc_start_waiting(a);
+
+ if (rq->interrupt_cx == NULL)
+ return;
+
+ if (rq->req_stat == RS_STARTED
+ && rq->timeout <= RQ_MAX_TIMEOUT) {
+ /* wait for the current discovery request to complete. */
+ esas2r_wait_request(a, rq);
+
+ if (rq->req_stat == RS_TIMEOUT) {
+ esas2r_disc_abort(a, rq);
+ esas2r_local_reset_adapter(a);
+ return;
+ }
+ }
+
+ if (rq->req_stat == RS_PENDING
+ || rq->req_stat == RS_STARTED)
+ return;
+
+ esas2r_disc_continue(a, rq);
+}
+
+void esas2r_disc_check_complete(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ /* check to see if we should be waiting for devices */
+ if (a->disc_wait_time) {
+ u32 currtime = jiffies_to_msecs(jiffies);
+ u32 time = currtime - a->disc_start_time;
+
+ /*
+ * Wait until the device wait time is exhausted or the device
+ * wait count is satisfied.
+ */
+ if (time < a->disc_wait_time
+ && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
+ || a->disc_wait_cnt == 0)) {
+ /* After three seconds of waiting, schedule a scan. */
+ if (time >= 3000
+ && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ }
+
+ esas2r_trace_exit();
+ return;
+ }
+
+		/*
+		 * We are done waiting... we think.  Adjust the wait time so
+		 * we continue to consume events after the count is met.
+		 */
+ if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
+ a->disc_wait_time = time + 3000;
+
+ /* If we haven't done a full scan yet, do it now. */
+ if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ esas2r_trace_exit();
+ return;
+ }
+
+ /*
+ * Now, if there is still time left to consume events, continue
+ * waiting.
+ */
+ if (time < a->disc_wait_time) {
+ esas2r_trace_exit();
+ return;
+ }
+ } else {
+ if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ }
+ }
+
+ /* We want to stop waiting for devices. */
+ a->disc_wait_time = 0;
+
+ if (test_bit(AF_DISC_POLLED, &a->flags) &&
+ test_bit(AF_DISC_IN_PROG, &a->flags)) {
+ /*
+ * Polled discovery is still pending so continue the active
+ * discovery until it is done. At that point, we will stop
+ * polled discovery and transition to interrupt driven
+ * discovery.
+ */
+ } else {
+		/*
+		 * Done waiting for devices.  Note that we get here
+		 * immediately after deferred waiting completes because that
+		 * is interrupt driven; i.e., there is no transition.
+		 */
+ esas2r_disc_fix_curr_requests(a);
+ clear_bit(AF_DISC_PENDING, &a->flags);
+
+ /*
+ * We have deferred target state changes until now because we
+ * don't want to report any removals (due to the first arrival)
+ * until the device wait time expires.
+ */
+ set_bit(AF_PORT_CHANGE, &a->flags);
+ }
+
+ esas2r_trace_exit();
+}
+
+void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
+{
+ struct esas2r_disc_context *dc = &a->disc_ctx;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("disc_event: %d", disc_evt);
+
+	/* Queue the event in the discovery context. */
+ dc->disc_evt |= disc_evt;
+
+	/*
+	 * Don't start discovery before or during polled discovery.  If we
+	 * did, we could deadlock if we are already in the ISR.
+	 */
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_POLLED, &a->flags))
+ esas2r_disc_start_port(a);
+
+ esas2r_trace_exit();
+}
+
+bool esas2r_disc_start_port(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+ struct esas2r_disc_context *dc = &a->disc_ctx;
+ bool ret;
+
+ esas2r_trace_enter();
+
+ if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ /* If there is a discovery waiting, process it. */
+ if (dc->disc_evt) {
+ if (test_bit(AF_DISC_POLLED, &a->flags)
+ && a->disc_wait_time == 0) {
+ /*
+ * We are doing polled discovery, but we no longer want
+ * to wait for devices. Stop polled discovery and
+ * transition to interrupt driven discovery.
+ */
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+ } else {
+ /* Discovery is complete. */
+
+ esas2r_hdebug("disc done");
+
+ set_bit(AF_PORT_CHANGE, &a->flags);
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ /* Handle the discovery context */
+ esas2r_trace("disc_evt: %d", dc->disc_evt);
+ set_bit(AF_DISC_IN_PROG, &a->flags);
+ dc->flags = 0;
+
+ if (test_bit(AF_DISC_POLLED, &a->flags))
+ dc->flags |= DCF_POLLED;
+
+ rq->interrupt_cx = dc;
+ rq->req_stat = RS_SUCCESS;
+
+ /* Decode the event code */
+ if (dc->disc_evt & DCDE_DEV_SCAN) {
+ dc->disc_evt &= ~DCDE_DEV_SCAN;
+
+ dc->flags |= DCF_DEV_SCAN;
+ dc->state = DCS_BLOCK_DEV_SCAN;
+ } else if (dc->disc_evt & DCDE_DEV_CHANGE) {
+ dc->disc_evt &= ~DCDE_DEV_CHANGE;
+
+ dc->flags |= DCF_DEV_CHANGE;
+ dc->state = DCS_DEV_RMV;
+ }
+
+ /* Continue interrupt driven discovery */
+ if (!test_bit(AF_DISC_POLLED, &a->flags))
+ ret = esas2r_disc_continue(a, rq);
+ else
+ ret = true;
+
+ esas2r_trace_exit();
+
+ return ret;
+}
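+
+/*
+ * A rough map of the discovery state machine driven below; see the
+ * individual handlers and their callbacks for the exact transitions:
+ *
+ *   device scan:    BLOCK_DEV_SCAN -> RAID_GRP_INFO <-> PART_INFO,
+ *                   then PT_DEV_INFO <-> PT_DEV_ADDR, then DISC_DONE
+ *   device change:  DEV_RMV -> DEV_ADD [-> PT_DEV_ADDR -> DEV_ADD],
+ *                   then DISC_DONE
+ */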
+
+static bool esas2r_disc_continue(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+
+ /* Device discovery/removal */
+ while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
+ rslt = false;
+
+ switch (dc->state) {
+ case DCS_DEV_RMV:
+
+ rslt = esas2r_disc_dev_remove(a, rq);
+ break;
+
+ case DCS_DEV_ADD:
+
+ rslt = esas2r_disc_dev_add(a, rq);
+ break;
+
+ case DCS_BLOCK_DEV_SCAN:
+
+ rslt = esas2r_disc_block_dev_scan(a, rq);
+ break;
+
+ case DCS_RAID_GRP_INFO:
+
+ rslt = esas2r_disc_raid_grp_info(a, rq);
+ break;
+
+ case DCS_PART_INFO:
+
+ rslt = esas2r_disc_part_info(a, rq);
+ break;
+
+ case DCS_PT_DEV_INFO:
+
+ rslt = esas2r_disc_passthru_dev_info(a, rq);
+			break;
+
+		case DCS_PT_DEV_ADDR:
+
+			rslt = esas2r_disc_passthru_dev_addr(a, rq);
+			break;
+
+		case DCS_DISC_DONE:
+
+ dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
+ break;
+
+ default:
+
+ esas2r_bugon();
+ dc->state = DCS_DISC_DONE;
+ break;
+ }
+
+ if (rslt)
+ return true;
+ }
+
+ /* Discovery is done...for now. */
+ rq->interrupt_cx = NULL;
+
+ if (!test_bit(AF_DISC_PENDING, &a->flags))
+ esas2r_disc_fix_curr_requests(a);
+
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
+
+ /* Start the next discovery. */
+ return esas2r_disc_start_port(a);
+}
+
+static bool esas2r_disc_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+	/* Enforce a minimum timeout. */
+ if (rq->timeout < ESAS2R_DEFAULT_TMO)
+ rq->timeout = ESAS2R_DEFAULT_TMO;
+
+ /*
+ * Override the request type to distinguish discovery requests. If we
+ * end up deferring the request, esas2r_disc_local_start_request()
+ * will be called to restart it.
+ */
+ rq->req_type = RT_DISC_REQ;
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_FLASHING, &a->flags))
+ esas2r_disc_local_start_request(a, rq);
+ else
+ list_add_tail(&rq->req_list, &a->defer_list);
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ return true;
+}
+
+void esas2r_disc_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_trace_enter();
+
+ list_add_tail(&rq->req_list, &a->active_list);
+
+ esas2r_start_vda_request(a, rq);
+
+ esas2r_trace_exit();
+}
+
+static void esas2r_disc_abort(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+
+ esas2r_trace_enter();
+
+ /* abort the current discovery */
+
+ dc->state = DCS_DISC_DONE;
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+
+ esas2r_trace_enter();
+
+ esas2r_rq_init_request(rq, a);
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_DEV_SCAN,
+ 0,
+ 0,
+ 0,
+ NULL);
+
+ rq->comp_cb = esas2r_disc_block_dev_scan_cb;
+
+ rq->timeout = 30000;
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SUCCESS)
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix = 0;
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vda_grp_info *grpinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);
+
+ if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
+ dc->state = DCS_DISC_DONE;
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
+
+ memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_GRP_INFO,
+ dc->scan_gen,
+ 0,
+ sizeof(struct atto_vda_grp_info),
+ NULL);
+
+ grpinfo->grp_index = dc->raid_grp_ix;
+
+ rq->comp_cb = esas2r_disc_raid_grp_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vda_grp_info *grpinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->raid_grp_ix = 0;
+ goto done;
+ }
+
+ if (rq->req_stat == RS_SUCCESS) {
+ grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
+
+ if (grpinfo->status != VDA_GRP_STAT_ONLINE
+ && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
+ /* go to the next group. */
+
+ dc->raid_grp_ix++;
+ } else {
+ memcpy(&dc->raid_grp_name[0],
+ &grpinfo->grp_name[0],
+ sizeof(grpinfo->grp_name));
+
+ dc->interleave = le32_to_cpu(grpinfo->interleave);
+ dc->block_size = le32_to_cpu(grpinfo->block_size);
+
+ dc->state = DCS_PART_INFO;
+ dc->part_num = 0;
+ }
+ } else {
+		if (rq->req_stat != RS_GRP_INVALID) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for RAID group info failed - "
+ "returned with %x",
+ rq->req_stat);
+ }
+
+ dc->dev_ix = 0;
+ dc->state = DCS_PT_DEV_INFO;
+ }
+
+done:
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_part_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vdapart_info *partinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("part_num: %d", dc->part_num);
+
+ if (dc->part_num >= VDA_MAX_PARTITIONS) {
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix++;
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
+
+ memset(partinfo, 0, sizeof(struct atto_vdapart_info));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_PART_INFO,
+ dc->scan_gen,
+ 0,
+ sizeof(struct atto_vdapart_info),
+ NULL);
+
+ partinfo->part_no = dc->part_num;
+
+ memcpy(&partinfo->grp_name[0],
+ &dc->raid_grp_name[0],
+ sizeof(partinfo->grp_name));
+
+ rq->comp_cb = esas2r_disc_part_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vdapart_info *partinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->raid_grp_ix = 0;
+ dc->state = DCS_RAID_GRP_INFO;
+ } else if (rq->req_stat == RS_SUCCESS) {
+ partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
+
+ dc->part_num = partinfo->part_no;
+
+ dc->curr_virt_id = le16_to_cpu(partinfo->target_id);
+
+ esas2r_targ_db_add_raid(a, dc);
+
+ dc->part_num++;
+ } else {
+		if (rq->req_stat != RS_PART_LAST) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for RAID group partition info "
+ "failed - status:%d", rq->req_stat);
+ }
+
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix++;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vda_devinfo *devinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("dev_ix: %d", dc->dev_ix);
+
+ esas2r_rq_init_request(rq, a);
+
+ devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
+
+ memset(devinfo, 0, sizeof(struct atto_vda_devinfo));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_DEV_PT_INFO,
+ dc->scan_gen,
+ dc->dev_ix,
+ sizeof(struct atto_vda_devinfo),
+ NULL);
+
+ rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vda_devinfo *devinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->dev_ix = 0;
+ dc->state = DCS_PT_DEV_INFO;
+ } else if (rq->req_stat == RS_SUCCESS) {
+ devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
+
+ dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);
+
+ dc->curr_virt_id = le16_to_cpu(devinfo->target_id);
+
+ if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
+ dc->curr_phys_id =
+ le16_to_cpu(devinfo->phys_target_id);
+ dc->dev_addr_type = ATTO_GDA_AT_PORT;
+ dc->state = DCS_PT_DEV_ADDR;
+
+ esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
+ esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
+ } else {
+ dc->dev_ix++;
+ }
+ } else {
+		if (rq->req_stat != RS_DEV_INVALID) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for device information failed - "
+ "status:%d", rq->req_stat);
+ }
+
+ dc->state = DCS_DISC_DONE;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_ioctl *hi;
+ struct esas2r_sg_context sgc;
+
+ esas2r_trace_enter();
+
+ esas2r_rq_init_request(rq, a);
+
+ /* format the request. */
+
+ sgc.cur_offset = NULL;
+ sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
+ sgc.length = offsetof(struct atto_ioctl, data)
+ + sizeof(struct atto_hba_get_device_address);
+
+ esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
+
+ esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
+
+ if (!esas2r_build_sg_list(a, rq, &sgc)) {
+ esas2r_rq_destroy_request(rq, a);
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;
+
+ rq->interrupt_cx = dc;
+
+ /* format the IOCTL data. */
+
+ hi = (struct atto_ioctl *)a->disc_buffer;
+
+ memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);
+
+ hi->version = ATTO_VER_GET_DEV_ADDR0;
+ hi->function = ATTO_FUNC_GET_DEV_ADDR;
+ hi->flags = HBAF_TUNNEL;
+
+	hi->data.get_dev_addr.target_id = cpu_to_le32(dc->curr_phys_id);
+ hi->data.get_dev_addr.addr_type = dc->dev_addr_type;
+
+ /* start it up. */
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t = NULL;
+ unsigned long flags;
+ struct atto_ioctl *hi;
+ u16 addrlen;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ hi = (struct atto_ioctl *)a->disc_buffer;
+
+ if (rq->req_stat == RS_SUCCESS
+ && hi->status == ATTO_STS_SUCCESS) {
+ addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);
+
+ if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
+ if (addrlen == sizeof(u64))
+ memcpy(&dc->sas_addr,
+ &hi->data.get_dev_addr.address[0],
+ addrlen);
+ else
+ memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));
+
+ /* Get the unique identifier. */
+ dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;
+
+ goto next_dev_addr;
+ } else {
+ /* Add the pass through target. */
+ if (HIBYTE(addrlen) == 0) {
+				t = esas2r_targ_db_add_pthru(
+					a,
+					dc,
+					&hi->data.get_dev_addr.address[0],
+					(u8)hi->data.get_dev_addr.addr_len);
+
+ if (t)
+ memcpy(&t->sas_addr, &dc->sas_addr,
+ sizeof(t->sas_addr));
+ } else {
+ /* getting the back end data failed */
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "an error occurred retrieving the "
+ "back end data (%s:%d)",
+ __func__,
+ __LINE__);
+ }
+ }
+ } else {
+ /* getting the back end data failed */
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "an error occurred retrieving the back end data - "
+ "rq->req_stat:%d hi->status:%d",
+ rq->req_stat, hi->status);
+ }
+
+ /* proceed to the next device. */
+
+ if (dc->flags & DCF_DEV_SCAN) {
+ dc->dev_ix++;
+ dc->state = DCS_PT_DEV_INFO;
+ } else if (dc->flags & DCF_DEV_CHANGE) {
+ dc->curr_targ++;
+ dc->state = DCS_DEV_ADD;
+ } else {
+ esas2r_bugon();
+ }
+
+next_dev_addr:
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = sgc->adapter;
+
+ if (sgc->length > ESAS2R_DISC_BUF_LEN)
+ esas2r_bugon();
+
+ *addr = a->uncached_phys
+ + (u64)((u8 *)a->disc_buffer - a->uncached);
+
+ return sgc->length;
+}
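+
+/*
+ * Illustrative sketch, not driver code: the address returned above is
+ * derived from the adapter's uncached DMA region. disc_buffer sits
+ * inside that region, so its bus address is the region's bus address
+ * plus the buffer's offset within it:
+ *
+ *	u64 offset   = (u8 *)a->disc_buffer - a->uncached;
+ *	u64 bus_addr = a->uncached_phys + offset;
+ */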
+
+static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t;
+ struct esas2r_target *t2;
+
+ esas2r_trace_enter();
+
+ /* process removals. */
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->new_target_state != TS_NOT_PRESENT)
+ continue;
+
+ t->new_target_state = TS_INVALID;
+
+ /* remove the right target! */
+
+ t2 =
+ esas2r_targ_db_find_by_virt_id(a,
+ esas2r_targ_get_id(t,
+ a));
+
+ if (t2)
+ esas2r_targ_db_remove(a, t2);
+ }
+
+ /* removals complete. process arrivals. */
+
+ dc->state = DCS_DEV_ADD;
+ dc->curr_targ = a->targetdb;
+
+ esas2r_trace_exit();
+
+ return false;
+}
+
+static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t = dc->curr_targ;
+
+ if (t >= a->targetdb_end) {
+ /* done processing state changes. */
+
+ dc->state = DCS_DISC_DONE;
+ } else if (t->new_target_state == TS_PRESENT) {
+ struct atto_vda_ae_lu *luevt = &t->lu_event;
+
+ esas2r_trace_enter();
+
+ /* clear this now in case more events come in. */
+
+ t->new_target_state = TS_INVALID;
+
+ /* setup the discovery context for adding this device. */
+
+ dc->curr_virt_id = esas2r_targ_get_id(t, a);
+
+ if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+ + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
+ && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
+ dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
+ dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
+ } else {
+ dc->block_size = 0;
+ dc->interleave = 0;
+ }
+
+ /* determine the device type being added. */
+
+ if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
+ if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
+ dc->state = DCS_PT_DEV_ADDR;
+ dc->dev_addr_type = ATTO_GDA_AT_PORT;
+ dc->curr_phys_id = luevt->wphys_target_id;
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "luevt->dwevent does not have the "
+ "VDAAE_LU_PHYS_ID bit set (%s:%d)",
+ __func__, __LINE__);
+ }
+ } else {
+ dc->raid_grp_name[0] = 0;
+
+ esas2r_targ_db_add_raid(a, dc);
+ }
+
+ esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
+ esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
+ esas2r_trace("dwevent: %d", luevt->dwevent);
+
+ esas2r_trace_exit();
+ }
+
+ if (dc->state == DCS_DEV_ADD) {
+ /* go to the next device. */
+
+ dc->curr_targ++;
+ }
+
+ return false;
+}
+
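+/*
+ * Summary sketch (inferred from the handlers above, not normative):
+ *
+ *	scan:   DCS_PT_DEV_INFO -> DCS_PT_DEV_ADDR (port address, then
+ *	        unique address) -> DCS_PT_DEV_INFO for the next dev_ix,
+ *	        until the firmware returns RS_DEV_INVALID and the state
+ *	        becomes DCS_DISC_DONE.
+ *	change: removals are processed first, then DCS_DEV_ADD walks the
+ *	        target db; pass-through targets detour through
+ *	        DCS_PT_DEV_ADDR, and the walk ends in DCS_DISC_DONE.
+ */
+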
+/*
+ * When discovery is done, find all requests on defer queue and
+ * test if they need to be modified. If a target is no longer present
+ * then complete the request with RS_SEL. Otherwise, update the
+ * target_id since after a hibernate it can be a different value.
+ * VDA does not make passthrough target IDs persistent.
+ */
+static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+ struct esas2r_target *t;
+ struct esas2r_request *rq;
+ struct list_head *element;
+
+ /* update virt_targ_id in any outstanding esas2r_requests */
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ t = a->targetdb + rq->target_id;
+
+ if (t->target_state == TS_PRESENT)
+ rq->vrq->scsi.target_id = le16_to_cpu(
+ t->virt_targ_id);
+ else
+ rq->req_stat = RS_SEL;
+ }
+
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
new file mode 100644
index 000000000000..b7dc59fca7a6
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -0,0 +1,1521 @@
+
+/*
+ * linux/drivers/scsi/esas2r/esas2r_flash.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/* local macro defs */
+#define esas2r_nvramcalc_cksum(n) \
+ (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \
+ SASNVR_CKSUM_SEED))
+#define esas2r_nvramcalc_xor_cksum(n) \
+ (esas2r_calc_byte_xor_cksum((u8 *)(n), \
+ sizeof(struct esas2r_sas_nvram), 0))
+
+#define ESAS2R_FS_DRVR_VER 2
+
+static struct esas2r_sas_nvram default_sas_nvram = {
+ { 'E', 'S', 'A', 'S' }, /* signature */
+ SASNVR_VERSION, /* version */
+ 0, /* checksum */
+ 31, /* max_lun_for_target */
+ SASNVR_PCILAT_MAX, /* pci_latency */
+ SASNVR1_BOOT_DRVR, /* options1 */
+ SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */
+ | SASNVR2_SW_MUX_CTRL,
+ SASNVR_COAL_DIS, /* int_coalescing */
+ SASNVR_CMDTHR_NONE, /* cmd_throttle */
+ 3, /* dev_wait_time */
+ 1, /* dev_wait_count */
+ 0, /* spin_up_delay */
+ 0, /* ssp_align_rate */
+ { 0x50, 0x01, 0x08, 0x60, /* sas_addr */
+ 0x00, 0x00, 0x00, 0x00 },
+ { SASNVR_SPEED_AUTO }, /* phy_speed */
+ { SASNVR_MUX_DISABLED }, /* SAS multiplexing */
+ { 0 }, /* phy_flags */
+ SASNVR_SORT_SAS_ADDR, /* sort_type */
+ 3, /* dpm_reqcmd_lmt */
+ 3, /* dpm_stndby_time */
+ 0, /* dpm_active_time */
+ { 0 }, /* phy_target_id */
+ SASNVR_VSMH_DISABLED, /* virt_ses_mode */
+ SASNVR_RWM_DEFAULT, /* read_write_mode */
+ 0, /* link down timeout */
+ { 0 } /* reserved */
+};
+
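+/*
+ * Map FS IOCTL command codes (the array index) to VDA flash
+ * sub-functions; 0xFF marks a command with no VDA equivalent, which
+ * esas2r_process_fs_ioctl() below rejects with ATTO_STS_INV_FUNC.
+ */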
+static u8 cmd_to_fls_func[] = {
+ 0xFF,
+ VDA_FLASH_READ,
+ VDA_FLASH_BEGINW,
+ VDA_FLASH_WRITE,
+ VDA_FLASH_COMMIT,
+ VDA_FLASH_CANCEL
+};
+
+static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed)
+{
+ u32 cksum = seed;
+ u8 *p = (u8 *)&cksum;
+
+ while (len) {
+ if (((uintptr_t)addr & 3) == 0)
+ break;
+
+ cksum = cksum ^ *addr;
+ addr++;
+ len--;
+ }
+ while (len >= sizeof(u32)) {
+ cksum = cksum ^ *(u32 *)addr;
+ addr += 4;
+ len -= 4;
+ }
+ while (len--) {
+ cksum = cksum ^ *addr;
+ addr++;
+ }
+ return p[0] ^ p[1] ^ p[2] ^ p[3];
+}
+
+static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed)
+{
+ u8 *p = (u8 *)addr;
+ u8 cksum = seed;
+
+ while (len--)
+ cksum = cksum + p[len];
+ return cksum;
+}
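+
+/*
+ * Illustrative sketch, not driver code: both helpers are used in a
+ * "store the negated sum" scheme. esas2r_nvram_write() below folds the
+ * current sum back into the stored checksum byte, so a later validation
+ * pass over the whole structure sums to zero:
+ *
+ *	n->checksum = n->checksum - esas2r_nvramcalc_cksum(n);
+ *	...
+ *	valid = (esas2r_nvramcalc_cksum(n) == 0);
+ */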
+
+/* Interrupt callback to process FM API write requests. */
+static void esas2r_fmapi_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+
+ if (rq->req_stat == RS_SUCCESS) {
+ /* Last request was successful. See what to do now. */
+ switch (vrq->sub_func) {
+ case VDA_FLASH_BEGINW:
+ if (fc->sgc.cur_offset == NULL)
+ goto commit;
+
+ vrq->sub_func = VDA_FLASH_WRITE;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_WRITE:
+commit:
+ vrq->sub_func = VDA_FLASH_COMMIT;
+ rq->req_stat = RS_PENDING;
+ rq->interrupt_cb = fc->interrupt_cb;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (rq->req_stat != RS_PENDING)
+ /*
+ * All done. Call the real callback to complete the FM API
+ * request. We should only get here if a BEGINW or WRITE
+ * operation failed.
+ */
+ (*fc->interrupt_cb)(a, rq);
+}
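+
+/*
+ * Sketch of the sub_func progression driven by this callback (as
+ * implemented above, shown here for reference only):
+ *
+ *	VDA_FLASH_BEGINW --(data to write)--> VDA_FLASH_WRITE
+ *	VDA_FLASH_BEGINW --(no data)--------> VDA_FLASH_COMMIT
+ *	VDA_FLASH_WRITE  -------------------> VDA_FLASH_COMMIT
+ *
+ * Once COMMIT is issued, the original fc->interrupt_cb completes the
+ * FM API request.
+ */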
+
+/*
+ * Build a flash request based on the flash context. The request status
+ * is filled in on an error.
+ */
+static void build_flash_msg(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_sg_context *sgc = &fc->sgc;
+ u8 cksum = 0;
+
+ /* calculate the checksum */
+ if (fc->func == VDA_FLASH_BEGINW) {
+ if (sgc->cur_offset)
+ cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset,
+ sgc->length,
+ 0);
+ rq->interrupt_cb = esas2r_fmapi_callback;
+ } else {
+ rq->interrupt_cb = fc->interrupt_cb;
+ }
+ esas2r_build_flash_req(a,
+ rq,
+ fc->func,
+ cksum,
+ fc->flsh_addr,
+ sgc->length);
+
+ esas2r_rq_free_sg_lists(rq, a);
+
+ /*
+ * Remember the length we asked for. We have to keep track of
+ * the current amount done so we know how much to compare when
+ * doing the verification phase.
+ */
+ fc->curr_len = fc->sgc.length;
+
+ if (sgc->cur_offset) {
+ /* setup the S/G context to build the S/G table */
+ esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ rq->req_stat = RS_BUSY;
+ return;
+ }
+ } else {
+ fc->sgc.length = 0;
+ }
+
+ /* update the flsh_addr to the next one to write to */
+ fc->flsh_addr += fc->curr_len;
+}
+
+/* determine the method to process the flash request */
+static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ /*
+ * assume we have more to do. if we return with the status set to
+ * RS_PENDING, FM API tasks will continue.
+ */
+ rq->req_stat = RS_PENDING;
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ /* not supported for now */;
+ else
+ build_flash_msg(a, rq);
+
+ return rq->req_stat == RS_PENDING;
+}
+
+/* boot image fixer uppers called before downloading the image. */
+static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS];
+ struct esas2r_pc_image *pi;
+ struct esas2r_boot_header *bh;
+
+ pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset);
+ bh =
+ (struct esas2r_boot_header *)((u8 *)pi +
+ le16_to_cpu(pi->header_offset));
+ bh->device_id = cpu_to_le16(a->pcid->device);
+
+ /* Recalculate the checksum in the PNP header if there */
+ if (pi->pnp_offset) {
+ u8 *pnp_header_bytes =
+ ((u8 *)pi + le16_to_cpu(pi->pnp_offset));
+
+ /* Identifier - dword that starts at byte 10 */
+ *((u32 *)&pnp_header_bytes[10]) =
+ cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor,
+ a->pcid->subsystem_device));
+
+ /* Checksum - byte 9 */
+ pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes,
+ 32, 0);
+ }
+
+ /* Recalculate the checksum needed by the PC */
+ pi->checksum = pi->checksum -
+ esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0);
+}
+
+static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI];
+ u32 len = ch->length;
+ u32 offset = ch->image_offset;
+ struct esas2r_efi_image *ei;
+ struct esas2r_boot_header *bh;
+
+ while (len) {
+ u32 thislen;
+
+ ei = (struct esas2r_efi_image *)((u8 *)fi + offset);
+ bh = (struct esas2r_boot_header *)((u8 *)ei +
+ le16_to_cpu(
+ ei->header_offset));
+ bh->device_id = cpu_to_le16(a->pcid->device);
+ thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+
+ if (thislen > len)
+ break;
+
+ len -= thislen;
+ offset += thislen;
+ }
+}
+
+/* Complete a FM API request with the specified status. */
+static bool complete_fmapi_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq, u8 fi_stat)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_flash_img *fi = fc->fi;
+
+ fi->status = fi_stat;
+ fi->driver_error = rq->req_stat;
+ rq->interrupt_cb = NULL;
+ rq->req_stat = RS_SUCCESS;
+
+ if (fi_stat != FI_STAT_IMG_VER)
+ memset(fc->scratch, 0, FM_BUF_SZ);
+
+ esas2r_enable_heartbeat(a);
+ clear_bit(AF_FLASH_LOCK, &a->flags);
+ return false;
+}
+
+/* Process each phase of the flash download process. */
+static void fw_download_proc(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_flash_img *fi = fc->fi;
+ struct esas2r_component_header *ch;
+ u32 len;
+ u8 *p, *q;
+
+ /* If the previous operation failed, just return. */
+ if (rq->req_stat != RS_SUCCESS)
+ goto error;
+
+ /*
+ * If an upload just completed and the compare length is non-zero, we
+ * have just read back part of the image we wrote. Verify the section
+ * and continue reading until the entire image is verified.
+ */
+ if (fc->func == VDA_FLASH_READ
+ && fc->cmp_len) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+
+ p = fc->scratch;
+ q = (u8 *)fi /* start of the whole gob */
+ + ch->image_offset /* start of the current image */
+ + ch->length /* end of the current image */
+ - fc->cmp_len; /* where we are now */
+
+ /*
+ * NOTE - curr_len is the exact byte count for the read, even
+ * when the end is read and it's not a full buffer
+ */
+ for (len = fc->curr_len; len; len--)
+ if (*p++ != *q++)
+ goto error;
+
+ fc->cmp_len -= fc->curr_len; /* # left to compare */
+
+ /* Update fc and determine the length for the next upload */
+ if (fc->cmp_len > FM_BUF_SZ)
+ fc->sgc.length = FM_BUF_SZ;
+ else
+ fc->sgc.length = fc->cmp_len;
+
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ((u8 *)fc->scratch - (u8 *)fi);
+ }
+
+ /*
+ * This code uses a 'while' statement since the next component may
+ * have a length of zero. This can happen since some components are
+ * not required. At the end of this 'while' we set up the length
+ * for the next request, and therefore sgc.length can be zero.
+ */
+ while (fc->sgc.length == 0) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+
+ switch (fc->task) {
+ case FMTSK_ERASE_BOOT:
+ /* the BIOS image is written next */
+ ch = &fi->cmp_hdr[CH_IT_BIOS];
+ if (ch->length == 0)
+ goto no_bios;
+
+ fc->task = FMTSK_WRTBIOS;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_BIOS;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTBIOS:
+ /*
+ * The BIOS image has been written - read it and
+ * verify it
+ */
+ fc->task = FMTSK_READBIOS;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READBIOS:
+no_bios:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The MAC image is written next */
+ ch = &fi->cmp_hdr[CH_IT_MAC];
+ if (ch->length == 0)
+ goto no_mac;
+
+ fc->task = FMTSK_WRTMAC;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_MAC;
+ fc->flsh_addr = FLS_OFFSET_BOOT
+ + fi->cmp_hdr[CH_IT_BIOS].length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTMAC:
+ /* The MAC image has been written - read and verify */
+ fc->task = FMTSK_READMAC;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr -= ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READMAC:
+no_mac:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The EFI image is written next */
+ ch = &fi->cmp_hdr[CH_IT_EFI];
+ if (ch->length == 0)
+ goto no_efi;
+
+ fc->task = FMTSK_WRTEFI;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_EFI;
+ fc->flsh_addr = FLS_OFFSET_BOOT
+ + fi->cmp_hdr[CH_IT_BIOS].length
+ + fi->cmp_hdr[CH_IT_MAC].length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTEFI:
+ /* The EFI image has been written - read and verify */
+ fc->task = FMTSK_READEFI;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr -= ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READEFI:
+no_efi:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The CFG image is written next */
+ ch = &fi->cmp_hdr[CH_IT_CFG];
+
+ if (ch->length == 0)
+ goto no_cfg;
+ fc->task = FMTSK_WRTCFG;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_CFG;
+ fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTCFG:
+ /* The CFG image has been written - read and verify */
+ fc->task = FMTSK_READCFG;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READCFG:
+no_cfg:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /*
+ * The download is complete. If in degraded mode,
+ * attempt a chip reset.
+ */
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ esas2r_local_reset_adapter(a);
+
+ a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
+ esas2r_print_flash_rev(a);
+
+ /* Update the type of boot image on the card */
+ memcpy(a->image_type, fi->rel_version,
+ sizeof(fi->rel_version));
+ complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+ return;
+ }
+
+ /* If verifying, don't try reading more than what's there */
+ if (fc->func == VDA_FLASH_READ
+ && fc->sgc.length > fc->cmp_len)
+ fc->sgc.length = fc->cmp_len;
+ }
+
+ /* Build the request to perform the next action */
+ if (!load_image(a, rq)) {
+error:
+ if (fc->comp_typ < fi->num_comps) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+ ch->status = CH_STAT_FAILED;
+ }
+
+ complete_fmapi_req(a, rq, FI_STAT_FAILED);
+ }
+}
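+
+/*
+ * Task sequence sketch (as coded above, for reference only); each image
+ * with a non-zero length is written with BEGINW/WRITE/COMMIT and then
+ * read back and compared:
+ *
+ *	FMTSK_ERASE_BOOT -> FMTSK_WRTBIOS -> FMTSK_READBIOS
+ *	                 -> FMTSK_WRTMAC  -> FMTSK_READMAC
+ *	                 -> FMTSK_WRTEFI  -> FMTSK_READEFI
+ *	                 -> FMTSK_WRTCFG  -> FMTSK_READCFG -> done
+ */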
+
+/* Determine the flash image adap_typ for this adapter */
+static u8 get_fi_adap_type(struct esas2r_adapter *a)
+{
+ u8 type;
+
+ /* use the device ID to get the correct adap_typ for this HBA */
+ switch (a->pcid->device) {
+ case ATTO_DID_INTEL_IOP348:
+ type = FI_AT_SUN_LAKE;
+ break;
+
+ case ATTO_DID_MV_88RC9580:
+ case ATTO_DID_MV_88RC9580TS:
+ case ATTO_DID_MV_88RC9580TSE:
+ case ATTO_DID_MV_88RC9580TL:
+ type = FI_AT_MV_9580;
+ break;
+
+ default:
+ type = FI_AT_UNKNWN;
+ break;
+ }
+
+ return type;
+}
+
+/* Size of config + copyright + flash_ver images, 0 for failure. */
+static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
+{
+ u16 *pw = (u16 *)cfg - 1;
+ u32 sz = 0;
+ u32 len = length;
+
+ if (len == 0)
+ len = FM_BUF_SZ;
+
+ if (flash_ver)
+ *flash_ver = 0;
+
+ while (true) {
+ u16 type;
+ u16 size;
+
+ type = le16_to_cpu(*pw--);
+ size = le16_to_cpu(*pw--);
+
+ if (type != FBT_CPYR
+ && type != FBT_SETUP
+ && type != FBT_FLASH_VER)
+ break;
+
+ if (type == FBT_FLASH_VER
+ && flash_ver)
+ *flash_ver = le32_to_cpu(*(u32 *)(pw - 1));
+
+ sz += size + (2 * sizeof(u16));
+ pw -= size / sizeof(u16);
+
+ if (sz > len - (2 * sizeof(u16)))
+ break;
+ }
+
+ /* See if we are comparing the size to the specified length */
+ if (length && sz != length)
+ return 0;
+
+ return sz;
+}
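+
+/*
+ * Layout sketch (inferred from the walk above, not normative): chk_cfg()
+ * starts at the end of the config area and walks backwards through
+ * trailing-tagged blocks, each of which ends with a size word followed
+ * by a type word:
+ *
+ *	... | block data (size bytes) | size (u16) | type (u16) | <- cfg
+ *
+ * The walk stops at the first type that is not FBT_CPYR, FBT_SETUP or
+ * FBT_FLASH_VER.
+ */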
+
+/* Verify that the boot image is valid */
+static u8 chk_boot(u8 *boot_img, u32 length)
+{
+ struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
+ u16 hdroffset = le16_to_cpu(bi->header_offset);
+ struct esas2r_boot_header *bh;
+
+ if (bi->signature != cpu_to_le16(0xaa55)
+ || (long)hdroffset >
+ (long)(65536L - sizeof(struct esas2r_boot_header))
+ || (hdroffset & 3)
+ || (hdroffset < sizeof(struct esas2r_boot_image))
+ || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
+ return 0xff;
+
+ bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);
+
+ if (bh->signature[0] != 'P'
+ || bh->signature[1] != 'C'
+ || bh->signature[2] != 'I'
+ || bh->signature[3] != 'R'
+ || le16_to_cpu(bh->struct_length) <
+ (u16)sizeof(struct esas2r_boot_header)
+ || bh->class_code[2] != 0x01
+ || bh->class_code[1] != 0x04
+ || bh->class_code[0] != 0x00
+ || (bh->code_type != CODE_TYPE_PC
+ && bh->code_type != CODE_TYPE_OPEN
+ && bh->code_type != CODE_TYPE_EFI))
+ return 0xff;
+
+ return bh->code_type;
+}
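+
+/*
+ * For reference (standard PCI expansion ROM layout, matched by the
+ * checks above): the image starts with the 0xAA55 signature and a
+ * header_offset pointing at a "PCIR" data structure whose class code
+ * must read 01:04:00 (mass storage / RAID) and whose code_type selects
+ * PC BIOS, Open Firmware or EFI.
+ */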
+
+/* The sum of all the WORDS of the image */
+static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
+{
+ struct esas2r_flash_img *fi = fc->fi;
+ u16 cksum;
+ u32 len;
+ u16 *pw;
+
+ for (len = (fi->length - fc->fi_hdr_len) / 2,
+ pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
+ cksum = 0;
+ len;
+ len--, pw++)
+ cksum = cksum + le16_to_cpu(*pw);
+
+ return cksum;
+}
+
+/*
+ * Verify the flash image structure. The following verifications will
+ * be performed:
+ * 1) verify the fi_version is correct.
+ * 2) verify the checksum of the entire image.
+ * 3) validate the adap_typ, action and length fields.
+ * 4) validate each component header; check the img_type and
+ * length fields.
+ * 5) validate each component image; validate signatures and
+ * local checksums.
+ */
+static bool verify_fi(struct esas2r_adapter *a,
+ struct esas2r_flash_context *fc)
+{
+ struct esas2r_flash_img *fi = fc->fi;
+ u8 type;
+ bool imgerr;
+ u16 i;
+ u32 len;
+ struct esas2r_component_header *ch;
+
+ /* Verify the length - it must be even since we do a word checksum */
+ len = fi->length;
+
+ if ((len & 1)
+ || len < fc->fi_hdr_len) {
+ fi->status = FI_STAT_LENGTH;
+ return false;
+ }
+
+ /* Get adapter type and verify type in flash image */
+ type = get_fi_adap_type(a);
+ if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
+ fi->status = FI_STAT_ADAPTYP;
+ return false;
+ }
+
+ /*
+ * Loop through each component and verify the img_type and length
+ * fields. Keep a running count of the sizes so we can compare the
+ * total size to the sum of the component sizes.
+ */
+ imgerr = false;
+
+ for (i = 0, len = 0, ch = fi->cmp_hdr;
+ i < fi->num_comps;
+ i++, ch++) {
+ bool cmperr = false;
+
+ /*
+ * Verify that the component header has the same index as the
+ * image type. The headers must be ordered correctly
+ */
+ if (i != ch->img_type) {
+ imgerr = true;
+ ch->status = CH_STAT_INVALID;
+ continue;
+ }
+
+ switch (ch->img_type) {
+ case CH_IT_BIOS:
+ type = CODE_TYPE_PC;
+ break;
+
+ case CH_IT_MAC:
+ type = CODE_TYPE_OPEN;
+ break;
+
+ case CH_IT_EFI:
+ type = CODE_TYPE_EFI;
+ break;
+ }
+
+ switch (ch->img_type) {
+ case CH_IT_FW:
+ case CH_IT_NVR:
+ break;
+
+ case CH_IT_BIOS:
+ case CH_IT_MAC:
+ case CH_IT_EFI:
+ if (ch->length & 0x1ff)
+ cmperr = true;
+
+ /* Test if component image is present */
+ if (ch->length == 0)
+ break;
+
+ /* Image is present - verify the image */
+ if (chk_boot((u8 *)fi + ch->image_offset, ch->length)
+ != type)
+ cmperr = true;
+
+ break;
+
+ case CH_IT_CFG:
+
+ /* Test if component image is present */
+ if (ch->length == 0) {
+ cmperr = true;
+ break;
+ }
+
+ /* Image is present - verify the image */
+ if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length,
+ ch->length, NULL))
+ cmperr = true;
+
+ break;
+
+ default:
+
+ fi->status = FI_STAT_UNKNOWN;
+ return false;
+ }
+
+ if (cmperr) {
+ imgerr = true;
+ ch->status = CH_STAT_INVALID;
+ } else {
+ ch->status = CH_STAT_PENDING;
+ len += ch->length;
+ }
+ }
+
+ if (imgerr) {
+ fi->status = FI_STAT_MISSING;
+ return false;
+ }
+
+ /* Compare fi->length to the sum of ch->length fields */
+ if (len != fi->length - fc->fi_hdr_len) {
+ fi->status = FI_STAT_LENGTH;
+ return false;
+ }
+
+ /* Compute the checksum - it should come out zero */
+ if (fi->checksum != calc_fi_checksum(fc)) {
+ fi->status = FI_STAT_CHKSUM;
+ return false;
+ }
+
+ return true;
+}
+
+/* Fill in the FS IOCTL response data from a completed request. */
+static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)rq->interrupt_cx;
+
+ if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+ esas2r_enable_heartbeat(a);
+
+ fs->driver_error = rq->req_stat;
+
+ if (fs->driver_error == RS_SUCCESS)
+ fs->status = ATTO_STS_SUCCESS;
+ else
+ fs->status = ATTO_STS_FAILED;
+}
+
+/* Prepare an FS IOCTL request to be sent to the firmware. */
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_ioctl_fs *fs,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
+ struct esas2r_ioctlfs_command *fsc = &fs->command;
+ u8 func = 0;
+ u32 datalen;
+
+ fs->status = ATTO_STS_FAILED;
+ fs->driver_error = RS_PENDING;
+
+ if (fs->version > ESAS2R_FS_VER) {
+ fs->status = ATTO_STS_INV_VERSION;
+ return false;
+ }
+
+ if (fsc->command >= cmdcnt) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ func = cmd_to_fls_func[fsc->command];
+ if (func == 0xFF) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
+ if ((a->pcid->device != ATTO_DID_MV_88RC9580
+ || fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TS
+ || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TSE
+ || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TL
+ || fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
+ fs->status = ATTO_STS_INV_ADAPTER;
+ return false;
+ }
+
+ if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
+ fs->status = ATTO_STS_INV_DRVR_VER;
+ return false;
+ }
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
+ fs->status = ATTO_STS_DEGRADED;
+ return false;
+ }
+
+ rq->interrupt_cb = esas2r_complete_fs_ioctl;
+ rq->interrupt_cx = fs;
+ datalen = le32_to_cpu(fsc->length);
+ esas2r_build_flash_req(a,
+ rq,
+ func,
+ fsc->checksum,
+ le32_to_cpu(fsc->flash_addr),
+ datalen);
+
+ if (func == VDA_FLASH_WRITE
+ || func == VDA_FLASH_READ) {
+ if (datalen == 0) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
+ sgc->length = datalen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ fs->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+
+ if (func == VDA_FLASH_COMMIT)
+ esas2r_disable_heartbeat(a);
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
+static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
+{
+ u32 starttime;
+ u32 timeout;
+ u32 intstat;
+ u32 doorbell;
+
+ /* Disable chip interrupts awhile */
+ if (function == DRBL_FLASH_REQ)
+ esas2r_disable_chip_interrupts(a);
+
+ /* Issue the request to the firmware */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, function);
+
+ /* Now wait for the firmware to process it */
+ starttime = jiffies_to_msecs(jiffies);
+
+ if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+ test_bit(AF_DISC_PENDING, &a->flags))
+ timeout = 40000;
+ else
+ timeout = 5000;
+
+ while (true) {
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (intstat & MU_INTSTAT_DRBL) {
+ /* Got a doorbell interrupt. Check for the function */
+ doorbell =
+ esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ if (doorbell & function)
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+ /*
+ * Timeout. If we were requesting flash access,
+ * indicate we are done so the firmware knows we gave
+ * up. If this was a REQ, we also need to re-enable
+ * chip interrupts.
+ */
+ if (function == DRBL_FLASH_REQ) {
+ esas2r_hdebug("flash access timeout");
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_FLASH_DONE);
+ esas2r_enable_chip_interrupts(a);
+ } else {
+ esas2r_hdebug("flash release timeout");
+ }
+
+ return false;
+ }
+ }
+
+ /* if we're done, re-enable chip interrupts */
+ if (function == DRBL_FLASH_DONE)
+ esas2r_enable_chip_interrupts(a);
+
+ return true;
+}
+
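+/*
+ * Typical usage pattern (a sketch; see esas2r_read_flash_block() below):
+ *
+ *	if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
+ *		return false;
+ *	... access the flash through the data window ...
+ *	esas2r_flash_access(a, DRBL_FLASH_DONE);
+ */
+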
+#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE)
+
+bool esas2r_read_flash_block(struct esas2r_adapter *a,
+ void *to,
+ u32 from,
+ u32 size)
+{
+ u8 *end = (u8 *)to;
+
+ /* Try to acquire access to the flash */
+ if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
+ return false;
+
+ while (size) {
+ u32 len;
+ u32 offset;
+ u32 iatvr;
+
+ if (test_bit(AF2_SERIAL_FLASH, &a->flags2))
+ iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
+ else
+ iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
+
+ esas2r_map_data_window(a, iatvr);
+ offset = from & (WINDOW_SIZE - 1);
+ len = size;
+
+ if (len > WINDOW_SIZE - offset)
+ len = WINDOW_SIZE - offset;
+
+ from += len;
+ size -= len;
+
+ while (len--) {
+ *end++ = esas2r_read_data_byte(a, offset);
+ offset++;
+ }
+ }
+
+ /* Release flash access */
+ esas2r_flash_access(a, DRBL_FLASH_DONE);
+ return true;
+}
+
+bool esas2r_read_flash_rev(struct esas2r_adapter *a)
+{
+ u8 bytes[256];
+ u16 *pw;
+ u16 *pwstart;
+ u16 type;
+ u16 size;
+ u32 sz;
+
+ sz = sizeof(bytes);
+ pw = (u16 *)(bytes + sz);
+ pwstart = (u16 *)bytes + 2;
+
+ if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz))
+ goto invalid_rev;
+
+ while (pw >= pwstart) {
+ pw--;
+ type = le16_to_cpu(*pw);
+ pw--;
+ size = le16_to_cpu(*pw);
+ pw -= size / 2;
+
+ if (type == FBT_CPYR
+ || type == FBT_SETUP
+ || pw < pwstart)
+ continue;
+
+ if (type == FBT_FLASH_VER)
+ a->flash_ver = le32_to_cpu(*(u32 *)pw);
+
+ break;
+ }
+
+invalid_rev:
+ return esas2r_print_flash_rev(a);
+}
+
+bool esas2r_print_flash_rev(struct esas2r_adapter *a)
+{
+ u16 year = LOWORD(a->flash_ver);
+ u8 day = LOBYTE(HIWORD(a->flash_ver));
+ u8 month = HIBYTE(HIWORD(a->flash_ver));
+
+ if (day == 0
+ || month == 0
+ || day > 31
+ || month > 12
+ || year < 2006
+ || year > 9999) {
+ strcpy(a->flash_rev, "not found");
+ a->flash_ver = 0;
+ return false;
+ }
+
+ sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year);
+ esas2r_hdebug("flash version: %s", a->flash_rev);
+ return true;
+}
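+
+/*
+ * Worked example (illustrative only): the flash version dword packs a
+ * date as month in the high byte of the high word, day in the low byte
+ * of the high word, and the year in the low word. So
+ *
+ *	a->flash_ver = 0x0C1F07D6
+ *
+ * decodes to month 0x0C (12), day 0x1F (31), year 0x07D6 (2006) and
+ * prints as "12/31/2006".
+ */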
+
+/*
+ * Find the type of boot image that is currently in the flash.
+ * The chip only has a 64 KB PCI-e expansion ROM, so only one
+ * image can be flashed at a time.
+ */
+bool esas2r_read_image_type(struct esas2r_adapter *a)
+{
+ u8 bytes[256];
+ struct esas2r_boot_image *bi;
+ struct esas2r_boot_header *bh;
+ u32 sz;
+ u32 len;
+ u32 offset;
+
+ /* Start at the base of the boot images and look for a valid image */
+ sz = sizeof(bytes);
+ len = FLS_LENGTH_BOOT;
+ offset = 0;
+
+ while (true) {
+ if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT +
+ offset,
+ sz))
+ goto invalid_rev;
+
+ bi = (struct esas2r_boot_image *)bytes;
+ bh = (struct esas2r_boot_header *)((u8 *)bi +
+ le16_to_cpu(
+ bi->header_offset));
+ if (bi->signature != cpu_to_le16(0xAA55))
+ goto invalid_rev;
+
+ if (bh->code_type == CODE_TYPE_PC) {
+ strcpy(a->image_type, "BIOS");
+
+ return true;
+ } else if (bh->code_type == CODE_TYPE_EFI) {
+ struct esas2r_efi_image *ei;
+
+ /*
+ * So we have an EFI image. There are several types
+ * so see which architecture we have.
+ */
+ ei = (struct esas2r_efi_image *)bytes;
+
+ switch (le16_to_cpu(ei->machine_type)) {
+ case EFI_MACHINE_IA32:
+ strcpy(a->image_type, "EFI 32-bit");
+ return true;
+
+ case EFI_MACHINE_IA64:
+ strcpy(a->image_type, "EFI itanium");
+ return true;
+
+ case EFI_MACHINE_X64:
+ strcpy(a->image_type, "EFI 64-bit");
+ return true;
+
+ case EFI_MACHINE_EBC:
+ strcpy(a->image_type, "EFI EBC");
+ return true;
+
+ default:
+ goto invalid_rev;
+ }
+ } else {
+ u32 thislen;
+
+ /* jump to the next image */
+ thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+ if (thislen == 0
+ || thislen + offset > len
+ || bh->indicator == INDICATOR_LAST)
+ break;
+
+ offset += thislen;
+ }
+ }
+
+invalid_rev:
+ strcpy(a->image_type, "no boot images");
+ return false;
+}
+
+/*
+ * Read and validate current NVRAM parameters by accessing
+ * physical NVRAM directly. If the currently stored parameters are
+ * invalid, use the defaults.
+ */
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
+{
+ bool result;
+
+ if (down_interruptible(&a->nvram_semaphore))
+ return false;
+
+ if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
+ sizeof(struct esas2r_sas_nvram))) {
+ esas2r_hdebug("NVRAM read failed, using defaults");
+ up(&a->nvram_semaphore);
+ return false;
+ }
+
+ result = esas2r_nvram_validate(a);
+
+ up(&a->nvram_semaphore);
+
+ return result;
+}
+
+/* Interrupt callback to process NVRAM completions. */
+static void esas2r_nvram_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ if (rq->req_stat == RS_SUCCESS) {
+ /* last request was successful. see what to do now. */
+
+ switch (vrq->sub_func) {
+ case VDA_FLASH_BEGINW:
+ vrq->sub_func = VDA_FLASH_WRITE;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_WRITE:
+ vrq->sub_func = VDA_FLASH_COMMIT;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_READ:
+ esas2r_nvram_validate(a);
+ break;
+
+ case VDA_FLASH_COMMIT:
+ default:
+ break;
+ }
+ }
+
+ if (rq->req_stat != RS_PENDING) {
+ /* update the NVRAM state */
+ if (rq->req_stat == RS_SUCCESS)
+ set_bit(AF_NVR_VALID, &a->flags);
+ else
+ clear_bit(AF_NVR_VALID, &a->flags);
+
+ esas2r_enable_heartbeat(a);
+
+ up(&a->nvram_semaphore);
+ }
+}
+
+/*
+ * Write the contents of nvram to the adapter's physical NVRAM.
+ * The cached copy of the NVRAM is also updated.
+ */
+bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *nvram)
+{
+ struct esas2r_sas_nvram *n = nvram;
+ u8 sas_address_bytes[8];
+ u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return false;
+
+ if (down_interruptible(&a->nvram_semaphore))
+ return false;
+
+ if (n == NULL)
+ n = a->nvram;
+
+ /* check the validity of the settings */
+ if (n->version > SASNVR_VERSION) {
+ up(&a->nvram_semaphore);
+ return false;
+ }
+
+ memcpy(&sas_address_bytes[0], n->sas_addr, 8);
+
+ if (sas_address_bytes[0] != 0x50
+ || sas_address_bytes[1] != 0x01
+ || sas_address_bytes[2] != 0x08
+ || (sas_address_bytes[3] & 0xF0) != 0x60
+ || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) {
+ up(&a->nvram_semaphore);
+ return false;
+ }
+
+ if (n->spin_up_delay > SASNVR_SPINUP_MAX)
+ n->spin_up_delay = SASNVR_SPINUP_MAX;
+
+ n->version = SASNVR_VERSION;
+ n->checksum = n->checksum - esas2r_nvramcalc_cksum(n);
+ memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram));
+
+ /* write the NVRAM */
+ n = a->nvram;
+ esas2r_disable_heartbeat(a);
+
+ esas2r_build_flash_req(a,
+ rq,
+ VDA_FLASH_BEGINW,
+ esas2r_nvramcalc_xor_cksum(n),
+ FLS_OFFSET_NVR,
+ sizeof(struct esas2r_sas_nvram));
+
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
+
+ vrq->data.sge[0].length =
+ cpu_to_le32(SGE_LAST |
+ sizeof(struct esas2r_sas_nvram));
+ vrq->data.sge[0].address = cpu_to_le64(
+ a->uncached_phys + (u64)((u8 *)n - a->uncached));
+ } else {
+ vrq->data.prde[0].ctl_len =
+ cpu_to_le32(sizeof(struct esas2r_sas_nvram));
+ vrq->data.prde[0].address = cpu_to_le64(
+ a->uncached_phys
+ + (u64)((u8 *)n - a->uncached));
+ }
+ rq->interrupt_cb = esas2r_nvram_callback;
+ esas2r_start_request(a, rq);
+ return true;
+}
+
+/* Validate the cached NVRAM. If the NVRAM is invalid, load the defaults. */
+bool esas2r_nvram_validate(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *n = a->nvram;
+ bool rslt = false;
+
+ if (n->signature[0] != 'E'
+ || n->signature[1] != 'S'
+ || n->signature[2] != 'A'
+ || n->signature[3] != 'S') {
+ esas2r_hdebug("invalid NVRAM signature");
+ } else if (esas2r_nvramcalc_cksum(n)) {
+ esas2r_hdebug("invalid NVRAM checksum");
+ } else if (n->version > SASNVR_VERSION) {
+ esas2r_hdebug("invalid NVRAM version");
+ } else {
+ set_bit(AF_NVR_VALID, &a->flags);
+ rslt = true;
+ }
+
+ if (!rslt) {
+ esas2r_hdebug("using defaults");
+ esas2r_nvram_set_defaults(a);
+ }
+
+ return rslt;
+}
+
+/*
+ * Set the cached NVRAM to defaults. Note that this function sets the default
+ * NVRAM when it has been determined that the physical NVRAM is invalid.
+ * In this case, the SAS address is fabricated.
+ */
+void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *n = a->nvram;
+ u32 time = jiffies_to_msecs(jiffies);
+
+ clear_bit(AF_NVR_VALID, &a->flags);
+ *n = default_sas_nvram;
+ n->sas_addr[3] |= 0x0F;
+ n->sas_addr[4] = HIBYTE(LOWORD(time));
+ n->sas_addr[5] = LOBYTE(LOWORD(time));
+ n->sas_addr[6] = a->pcid->bus->number;
+ n->sas_addr[7] = a->pcid->devfn;
+}
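+
+/*
+ * Illustrative note: the fabricated address keeps the fixed NAA-5
+ * prefix from default_sas_nvram (50:01:08:6x) and makes the remainder
+ * unique per boot and per slot:
+ *
+ *	byte 3: 0x60 | 0x0F -> 0x6F
+ *	bytes 4-5: low word of the current time in milliseconds
+ *	byte 6: PCI bus number; byte 7: PCI devfn
+ */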
+
+void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
+ struct esas2r_sas_nvram *nvram)
+{
+ u8 sas_addr[8];
+
+ /*
+ * In case we are copying the defaults into the adapter, copy the SAS
+ * address out first.
+ */
+ memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
+ *nvram = default_sas_nvram;
+ memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
+}
+
+bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
+ struct esas2r_request *rq, struct esas2r_sg_context *sgc)
+{
+ struct esas2r_flash_context *fc = &a->flash_context;
+ u8 j;
+ struct esas2r_component_header *ch;
+
+ if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) {
+ /* flag was already set */
+ fi->status = FI_STAT_BUSY;
+ return false;
+ }
+
+ memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context));
+ sgc = &fc->sgc;
+ fc->fi = fi;
+ fc->sgc_offset = sgc->cur_offset;
+ rq->req_stat = RS_SUCCESS;
+ rq->interrupt_cx = fc;
+
+ switch (fi->fi_version) {
+ case FI_VERSION_1:
+ fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf;
+ fc->num_comps = FI_NUM_COMPS_V1;
+ fc->fi_hdr_len = sizeof(struct esas2r_flash_img);
+ break;
+
+ default:
+ return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
+
+ switch (fi->action) {
+ case FI_ACT_DOWN: /* Download the components */
+ /* Verify the format of the flash image */
+ if (!verify_fi(a, fc))
+ return complete_fmapi_req(a, rq, fi->status);
+
+ /* Adjust the BIOS fields that are dependent on the HBA */
+ ch = &fi->cmp_hdr[CH_IT_BIOS];
+
+ if (ch->length)
+ fix_bios(a, fi);
+
+ /* Adjust the EFI fields that are dependent on the HBA */
+ ch = &fi->cmp_hdr[CH_IT_EFI];
+
+ if (ch->length)
+ fix_efi(a, fi);
+
+ /*
+ * Since the image was just modified, compute the checksum on
+ * the modified image. First update the CRC for the composite
+ * expansion ROM image.
+ */
+ fi->checksum = calc_fi_checksum(fc);
+
+ /* Disable the heartbeat */
+ esas2r_disable_heartbeat(a);
+
+ /* Now start up the download sequence */
+ fc->task = FMTSK_ERASE_BOOT;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_CFG;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->sgc.length = FLS_LENGTH_BOOT;
+ fc->sgc.cur_offset = NULL;
+
+ /* Setup the callback address */
+ fc->interrupt_cb = fw_download_proc;
+ break;
+
+ case FI_ACT_UPSZ: /* Get upload sizes */
+ fi->adap_typ = get_fi_adap_type(a);
+ fi->flags = 0;
+ fi->num_comps = fc->num_comps;
+ fi->length = fc->fi_hdr_len;
+
+ /* Report the type of boot image in the rel_version string */
+ memcpy(fi->rel_version, a->image_type,
+ sizeof(fi->rel_version));
+
+ /* Build the component headers */
+ for (j = 0, ch = fi->cmp_hdr;
+ j < fi->num_comps;
+ j++, ch++) {
+ ch->img_type = j;
+ ch->status = CH_STAT_PENDING;
+ ch->length = 0;
+ ch->version = 0xffffffff;
+ ch->image_offset = 0;
+ ch->pad[0] = 0;
+ ch->pad[1] = 0;
+ }
+
+ if (a->flash_ver != 0) {
+ fi->cmp_hdr[CH_IT_BIOS].version =
+ fi->cmp_hdr[CH_IT_MAC].version =
+ fi->cmp_hdr[CH_IT_EFI].version =
+ fi->cmp_hdr[CH_IT_CFG].version
+ = a->flash_ver;
+
+ fi->cmp_hdr[CH_IT_BIOS].status =
+ fi->cmp_hdr[CH_IT_MAC].status =
+ fi->cmp_hdr[CH_IT_EFI].status =
+ fi->cmp_hdr[CH_IT_CFG].status =
+ CH_STAT_SUCCESS;
+
+ return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+ }
+
+ /* fall through */
+
+ case FI_ACT_UP: /* Upload the components */
+ default:
+ return complete_fmapi_req(a, rq, FI_STAT_INVALID);
+ }
+
+ /*
+ * If we make it here, fc has been setup to do the first task. Call
+ * load_image to format the request, start it, and get out. The
+ * interrupt code will call the callback when the first message is
+ * complete.
+ */
+ if (!load_image(a, rq))
+ return complete_fmapi_req(a, rq, FI_STAT_FAILED);
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
new file mode 100644
index 000000000000..b9750e296d71
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -0,0 +1,1771 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_init.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
+ struct esas2r_mem_desc *mem_desc,
+ u32 align)
+{
+ mem_desc->esas2r_param = mem_desc->size + align;
+ mem_desc->virt_addr = NULL;
+ mem_desc->phys_addr = 0;
+ mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)mem_desc->
+ esas2r_param,
+ (dma_addr_t *)&mem_desc->
+ phys_addr,
+ GFP_KERNEL);
+
+ if (mem_desc->esas2r_data == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate %lu bytes of consistent memory!",
+ (long
+ unsigned
+ int)mem_desc->esas2r_param);
+ return false;
+ }
+
+ mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
+ mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
+ memset(mem_desc->virt_addr, 0, mem_desc->size);
+ return true;
+}
+
+static void esas2r_initmem_free(struct esas2r_adapter *a,
+ struct esas2r_mem_desc *mem_desc)
+{
+ if (mem_desc->virt_addr == NULL)
+ return;
+
+ /*
+ * Careful! phys_addr and virt_addr may have been adjusted from the
+ * original allocation in order to return the desired alignment. That
+ * means we have to use the original address (in esas2r_data) and size
+ * (esas2r_param) and calculate the original physical address based on
+ * the difference between the requested and actual allocation size.
+ */
+ if (mem_desc->phys_addr) {
+ int unalign = ((u8 *)mem_desc->virt_addr) -
+ ((u8 *)mem_desc->esas2r_data);
+
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)mem_desc->esas2r_param,
+ mem_desc->esas2r_data,
+ (dma_addr_t)(mem_desc->phys_addr - unalign));
+ } else {
+ kfree(mem_desc->esas2r_data);
+ }
+
+ mem_desc->virt_addr = NULL;
+}
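+
+/*
+ * Worked example (illustrative only): for size = 100 and align = 256,
+ * esas2r_initmem_alloc() asks dma_alloc_coherent() for 356 bytes, then
+ * rounds virt_addr/phys_addr up to the next 256-byte boundary. On free,
+ * the padding (virt_addr - esas2r_data) is subtracted back out of
+ * phys_addr so the original allocation is released.
+ */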
+
+static bool alloc_vda_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_mem_desc *memdesc = kzalloc(
+ sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+ if (memdesc == NULL) {
+ esas2r_hdebug("could not alloc mem for vda request memdesc\n");
+ return false;
+ }
+
+ memdesc->size = sizeof(union atto_vda_req) +
+ ESAS2R_DATA_BUF_LEN;
+
+ if (!esas2r_initmem_alloc(a, memdesc, 256)) {
+ esas2r_hdebug("could not alloc mem for vda request\n");
+ kfree(memdesc);
+ return false;
+ }
+
+ a->num_vrqs++;
+ list_add(&memdesc->next_desc, &a->vrq_mds_head);
+
+ rq->vrq_md = memdesc;
+ rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
+ rq->vrq->scsi.handle = a->num_vrqs;
+
+ return true;
+}
+
+static void esas2r_unmap_regions(struct esas2r_adapter *a)
+{
+ if (a->regs)
+ iounmap((void __iomem *)a->regs);
+
+ a->regs = NULL;
+
+ pci_release_region(a->pcid, 2);
+
+ if (a->data_window)
+ iounmap((void __iomem *)a->data_window);
+
+ a->data_window = NULL;
+
+ pci_release_region(a->pcid, 0);
+}
+
+static int esas2r_map_regions(struct esas2r_adapter *a)
+{
+ int error;
+
+ a->regs = NULL;
+ a->data_window = NULL;
+
+ error = pci_request_region(a->pcid, 2, a->name);
+ if (error != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "pci_request_region(2) failed, error %d",
+ error);
+
+ return error;
+ }
+
+ a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
+ pci_resource_len(a->pcid, 2));
+ if (a->regs == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "ioremap failed for regs mem region\n");
+ pci_release_region(a->pcid, 2);
+ return -EFAULT;
+ }
+
+ error = pci_request_region(a->pcid, 0, a->name);
+ if (error != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "pci_request_region(2) failed, error %d",
+ error);
+ esas2r_unmap_regions(a);
+ return error;
+ }
+
+ a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
+ 0),
+ pci_resource_len(a->pcid, 0));
+ if (a->data_window == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "ioremap failed for data_window mem region\n");
+ esas2r_unmap_regions(a);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
+{
+ int i;
+
+ /* Set up interrupt mode based on the requested value */
+ switch (intr_mode) {
+ case INTR_MODE_LEGACY:
+use_legacy_interrupts:
+ a->intr_mode = INTR_MODE_LEGACY;
+ break;
+
+ case INTR_MODE_MSI:
+ i = pci_enable_msi(a->pcid);
+ if (i != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "failed to enable MSI for adapter %d, "
+ "falling back to legacy interrupts "
+ "(err=%d)", a->index,
+ i);
+ goto use_legacy_interrupts;
+ }
+ a->intr_mode = INTR_MODE_MSI;
+ set_bit(AF2_MSI_ENABLED, &a->flags2);
+ break;
+
+
+ default:
+ esas2r_log(ESAS2R_LOG_WARN,
+ "unknown interrupt_mode %d requested, "
+ "falling back to legacy interrupt",
+ interrupt_mode);
+ goto use_legacy_interrupts;
+ }
+}
+
+static void esas2r_claim_interrupts(struct esas2r_adapter *a)
+{
+ unsigned long flags = IRQF_DISABLED;
+
+ if (a->intr_mode == INTR_MODE_LEGACY)
+ flags |= IRQF_SHARED;
+
+ esas2r_log(ESAS2R_LOG_INFO,
+ "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
+ a->pcid->irq, a, a->name, flags);
+
+ if (request_irq(a->pcid->irq,
+ (a->intr_mode ==
+ INTR_MODE_LEGACY) ? esas2r_interrupt :
+ esas2r_msi_interrupt,
+ flags,
+ a->name,
+ a)) {
+ esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
+ a->pcid->irq);
+ return;
+ }
+
+ set_bit(AF2_IRQ_CLAIMED, &a->flags2);
+ esas2r_log(ESAS2R_LOG_INFO,
+ "claimed IRQ %d flags: 0x%lx",
+ a->pcid->irq, flags);
+}
+
+int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
+ int index)
+{
+ struct esas2r_adapter *a;
+ u64 bus_addr = 0;
+ int i;
+ void *next_uncached;
+ struct esas2r_request *first_request, *last_request;
+
+ if (index >= MAX_ADAPTERS) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "tried to init invalid adapter index %u!",
+ index);
+ return 0;
+ }
+
+ if (esas2r_adapters[index]) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "tried to init existing adapter index %u!",
+ index);
+ return 0;
+ }
+
+ a = (struct esas2r_adapter *)host->hostdata;
+ memset(a, 0, sizeof(struct esas2r_adapter));
+ a->pcid = pcid;
+ a->host = host;
+
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask = dma_get_required_mask
+ (&pcid->dev);
+ if (required_mask > DMA_BIT_MASK(32)
+ && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(64))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "64-bit PCI addressing enabled\n");
+ } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(32))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "32-bit PCI addressing enabled\n");
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to set DMA mask");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ } else {
+ if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(32))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "32-bit PCI addressing enabled\n");
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to set DMA mask");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ }
+ esas2r_adapters[index] = a;
+ sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
+ esas2r_debug("new adapter %p, name %s", a, a->name);
+ spin_lock_init(&a->request_lock);
+ spin_lock_init(&a->fw_event_lock);
+ sema_init(&a->fm_api_semaphore, 1);
+ sema_init(&a->fs_api_semaphore, 1);
+ sema_init(&a->nvram_semaphore, 1);
+
+ esas2r_fw_event_off(a);
+ snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
+ a->index);
+ a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
+
+ init_waitqueue_head(&a->buffered_ioctl_waiter);
+ init_waitqueue_head(&a->nvram_waiter);
+ init_waitqueue_head(&a->fm_api_waiter);
+ init_waitqueue_head(&a->fs_api_waiter);
+ init_waitqueue_head(&a->vda_waiter);
+
+ INIT_LIST_HEAD(&a->general_req.req_list);
+ INIT_LIST_HEAD(&a->active_list);
+ INIT_LIST_HEAD(&a->defer_list);
+ INIT_LIST_HEAD(&a->free_sg_list_head);
+ INIT_LIST_HEAD(&a->avail_request);
+ INIT_LIST_HEAD(&a->vrq_mds_head);
+ INIT_LIST_HEAD(&a->fw_event_list);
+
+ first_request = (struct esas2r_request *)((u8 *)(a + 1));
+
+ for (last_request = first_request, i = 1; i < num_requests;
+ last_request++, i++) {
+ INIT_LIST_HEAD(&last_request->req_list);
+ list_add_tail(&last_request->comp_list, &a->avail_request);
+ if (!alloc_vda_req(a, last_request)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate a VDA request!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ }
+
+ esas2r_debug("requests: %p to %p (%d, %d)", first_request,
+ last_request,
+ sizeof(*first_request),
+ num_requests);
+
+ if (esas2r_map_regions(a) != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ a->index = index;
+
+ /* interrupts will be disabled until we are done with init */
+ atomic_inc(&a->dis_ints_cnt);
+ atomic_inc(&a->disable_cnt);
+ set_bit(AF_CHPRST_PENDING, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
+ set_bit(AF_FIRST_INIT, &a->flags);
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->init_msg = ESAS2R_INIT_MSG_START;
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+
+ esas2r_setup_interrupts(a, interrupt_mode);
+
+ a->uncached_size = esas2r_get_uncached_size(a);
+ a->uncached = dma_alloc_coherent(&pcid->dev,
+ (size_t)a->uncached_size,
+ (dma_addr_t *)&bus_addr,
+ GFP_KERNEL);
+ if (a->uncached == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate %d bytes of consistent memory!",
+ a->uncached_size);
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ a->uncached_phys = bus_addr;
+
+ esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
+ a->uncached_size,
+ a->uncached,
+ upper_32_bits(bus_addr),
+ lower_32_bits(bus_addr));
+ memset(a->uncached, 0, a->uncached_size);
+ next_uncached = a->uncached;
+
+ if (!esas2r_init_adapter_struct(a,
+ &next_uncached)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to initialize adapter structure (2)!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ tasklet_init(&a->tasklet,
+ esas2r_adapter_tasklet,
+ (unsigned long)a);
+
+ /*
+ * Disable chip interrupts to prevent spurious interrupts
+ * until we claim the IRQ.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_check_adapter(a);
+
+ if (!esas2r_init_adapter_hw(a, true))
+ esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
+ else
+ esas2r_debug("esas2r_init_adapter ok");
+
+ esas2r_claim_interrupts(a);
+
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
+ esas2r_enable_chip_interrupts(a);
+
+ set_bit(AF2_INIT_DONE, &a->flags2);
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags))
+ esas2r_kickoff_timer(a);
+ esas2r_debug("esas2r_init_adapter done for %p (%d)",
+ a, a->disable_cnt);
+
+ return 1;
+}
+
+static void esas2r_adapter_power_down(struct esas2r_adapter *a,
+ int power_management)
+{
+ struct esas2r_mem_desc *memdesc, *next;
+
+ if ((test_bit(AF2_INIT_DONE, &a->flags2))
+ && (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
+ if (!power_management) {
+ del_timer_sync(&a->timer);
+ tasklet_kill(&a->tasklet);
+ }
+ esas2r_power_down(a);
+
+ /*
+ * There are versions of firmware that do not handle the sync
+ * cache command correctly. Stall here to ensure that the
+ * cache is lazily flushed.
+ */
+ mdelay(500);
+ esas2r_debug("chip halted");
+ }
+
+ /* Remove sysfs binary files */
+ if (a->sysfs_fw_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
+ a->sysfs_fw_created = 0;
+ }
+
+ if (a->sysfs_fs_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
+ a->sysfs_fs_created = 0;
+ }
+
+ if (a->sysfs_vda_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
+ a->sysfs_vda_created = 0;
+ }
+
+ if (a->sysfs_hw_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
+ a->sysfs_hw_created = 0;
+ }
+
+ if (a->sysfs_live_nvram_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj,
+ &bin_attr_live_nvram);
+ a->sysfs_live_nvram_created = 0;
+ }
+
+ if (a->sysfs_default_nvram_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj,
+ &bin_attr_default_nvram);
+ a->sysfs_default_nvram_created = 0;
+ }
+
+ /* Clean up interrupts */
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "free_irq(%d) called", a->pcid->irq);
+
+ free_irq(a->pcid->irq, a);
+ esas2r_debug("IRQ released");
+ clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
+ }
+
+ if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
+ pci_disable_msi(a->pcid);
+ clear_bit(AF2_MSI_ENABLED, &a->flags2);
+ esas2r_debug("MSI disabled");
+ }
+
+ if (a->inbound_list_md.virt_addr)
+ esas2r_initmem_free(a, &a->inbound_list_md);
+
+ if (a->outbound_list_md.virt_addr)
+ esas2r_initmem_free(a, &a->outbound_list_md);
+
+ list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
+ next_desc) {
+ esas2r_initmem_free(a, memdesc);
+ }
+
+ /* Following frees everything allocated via alloc_vda_req */
+ list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
+ esas2r_initmem_free(a, memdesc);
+ list_del(&memdesc->next_desc);
+ kfree(memdesc);
+ }
+
+ kfree(a->first_ae_req);
+ a->first_ae_req = NULL;
+
+ kfree(a->sg_list_mds);
+ a->sg_list_mds = NULL;
+
+ kfree(a->req_table);
+ a->req_table = NULL;
+
+ if (a->regs) {
+ esas2r_unmap_regions(a);
+ a->regs = NULL;
+ a->data_window = NULL;
+ esas2r_debug("regions unmapped");
+ }
+}
+
+/* Release/free allocated resources for the specified adapter. */
+void esas2r_kill_adapter(int i)
+{
+ struct esas2r_adapter *a = esas2r_adapters[i];
+
+ if (a) {
+ unsigned long flags;
+ struct workqueue_struct *wq;
+ esas2r_debug("killing adapter %p [%d] ", a, i);
+ esas2r_fw_event_off(a);
+ esas2r_adapter_power_down(a, 0);
+ if (esas2r_buffered_ioctl &&
+ (a->pcid == esas2r_buffered_ioctl_pcid)) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)esas2r_buffered_ioctl_size,
+ esas2r_buffered_ioctl,
+ esas2r_buffered_ioctl_addr);
+ esas2r_buffered_ioctl = NULL;
+ }
+
+ if (a->vda_buffer) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)VDA_MAX_BUFFER_SIZE,
+ a->vda_buffer,
+ (dma_addr_t)a->ppvda_buffer);
+ a->vda_buffer = NULL;
+ }
+ if (a->fs_api_buffer) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ a->fs_api_buffer,
+ (dma_addr_t)a->ppfs_api_buffer);
+ a->fs_api_buffer = NULL;
+ }
+
+ kfree(a->local_atto_ioctl);
+ a->local_atto_ioctl = NULL;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ wq = a->fw_event_q;
+ a->fw_event_q = NULL;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ if (a->uncached) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->uncached_size,
+ a->uncached,
+ (dma_addr_t)a->uncached_phys);
+ a->uncached = NULL;
+ esas2r_debug("uncached area freed");
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "pci_disable_device() called. msix_enabled: %d "
+ "msi_enabled: %d irq: %d pin: %d",
+ a->pcid->msix_enabled,
+ a->pcid->msi_enabled,
+ a->pcid->irq,
+ a->pcid->pin);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "before pci_disable_device() enable_cnt: %d",
+ a->pcid->enable_cnt.counter);
+
+ pci_disable_device(a->pcid);
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "after pci_disable_device() enable_cnt: %d",
+ a->pcid->enable_cnt.counter);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "pci_set_drv_data(%p, NULL) called",
+ a->pcid);
+
+ pci_set_drvdata(a->pcid, NULL);
+ esas2r_adapters[i] = NULL;
+
+ if (test_bit(AF2_INIT_DONE, &a->flags2)) {
+ clear_bit(AF2_INIT_DONE, &a->flags2);
+
+ set_bit(AF_DEGRADED_MODE, &a->flags);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->host->shost_gendev),
+ "scsi_remove_host() called");
+
+ scsi_remove_host(a->host);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(a->host);
+ }
+ }
+}
+
+int esas2r_cleanup(struct Scsi_Host *host)
+{
+ struct esas2r_adapter *a;
+ int index;
+
+ if (host == NULL) {
+ int i;
+
+ esas2r_debug("esas2r_cleanup everything");
+ for (i = 0; i < MAX_ADAPTERS; i++)
+ esas2r_kill_adapter(i);
+ return -1;
+ }
+
+ esas2r_debug("esas2r_cleanup called for host %p", host);
+ a = (struct esas2r_adapter *)host->hostdata;
+ index = a->index;
+ esas2r_kill_adapter(index);
+ return index;
+}
+
+int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ u32 device_state;
+ struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
+ if (!a)
+ return -ENODEV;
+
+ esas2r_adapter_power_down(a, 1);
+ device_state = pci_choose_state(pdev, state);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_save_state() called");
+ pci_save_state(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_disable_device() called");
+ pci_disable_device(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_set_power_state() called");
+ pci_set_power_state(pdev, device_state);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
+ return 0;
+}
+
+int esas2r_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+ int rez;
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_set_power_state(PCI_D0) "
+ "called");
+ pci_set_power_state(pdev, PCI_D0);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_enable_wake(PCI_D0, 0) "
+ "called");
+ pci_enable_wake(pdev, PCI_D0, 0);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_restore_state() called");
+ pci_restore_state(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_enable_device() called");
+ rez = pci_enable_device(pdev);
+ pci_set_master(pdev);
+
+ if (!a) {
+ rez = -ENODEV;
+ goto error_exit;
+ }
+
+ if (esas2r_map_regions(a) != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+ /* Set up interrupt mode */
+ esas2r_setup_interrupts(a, a->intr_mode);
+
+ /*
+ * Disable chip interrupts to prevent spurious interrupts until we
+ * claim the IRQ.
+ */
+ esas2r_disable_chip_interrupts(a);
+ if (!esas2r_power_up(a, true)) {
+ esas2r_debug("yikes, esas2r_power_up failed");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+ esas2r_claim_interrupts(a);
+
+ if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
+ /*
+ * Now that system interrupt(s) are claimed, we can enable
+ * chip interrupts.
+ */
+ esas2r_enable_chip_interrupts(a);
+ esas2r_kickoff_timer(a);
+ } else {
+ esas2r_debug("yikes, unable to claim IRQ");
+ esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+error_exit:
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
+ rez);
+ return rez;
+}
+
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
+{
+ set_bit(AF_DEGRADED_MODE, &a->flags);
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "setting adapter to degraded mode: %s\n", error_str);
+ return false;
+}
+
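+/*
+ * Compute the size of the uncached (DMA-coherent) area. The sum below
+ * mirrors the carving done in esas2r_init_adapter_struct(): the NVRAM
+ * image, the discovery buffer, the outbound list copy pointer, the S/G
+ * list pages, and the inbound/outbound communication lists, plus
+ * alignment padding.
+ */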
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
+{
+ return sizeof(struct esas2r_sas_nvram)
+ + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
+ + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
+ + 8
+ + (num_sg_lists * (u16)sgl_page_size)
+ + ALIGN((num_requests + num_ae_requests + 1 +
+ ESAS2R_LIST_EXTRA) *
+ sizeof(struct esas2r_inbound_list_source_entry),
+ 8)
+ + ALIGN((num_requests + num_ae_requests + 1 +
+ ESAS2R_LIST_EXTRA) *
+ sizeof(struct atto_vda_ob_rsp), 8)
+ + 256; /* VDA request and buffer align */
+}
+
+static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
+{
+ int pcie_cap_reg;
+
+ pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+ if (pcie_cap_reg) {
+ u16 devcontrol;
+
+ pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
+ &devcontrol);
+
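+ /*
+ * The READRQ field of DEVCTL encodes the maximum read request
+ * size; a field value of 0x2000 corresponds to 512 bytes, so
+ * anything larger is clamped down to 512B below.
+ */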
+ if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
+ esas2r_log(ESAS2R_LOG_INFO,
+ "max read request size > 512B");
+
+ devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
+ devcontrol |= 0x2000;
+ pci_write_config_word(a->pcid,
+ pcie_cap_reg + PCI_EXP_DEVCTL,
+ devcontrol);
+ }
+ }
+}
+
+/*
+ * Determine the organization of the uncached data area and
+ * finish initializing the adapter structure
+ */
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+ void **uncached_area)
+{
+ u32 i;
+ u8 *high;
+ struct esas2r_inbound_list_source_entry *element;
+ struct esas2r_request *rq;
+ struct esas2r_mem_desc *sgl;
+
+ spin_lock_init(&a->sg_list_lock);
+ spin_lock_init(&a->mem_lock);
+ spin_lock_init(&a->queue_lock);
+
+ a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
+
+ if (!alloc_vda_req(a, &a->general_req)) {
+ esas2r_hdebug(
+ "failed to allocate a VDA request for the general req!");
+ return false;
+ }
+
+ /* allocate requests for asynchronous events */
+ a->first_ae_req =
+ kcalloc(num_ae_requests, sizeof(struct esas2r_request),
+ GFP_KERNEL);
+
+ if (a->first_ae_req == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for asynchronous events");
+ return false;
+ }
+
+ /* allocate the S/G list memory descriptors */
+ a->sg_list_mds = kcalloc(
+ num_sg_lists, sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+ if (a->sg_list_mds == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for s/g list descriptors");
+ return false;
+ }
+
+ /* allocate the request table */
+ a->req_table =
+ kcalloc(num_requests + num_ae_requests + 1,
+ sizeof(struct esas2r_request *), GFP_KERNEL);
+
+ if (a->req_table == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for the request table");
+ return false;
+ }
+
+ /* initialize PCI configuration space */
+ esas2r_init_pci_cfg_space(a);
+
+ /*
+ * the thunder_stream boards all have a serial flash part that has a
+ * different base address on the AHB bus.
+ */
+ if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
+ && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
+ set_bit(AF2_THUNDERBOLT, &a->flags2);
+
+ if (test_bit(AF2_THUNDERBOLT, &a->flags2))
+ set_bit(AF2_SERIAL_FLASH, &a->flags2);
+
+ if (a->pcid->subsystem_device == ATTO_TLSH_1068)
+ set_bit(AF2_THUNDERLINK, &a->flags2);
+
+ /* Uncached Area */
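+ /*
+ * "high" walks the uncached area from here on; everything carved
+ * from it below must be accounted for in esas2r_get_uncached_size().
+ */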
+ high = (u8 *)*uncached_area;
+
+ /* initialize the scatter/gather table pages */
+
+ for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
+ sgl->size = sgl_page_size;
+
+ list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
+
+ if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
+ /* Allow the driver to load if the minimum count met. */
+ if (i < NUM_SGL_MIN)
+ return false;
+ break;
+ }
+ }
+
+ /* compute the size of the lists */
+ a->list_size = num_requests + ESAS2R_LIST_EXTRA;
+
+ /* allocate the inbound list */
+ a->inbound_list_md.size = a->list_size *
+ sizeof(struct
+ esas2r_inbound_list_source_entry);
+
+ if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
+ esas2r_hdebug("failed to allocate IB list");
+ return false;
+ }
+
+ /* allocate the outbound list */
+ a->outbound_list_md.size = a->list_size *
+ sizeof(struct atto_vda_ob_rsp);
+
+ if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
+ ESAS2R_LIST_ALIGN)) {
+ esas2r_hdebug("failed to allocate IB list");
+ return false;
+ }
+
+ /* allocate the NVRAM structure */
+ a->nvram = (struct esas2r_sas_nvram *)high;
+ high += sizeof(struct esas2r_sas_nvram);
+
+ /* allocate the discovery buffer */
+ a->disc_buffer = high;
+ high += ESAS2R_DISC_BUF_LEN;
+ high = PTR_ALIGN(high, 8);
+
+ /* allocate the outbound list copy pointer */
+ a->outbound_copy = (u32 volatile *)high;
+ high += sizeof(u32);
+
+ if (!test_bit(AF_NVR_VALID, &a->flags))
+ esas2r_nvram_set_defaults(a);
+
+ /* update the caller's uncached memory area pointer */
+ *uncached_area = (void *)high;
+
+ /* initialize the allocated memory */
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ memset(a->req_table, 0,
+ (num_requests + num_ae_requests +
+ 1) * sizeof(struct esas2r_request *));
+
+ esas2r_targ_db_initialize(a);
+
+ /* prime parts of the inbound list */
+ element =
+ (struct esas2r_inbound_list_source_entry *)a->
+ inbound_list_md.
+ virt_addr;
+
+ for (i = 0; i < a->list_size; i++) {
+ element->address = 0;
+ element->reserved = 0;
+ element->length = cpu_to_le32(HWILSE_INTERFACE_F0
+ | (sizeof(union
+ atto_vda_req)
+ /
+ sizeof(u32)));
+ element++;
+ }
+
+ /* init the AE requests */
+ for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
+ i++) {
+ INIT_LIST_HEAD(&rq->req_list);
+ if (!alloc_vda_req(a, rq)) {
+ esas2r_hdebug(
+ "failed to allocate a VDA request!");
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ /* override the completion function */
+ rq->comp_cb = esas2r_ae_complete;
+ }
+ }
+
+ return true;
+}
+
+/* This code will verify that the chip is operational. */
+bool esas2r_check_adapter(struct esas2r_adapter *a)
+{
+ u32 starttime;
+ u32 doorbell;
+ u64 ppaddr;
+ u32 dw;
+
+ /*
+ * if the chip reset detected flag is set, we can bypass a bunch of
+ * stuff.
+ */
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags))
+ goto skip_chip_reset;
+
+ /*
+ * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
+ * may have left them enabled or we may be recovering from a fault.
+ */
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
+ esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
+
+ /*
+ * wait for the firmware to become ready by forcing an interrupt and
+ * waiting for a response.
+ */
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ esas2r_force_interrupt(a);
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell == 0xFFFFFFFF) {
+ /*
+ * Give the firmware up to two seconds to enable
+ * register access after a reset.
+ */
+ if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
+ return esas2r_set_degraded_mode(a,
+ "unable to access registers");
+ } else if (doorbell & DRBL_FORCE_INT) {
+ u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+ /*
+ * This driver supports version 0 and version 1 of
+ * the API
+ */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+
+ if (ver == DRBL_FW_VER_0) {
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+ } else if (ver == DRBL_FW_VER_1) {
+ clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->max_vdareq_size = 1024;
+ a->build_sgl = esas2r_build_sg_list_prd;
+ } else {
+ return esas2r_set_degraded_mode(a,
+ "unknown firmware version");
+ }
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
+ esas2r_hdebug("FW ready TMO");
+ esas2r_bugon();
+
+ return esas2r_set_degraded_mode(a,
+ "firmware start has timed out");
+ }
+ }
+
+ /* purge any asynchronous events since we will repost them later */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(50));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug("timeout waiting for interface down");
+ break;
+ }
+ }
+skip_chip_reset:
+ /*
+ * First things first: before we go changing any of these
+ * registers, disable the communication lists.
+ */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+ dw &= ~MU_ILC_ENABLE;
+ esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+ dw &= ~MU_OLC_ENABLE;
+ esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+ /* configure the communication list addresses */
+ ppaddr = a->inbound_list_md.phys_addr;
+ esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
+ upper_32_bits(ppaddr));
+ ppaddr = a->outbound_list_md.phys_addr;
+ esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
+ upper_32_bits(ppaddr));
+ ppaddr = a->uncached_phys +
+ ((u8 *)a->outbound_copy - a->uncached);
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
+ upper_32_bits(ppaddr));
+
+ /* reset the read and write pointers */
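+ /*
+ * All four pointers start at the last list entry with the toggle
+ * bit set, so the first post wraps around to entry zero.
+ */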
+ *a->outbound_copy =
+ a->last_write =
+ a->last_read = a->list_size - 1;
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+ esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
+ MU_OLW_TOGGLE | a->last_write);
+
+ /* configure the interface select fields */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
+ dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
+ esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
+ (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
+ dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
+ esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
+ (dw | MU_OLIC_LIST_F0 |
+ MU_OLIC_SOURCE_DDR));
+
+ /* finish configuring the communication lists */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+ dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
+ dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
+ | (a->list_size << MU_ILC_NUMBER_SHIFT);
+ esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+ dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
+ dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
+ esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+ /*
+ * notify the firmware that we're done setting up the communication
+ * list registers. wait here until the firmware is done configuring
+ * its lists. it will signal that it is done by enabling the lists.
+ */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_INIT) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug(
+ "timeout waiting for communication list init");
+ esas2r_bugon();
+ return esas2r_set_degraded_mode(a,
+ "timeout waiting for communication list init");
+ }
+ }
+
+ /*
+ * flag whether the firmware supports the power down doorbell. we
+ * determine this by reading the inbound doorbell enable mask.
+ */
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
+ if (doorbell & DRBL_POWER_DOWN)
+ set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
+ else
+ clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
+
+ /*
+ * enable assertion of outbound queue and doorbell interrupts in the
+ * main interrupt cause register.
+ */
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
+ return true;
+}
+
+/* Process the initialization message just completed and format the next one. */
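+/*
+ * The init messages form a small state machine driven by a->init_msg:
+ * START/REINIT -> INIT -> GET_INIT -> done. Returning false ends the
+ * sequence.
+ */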
+static bool esas2r_format_init_msg(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u32 msg = a->init_msg;
+ struct atto_vda_cfg_init *ci;
+
+ a->init_msg = 0;
+
+ switch (msg) {
+ case ESAS2R_INIT_MSG_START:
+ case ESAS2R_INIT_MSG_REINIT:
+ {
+ struct timeval now;
+ do_gettimeofday(&now);
+ esas2r_hdebug("CFG init");
+ esas2r_build_cfg_req(a,
+ rq,
+ VDA_CFG_INIT,
+ 0,
+ NULL);
+ ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
+ ci->sgl_page_size = cpu_to_le32(sgl_page_size);
+ ci->epoch_time = cpu_to_le32(now.tv_sec);
+ rq->flags |= RF_FAILURE_OK;
+ a->init_msg = ESAS2R_INIT_MSG_INIT;
+ break;
+ }
+
+ case ESAS2R_INIT_MSG_INIT:
+ if (rq->req_stat == RS_SUCCESS) {
+ u32 major;
+ u32 minor;
+ u16 fw_release;
+
+ a->fw_version = le16_to_cpu(
+ rq->func_rsp.cfg_rsp.vda_version);
+ a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
+ fw_release = le16_to_cpu(
+ rq->func_rsp.cfg_rsp.fw_release);
+ major = LOBYTE(fw_release);
+ minor = HIBYTE(fw_release);
+ a->fw_version += (major << 16) + (minor << 24);
+ } else {
+ esas2r_hdebug("FAILED");
+ }
+
+ /*
+ * R6xx firmware releases 2.71 and earlier did not correctly
+ * fail unsupported config requests.
+ */
+
+ if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
+ || (be32_to_cpu(a->fw_version) > 0x00524702)) {
+ esas2r_hdebug("CFG get init");
+ esas2r_build_cfg_req(a,
+ rq,
+ VDA_CFG_GET_INIT2,
+ sizeof(struct atto_vda_cfg_init),
+ NULL);
+
+ rq->vrq->cfg.sg_list_offset = offsetof(
+ struct atto_vda_cfg_req,
+ data.sge);
+ rq->vrq->cfg.data.prde.ctl_len =
+ cpu_to_le32(sizeof(struct atto_vda_cfg_init));
+ rq->vrq->cfg.data.prde.address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ rq->flags |= RF_FAILURE_OK;
+ a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
+ break;
+ }
+
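+ /* fall through */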
+ case ESAS2R_INIT_MSG_GET_INIT:
+ if (msg == ESAS2R_INIT_MSG_GET_INIT) {
+ ci = (struct atto_vda_cfg_init *)rq->data_buf;
+ if (rq->req_stat == RS_SUCCESS) {
+ a->num_targets_backend =
+ le32_to_cpu(ci->num_targets_backend);
+ a->ioctl_tunnel =
+ le32_to_cpu(ci->ioctl_tunnel);
+ } else {
+ esas2r_hdebug("FAILED");
+ }
+ }
+ /* fall through */
+
+ default:
+ rq->req_stat = RS_SUCCESS;
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Perform initialization messages via the request queue. Messages are
+ * performed with interrupts disabled.
+ */
+bool esas2r_init_msgs(struct esas2r_adapter *a)
+{
+ bool success = true;
+ struct esas2r_request *rq = &a->general_req;
+
+ esas2r_rq_init_request(rq, a);
+ rq->comp_cb = esas2r_dummy_complete;
+
+ if (a->init_msg == 0)
+ a->init_msg = ESAS2R_INIT_MSG_REINIT;
+
+ while (a->init_msg) {
+ if (esas2r_format_init_msg(a, rq)) {
+ unsigned long flags;
+ while (true) {
+ spin_lock_irqsave(&a->queue_lock, flags);
+ esas2r_start_vda_request(a, rq);
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_wait_request(a, rq);
+ if (rq->req_stat != RS_PENDING)
+ break;
+ }
+ }
+
+ if (rq->req_stat == RS_SUCCESS
+ || ((rq->flags & RF_FAILURE_OK)
+ && rq->req_stat != RS_TIMEOUT))
+ continue;
+
+ esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
+ a->init_msg, rq->req_stat, rq->flags);
+ a->init_msg = ESAS2R_INIT_MSG_START;
+ success = false;
+ break;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+ return success;
+}
+
+/* Initialize the adapter chip */
+bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
+{
+ bool rslt = false;
+ struct esas2r_request *rq;
+ u32 i;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ goto exit;
+
+ if (!test_bit(AF_NVR_VALID, &a->flags)) {
+ if (!esas2r_nvram_read_direct(a))
+ esas2r_log(ESAS2R_LOG_WARN,
+ "invalid/missing NVRAM parameters");
+ }
+
+ if (!esas2r_init_msgs(a)) {
+ esas2r_set_degraded_mode(a, "init messages failed");
+ goto exit;
+ }
+
+ /* The firmware is ready. */
+ clear_bit(AF_DEGRADED_MODE, &a->flags);
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+
+ /* Post all the async event requests */
+ for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
+ esas2r_start_ae_request(a, rq);
+
+ if (!a->flash_rev[0])
+ esas2r_read_flash_rev(a);
+
+ if (!a->image_type[0])
+ esas2r_read_image_type(a);
+
+ if (a->fw_version == 0)
+ a->fw_rev[0] = 0;
+ else
+ sprintf(a->fw_rev, "%1d.%02d",
+ (int)LOBYTE(HIWORD(a->fw_version)),
+ (int)HIBYTE(HIWORD(a->fw_version)));
+
+ esas2r_hdebug("firmware revision: %s", a->fw_rev);
+
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags)
+ && (test_bit(AF_FIRST_INIT, &a->flags))) {
+ esas2r_enable_chip_interrupts(a);
+ return true;
+ }
+
+ /* initialize discovery */
+ esas2r_disc_initialize(a);
+
+ /*
+ * wait for the device wait time to expire here if requested. this is
+ * usually requested during initial driver load and possibly when
+ * resuming from a low power state. deferred device waiting will use
+ * interrupts. chip reset recovery always defers device waiting to
+ * avoid being in a TASKLET too long.
+ */
+ if (init_poll) {
+ u32 currtime = a->disc_start_time;
+ u32 nexttick = 100;
+ u32 deltatime;
+
+ /*
+ * Block Tasklets from getting scheduled and indicate this is
+ * polled discovery.
+ */
+ set_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ set_bit(AF_DISC_POLLED, &a->flags);
+
+ /*
+ * Temporarily bring the disable count to zero to enable
+ * deferred processing. Note that the count is already zero
+ * after the first initialization.
+ */
+ if (test_bit(AF_FIRST_INIT, &a->flags))
+ atomic_dec(&a->disable_cnt);
+
+ while (test_bit(AF_DISC_PENDING, &a->flags)) {
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ /*
+ * Determine the need for a timer tick based on the
+ * delta time between this and the last iteration of
+ * this loop. We don't use the absolute time because
+ * then we would have to worry about when nexttick
+ * wraps and currtime hasn't yet.
+ */
+ deltatime = jiffies_to_msecs(jiffies) - currtime;
+ currtime += deltatime;
+
+ /*
+ * Process any waiting discovery as long as the chip is
+ * up. If a chip reset happens during initial polling,
+ * we have to make sure the timer tick processes the
+ * doorbell indicating the firmware is ready.
+ */
+ if (!test_bit(AF_CHPRST_PENDING, &a->flags))
+ esas2r_disc_check_for_work(a);
+
+ /* Simulate a timer tick. */
+ if (nexttick <= deltatime) {
+
+ /* Time for a timer tick */
+ nexttick += 100;
+ esas2r_timer_tick(a);
+ }
+
+ if (nexttick > deltatime)
+ nexttick -= deltatime;
+
+ /* Do any deferred processing */
+ if (esas2r_is_tasklet_pending(a))
+ esas2r_do_tasklet_tasks(a);
+
+ }
+
+ if (test_bit(AF_FIRST_INIT, &a->flags))
+ atomic_inc(&a->disable_cnt);
+
+ clear_bit(AF_DISC_POLLED, &a->flags);
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ }
+
+
+ esas2r_targ_db_report_changes(a);
+
+ /*
+ * For cases where (a) the initialization messages processing may
+ * handle an interrupt for a port event and a discovery is waiting, but
+ * we are not waiting for devices, or (b) the device wait time has been
+ * exhausted but there is still discovery pending, start any leftover
+ * discovery in interrupt driven mode.
+ */
+ esas2r_disc_start_waiting(a);
+
+ /* Enable chip interrupts */
+ a->int_mask = ESAS2R_INT_STS_MASK;
+ esas2r_enable_chip_interrupts(a);
+ esas2r_enable_heartbeat(a);
+ rslt = true;
+
+exit:
+ /*
+ * Regardless of whether initialization was successful, certain things
+ * need to get done before we exit.
+ */
+
+ if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
+ test_bit(AF_FIRST_INIT, &a->flags)) {
+ /*
+ * Reinitialization was performed during the first
+ * initialization. Only clear the chip reset flag so the
+ * original device polling is not cancelled.
+ */
+ if (!rslt)
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+ } else {
+ /* First initialization or a subsequent re-init is complete. */
+ if (!rslt) {
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+ clear_bit(AF_DISC_PENDING, &a->flags);
+ }
+
+
+ /* Enable deferred processing after the first initialization. */
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ clear_bit(AF_FIRST_INIT, &a->flags);
+
+ if (atomic_dec_return(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+ }
+ }
+
+ return rslt;
+}
+
+void esas2r_reset_adapter(struct esas2r_adapter *a)
+{
+ set_bit(AF_OS_RESET, &a->flags);
+ esas2r_local_reset_adapter(a);
+ esas2r_schedule_tasklet(a);
+}
+
+void esas2r_reset_chip(struct esas2r_adapter *a)
+{
+ if (!esas2r_is_adapter_present(a))
+ return;
+
+ /*
+ * Before we reset the chip, save off the VDA core dump. The VDA core
+ * dump is located in the upper 512KB of the onchip SRAM. Make sure
+ * to not overwrite a previous crash that was saved.
+ */
+ if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
+ !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
+ esas2r_read_mem_block(a,
+ a->fw_coredump_buff,
+ MW_DATA_ADDR_SRAM + 0x80000,
+ ESAS2R_FWCOREDUMP_SZ);
+
+ set_bit(AF2_COREDUMP_SAVED, &a->flags2);
+ }
+
+ clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
+
+ /* Reset the chip */
+ if (a->pcid->revision == MVR_FREY_B2)
+ esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
+ MU_CTL_IN_FULL_RST2);
+ else
+ esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
+ MU_CTL_IN_FULL_RST);
+
+
+ /* Stall a little while to let the reset condition clear */
+ mdelay(10);
+}
+
+static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
+{
+ u32 starttime;
+ u32 doorbell;
+
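+ /*
+ * Ring the power-down doorbell and wait up to 30 seconds for the
+ * firmware to acknowledge it.
+ */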
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_POWER_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
+ esas2r_hdebug("Timeout waiting for power down");
+ break;
+ }
+ }
+}
+
+/*
+ * Perform power management processing including managing device states, adapter
+ * states, interrupts, and I/O.
+ */
+void esas2r_power_down(struct esas2r_adapter *a)
+{
+ set_bit(AF_POWER_MGT, &a->flags);
+ set_bit(AF_POWER_DOWN, &a->flags);
+
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
+ u32 starttime;
+ u32 doorbell;
+
+ /*
+ * We are currently running OK and will be reinitializing later.
+ * Increment the disable count to coordinate with
+ * esas2r_init_adapter. We don't have to do this in degraded
+ * mode since we never enabled interrupts in the first place.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_disable_heartbeat(a);
+
+ /* wait for any VDA activity to clear before continuing */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_MSG_IFC_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell =
+ esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug(
+ "timeout waiting for interface down");
+ break;
+ }
+ }
+
+ /*
+ * For firmware versions that support it, tell the firmware that
+ * the driver is powering down.
+ */
+ if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
+ esas2r_power_down_notify_firmware(a);
+ }
+
+ /* Suspend I/O processing. */
+ set_bit(AF_OS_RESET, &a->flags);
+ set_bit(AF_DISC_PENDING, &a->flags);
+ set_bit(AF_CHPRST_PENDING, &a->flags);
+
+ esas2r_process_adapter_reset(a);
+
+ /* Remove devices now that I/O is cleaned up. */
+ a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
+ esas2r_targ_db_remove_all(a, false);
+}
+
+/*
+ * Perform power management processing including managing device states, adapter
+ * states, interrupts, and I/O.
+ */
+bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
+{
+ bool ret;
+
+ clear_bit(AF_POWER_DOWN, &a->flags);
+ esas2r_init_pci_cfg_space(a);
+ set_bit(AF_FIRST_INIT, &a->flags);
+ atomic_inc(&a->disable_cnt);
+
+ /* reinitialize the adapter */
+ ret = esas2r_check_adapter(a);
+ if (!esas2r_init_adapter_hw(a, init_poll))
+ ret = false;
+
+ /* send the reset asynchronous event */
+ esas2r_send_reset_ae(a, true);
+
+ /* clear this flag after initialization. */
+ clear_bit(AF_POWER_MGT, &a->flags);
+ return ret;
+}
+
+bool esas2r_is_adapter_present(struct esas2r_adapter *a)
+{
+ if (test_bit(AF_NOT_PRESENT, &a->flags))
+ return false;
+
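+ /*
+ * A read of all ones from MMIO space usually means the read was
+ * master-aborted, i.e. the device is no longer present on the bus.
+ */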
+ if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
+ set_bit(AF_NOT_PRESENT, &a->flags);
+
+ return false;
+ }
+ return true;
+}
+
+const char *esas2r_get_model_name(struct esas2r_adapter *a)
+{
+ switch (a->pcid->subsystem_device) {
+ case ATTO_ESAS_R680:
+ return "ATTO ExpressSAS R680";
+
+ case ATTO_ESAS_R608:
+ return "ATTO ExpressSAS R608";
+
+ case ATTO_ESAS_R60F:
+ return "ATTO ExpressSAS R60F";
+
+ case ATTO_ESAS_R6F0:
+ return "ATTO ExpressSAS R6F0";
+
+ case ATTO_ESAS_R644:
+ return "ATTO ExpressSAS R644";
+
+ case ATTO_ESAS_R648:
+ return "ATTO ExpressSAS R648";
+
+ case ATTO_TSSC_3808:
+ return "ATTO ThunderStream SC 3808D";
+
+ case ATTO_TSSC_3808E:
+ return "ATTO ThunderStream SC 3808E";
+
+ case ATTO_TLSH_1068:
+ return "ATTO ThunderLink SH 1068";
+ }
+
+ return "ATTO SAS Controller";
+}
+
+const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
+{
+ switch (a->pcid->subsystem_device) {
+ case ATTO_ESAS_R680:
+ return "R680";
+
+ case ATTO_ESAS_R608:
+ return "R608";
+
+ case ATTO_ESAS_R60F:
+ return "R60F";
+
+ case ATTO_ESAS_R6F0:
+ return "R6F0";
+
+ case ATTO_ESAS_R644:
+ return "R644";
+
+ case ATTO_ESAS_R648:
+ return "R648";
+
+ case ATTO_TSSC_3808:
+ return "SC 3808D";
+
+ case ATTO_TSSC_3808E:
+ return "SC 3808E";
+
+ case ATTO_TLSH_1068:
+ return "SH 1068";
+ }
+
+ return "unknown";
+}
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
new file mode 100644
index 000000000000..f16d6bcf9bb6
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -0,0 +1,942 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_int.c
+ * esas2r interrupt handling
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+/* Local function prototypes */
+static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
+static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
+static void esas2r_process_bus_reset(struct esas2r_adapter *a);
+
+/*
+ * Poll the adapter for interrupts and service them.
+ * This function handles both legacy interrupts and MSI.
+ */
+void esas2r_polled_interrupt(struct esas2r_adapter *a)
+{
+ u32 intstat;
+ u32 doorbell;
+
+ esas2r_disable_chip_interrupts(a);
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (intstat & MU_INTSTAT_POST_OUT) {
+ /* clear the interrupt */
+
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (intstat & MU_INTSTAT_DRBL) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ esas2r_enable_chip_interrupts(a);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+/*
+ * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler
+ * schedules a TASKLET to process events, whereas the MSI handler just
+ * processes interrupt events directly.
+ */
+irqreturn_t esas2r_interrupt(int irq, void *dev_id)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+
+ if (!esas2r_adapter_interrupt_pending(a))
+ return IRQ_NONE;
+
+ set_bit(AF2_INT_PENDING, &a->flags2);
+ esas2r_schedule_tasklet(a);
+
+ return IRQ_HANDLED;
+}
+
+void esas2r_adapter_interrupt(struct esas2r_adapter *a)
+{
+ u32 doorbell;
+
+ if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
+ /* clear the interrupt */
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ a->int_mask = ESAS2R_INT_STS_MASK;
+
+ esas2r_enable_chip_interrupts(a);
+
+ if (likely(atomic_read(&a->disable_cnt) == 0))
+ esas2r_do_deferred_processes(a);
+}
+
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+ u32 intstat;
+ u32 doorbell;
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (likely(intstat & MU_INTSTAT_POST_OUT)) {
+ /* clear the interrupt */
+
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (unlikely(intstat & MU_INTSTAT_DRBL)) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ /*
+ * Work around a chip bug and force a new MSI to be sent if one is
+ * still pending.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_enable_chip_interrupts(a);
+
+ if (likely(atomic_read(&a->disable_cnt) == 0))
+ esas2r_do_deferred_processes(a);
+
+ esas2r_do_tasklet_tasks(a);
+
+ return IRQ_HANDLED;
+}
+
+static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct atto_vda_ob_rsp *rsp)
+{
+
+ /*
+ * For I/O requests, only copy the response if an error
+ * occurred and setup a callback to do error processing.
+ */
+ if (unlikely(rq->req_stat != RS_SUCCESS)) {
+ memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
+
+ if (rq->req_stat == RS_ABORTED) {
+ if (rq->timeout > RQ_MAX_TIMEOUT)
+ rq->req_stat = RS_TIMEOUT;
+ } else if (rq->req_stat == RS_SCSI_ERROR) {
+ u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
+
+ esas2r_trace("scsistatus: %x", scsistatus);
+
+ /* Any of these are a good result. */
+ if (scsistatus == SAM_STAT_GOOD || scsistatus ==
+ SAM_STAT_CONDITION_MET || scsistatus ==
+ SAM_STAT_INTERMEDIATE || scsistatus ==
+ SAM_STAT_INTERMEDIATE_CONDITION_MET) {
+ rq->req_stat = RS_SUCCESS;
+ rq->func_rsp.scsi_rsp.scsi_stat =
+ SAM_STAT_GOOD;
+ }
+ }
+ }
+}
+
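+/*
+ * Drain the outbound response list. The firmware DMAs its write pointer
+ * to *a->outbound_copy; we walk the ring from our last read position up
+ * to that point, match each response to its request by handle, and queue
+ * the completed requests to be drained outside the queue lock.
+ */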
+static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
+{
+ struct atto_vda_ob_rsp *rsp;
+ u32 rspput_ptr;
+ u32 rspget_ptr;
+ struct esas2r_request *rq;
+ u32 handle;
+ unsigned long flags;
+
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* Get the outbound limit and pointers */
+ rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
+ rspget_ptr = a->last_read;
+
+ esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);
+
+ /* If we don't have anything to process, get out */
+ if (unlikely(rspget_ptr == rspput_ptr)) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_trace_exit();
+ return;
+ }
+
+ /* Make sure the firmware is healthy */
+ if (unlikely(rspput_ptr >= a->list_size)) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_bugon();
+ esas2r_local_reset_adapter(a);
+ esas2r_trace_exit();
+ return;
+ }
+
+ do {
+ rspget_ptr++;
+
+ if (rspget_ptr >= a->list_size)
+ rspget_ptr = 0;
+
+ rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
+ + rspget_ptr;
+
+ handle = rsp->handle;
+
+ /* Verify the handle range */
+ if (unlikely(LOWORD(handle) == 0
+ || LOWORD(handle) > num_requests +
+ num_ae_requests + 1)) {
+ esas2r_bugon();
+ continue;
+ }
+
+ /* Get the request for this handle */
+ rq = a->req_table[LOWORD(handle)];
+
+ if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
+ esas2r_bugon();
+ continue;
+ }
+
+ list_del(&rq->req_list);
+
+ /* Get the completion status */
+ rq->req_stat = rsp->req_stat;
+
+ esas2r_trace("handle: %x", handle);
+ esas2r_trace("rq: %p", rq);
+ esas2r_trace("req_status: %x", rq->req_stat);
+
+ if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+ esas2r_handle_outbound_rsp_err(a, rq, rsp);
+ } else {
+ /*
+ * Copy the outbound completion struct for non-I/O
+ * requests.
+ */
+ memcpy(&rq->func_rsp, &rsp->func_rsp,
+ sizeof(rsp->func_rsp));
+ }
+
+ /* Queue the request for completion. */
+ list_add_tail(&rq->comp_list, &comp_list);
+
+ } while (rspget_ptr != rspput_ptr);
+
+ a->last_read = rspget_ptr;
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ esas2r_comp_list_drain(a, &comp_list);
+ esas2r_trace_exit();
+}
+
+/*
+ * Perform all deferred processes for the adapter. Deferred
+ * processes can only be done while the current interrupt
+ * disable_cnt for the adapter is zero.
+ */
+void esas2r_do_deferred_processes(struct esas2r_adapter *a)
+{
+ int startreqs = 2;
+ struct esas2r_request *rq;
+ unsigned long flags;
+
+ /*
+ * startreqs is used to control starting requests
+ * that are on the deferred queue
+ * = 0 - do not start any requests
+ * = 1 - can start discovery requests
+ * = 2 - can start any request
+ */
+
+ if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+ test_bit(AF_FLASHING, &a->flags))
+ startreqs = 0;
+ else if (test_bit(AF_DISC_PENDING, &a->flags))
+ startreqs = 1;
+
+ atomic_inc(&a->disable_cnt);
+
+ /* Clear off the completed list to be processed later. */
+
+ if (esas2r_is_tasklet_pending(a)) {
+ esas2r_schedule_tasklet(a);
+
+ startreqs = 0;
+ }
+
+ /*
+ * If we can start requests then traverse the defer queue
+ * looking for requests to start or complete
+ */
+ if (startreqs && !list_empty(&a->defer_list)) {
+ LIST_HEAD(comp_list);
+ struct list_head *element, *next;
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ list_for_each_safe(element, next, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request,
+ req_list);
+
+ if (rq->req_stat != RS_PENDING) {
+ list_del(element);
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+ /*
+ * Process discovery and OS requests separately. We
+ * can't hold up discovery requests when discovery is
+ * pending. In general, there may be different sets of
+ * conditions for starting different types of requests.
+ */
+ else if (rq->req_type == RT_DISC_REQ) {
+ list_del(element);
+ esas2r_disc_local_start_request(a, rq);
+ } else if (startreqs == 2) {
+ list_del(element);
+ esas2r_local_start_request(a, rq);
+
+ /*
+ * Flashing could have been set by last local
+ * start
+ */
+ if (test_bit(AF_FLASHING, &a->flags))
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_comp_list_drain(a, &comp_list);
+ }
+
+ atomic_dec(&a->disable_cnt);
+}
+
+/*
+ * Process an adapter reset (or one that is about to happen)
+ * by making sure all outstanding requests are completed that
+ * haven't been already.
+ */
+void esas2r_process_adapter_reset(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+ unsigned long flags;
+ struct esas2r_disc_context *dc;
+
+ LIST_HEAD(comp_list);
+ struct list_head *element;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* abort the active discovery, if any. */
+
+ if (rq->interrupt_cx) {
+ dc = (struct esas2r_disc_context *)rq->interrupt_cx;
+
+ dc->disc_evt = 0;
+
+ clear_bit(AF_DISC_IN_PROG, &a->flags);
+ }
+
+ /*
+ * Just clear the interrupt callback for now. It will be dequeued
+ * if and when we find it on the active queue, and we don't want
+ * the callback called. Also set the dummy completion callback in
+ * case we were doing an I/O request.
+ */
+
+ rq->interrupt_cx = NULL;
+ rq->interrupt_cb = NULL;
+
+ rq->comp_cb = esas2r_dummy_complete;
+
+ /* Reset the read and write pointers */
+
+ *a->outbound_copy =
+ a->last_write =
+ a->last_read = a->list_size - 1;
+
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+
+ /* Kill all the requests on the active list */
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->req_stat == RS_STARTED)
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_comp_list_drain(a, &comp_list);
+ esas2r_process_bus_reset(a);
+ esas2r_trace_exit();
+}
+
+static void esas2r_process_bus_reset(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq;
+ struct list_head *element;
+ unsigned long flags;
+
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+
+ esas2r_hdebug("reset detected");
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* kill all the requests on the deferred queue */
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ esas2r_comp_list_drain(a, &comp_list);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ clear_bit(AF_OS_RESET, &a->flags);
+
+ esas2r_trace_exit();
+}
+
+static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
+{
+
+ clear_bit(AF_CHPRST_NEEDED, &a->flags);
+ clear_bit(AF_BUSRST_NEEDED, &a->flags);
+ clear_bit(AF_BUSRST_DETECTED, &a->flags);
+ clear_bit(AF_BUSRST_PENDING, &a->flags);
+ /*
+ * Make sure we don't attempt more than 3 resets
+ * when the uptime between resets does not exceed
+ * one minute. This guards against the case where
+ * something is genuinely wrong with the hardware.
+ * The way this works is that we start with uptime
+ * ticks at 0. Each time we do a reset, we add 20
+ * seconds worth to the count. Each time a timer
+ * tick occurs, as long as a chip reset is not
+ * pending, we decrement the tick count. If the
+ * uptime ticks ever reaches 60 seconds worth, we
+ * disable the adapter from that point forward.
+ * Three strikes, you're out.
+ */
+ if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
+ ESAS2R_CHP_UPTIME_MAX)) {
+ esas2r_hdebug("*** adapter disabled ***");
+
+ /*
+ * Ok, some kind of hard failure. Make sure we
+ * exit this loop with chip interrupts
+ * permanently disabled so we don't lock up the
+ * entire system. Also flag degraded mode to
+ * prevent the heartbeat from trying to recover.
+ */
+
+ set_bit(AF_DEGRADED_MODE, &a->flags);
+ set_bit(AF_DISABLED, &a->flags);
+ clear_bit(AF_CHPRST_PENDING, &a->flags);
+ clear_bit(AF_DISC_PENDING, &a->flags);
+
+ esas2r_disable_chip_interrupts(a);
+ a->int_mask = 0;
+ esas2r_process_adapter_reset(a);
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "Adapter disabled because of hardware failure");
+ } else {
+ bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);
+
+ if (!alrdyrst)
+ /*
+ * Only disable interrupts if this is
+ * the first reset attempt.
+ */
+ esas2r_disable_chip_interrupts(a);
+
+ if ((test_bit(AF_POWER_MGT, &a->flags)) &&
+ !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
+ /*
+ * Don't reset the chip on the first
+ * deferred power up attempt.
+ */
+ } else {
+ esas2r_hdebug("*** resetting chip ***");
+ esas2r_reset_chip(a);
+ }
+
+ /* Kick off the reinitialization */
+ a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
+ a->chip_init_time = jiffies_to_msecs(jiffies);
+ if (!test_bit(AF_POWER_MGT, &a->flags)) {
+ esas2r_process_adapter_reset(a);
+
+ if (!alrdyrst) {
+ /* Remove devices now that I/O is cleaned up. */
+ a->prev_dev_cnt =
+ esas2r_targ_db_get_tgt_cnt(a);
+ esas2r_targ_db_remove_all(a, false);
+ }
+ }
+
+ a->int_mask = 0;
+ }
+}
+
+static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
+{
+ while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
+ /*
+ * Balance the enable in esas2r_init_adapter_hw().
+ * esas2r_power_down() already took care of it for power
+ * management.
+ */
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_POWER_MGT, &a->flags))
+ esas2r_disable_chip_interrupts(a);
+
+ /* Reinitialize the chip. */
+ esas2r_check_adapter(a);
+ esas2r_init_adapter_hw(a, false);
+
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
+ break;
+
+ if (test_bit(AF_POWER_MGT, &a->flags)) {
+ /* Recovery from power management. */
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ /* Chip reset during normal power up */
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "The firmware was reset during a normal power-up sequence");
+ } else {
+ /* Deferred power up complete. */
+ clear_bit(AF_POWER_MGT, &a->flags);
+ esas2r_send_reset_ae(a, true);
+ }
+ } else {
+ /* Recovery from online chip reset. */
+ if (test_bit(AF_FIRST_INIT, &a->flags)) {
+ /* Chip reset during driver load */
+ } else {
+ /* Chip reset after driver load */
+ esas2r_send_reset_ae(a, false);
+ }
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "Recovering from a chip reset while the chip was online");
+ }
+
+ clear_bit(AF_CHPRST_STARTED, &a->flags);
+ esas2r_enable_chip_interrupts(a);
+
+ /*
+ * Clear this flag last! this indicates that the chip has been
+ * reset already during initialization.
+ */
+ clear_bit(AF_CHPRST_DETECTED, &a->flags);
+ }
+}
+
+
+/* Perform deferred tasks when chip interrupts are disabled */
+void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
+{
+
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+ test_bit(AF_CHPRST_DETECTED, &a->flags)) {
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
+ esas2r_chip_rst_needed_during_tasklet(a);
+
+ esas2r_handle_chip_rst_during_tasklet(a);
+ }
+
+ if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
+ esas2r_hdebug("hard resetting bus");
+
+ clear_bit(AF_BUSRST_NEEDED, &a->flags);
+
+ if (test_bit(AF_FLASHING, &a->flags))
+ set_bit(AF_BUSRST_DETECTED, &a->flags);
+ else
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_RESET_BUS);
+ }
+
+ if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
+ esas2r_process_bus_reset(a);
+
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(a->host->shost_gendev),
+ "scsi_report_bus_reset() called");
+
+ scsi_report_bus_reset(a->host, 0);
+
+ clear_bit(AF_BUSRST_DETECTED, &a->flags);
+ clear_bit(AF_BUSRST_PENDING, &a->flags);
+
+ esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
+ }
+
+ if (test_bit(AF_PORT_CHANGE, &a->flags)) {
+ clear_bit(AF_PORT_CHANGE, &a->flags);
+
+ esas2r_targ_db_report_changes(a);
+ }
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
+{
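+ /*
+ * Forced interrupts are used for the periodic heartbeat (see
+ * esas2r_force_interrupt()), so skip tracing them to avoid
+ * flooding the trace log.
+ */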
+ if (!(doorbell & DRBL_FORCE_INT)) {
+ esas2r_trace_enter();
+ esas2r_trace("doorbell: %x", doorbell);
+ }
+
+ /* First clear the doorbell bits */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
+
+ if (doorbell & DRBL_RESET_BUS)
+ set_bit(AF_BUSRST_DETECTED, &a->flags);
+
+ if (doorbell & DRBL_FORCE_INT)
+ clear_bit(AF_HEARTBEAT, &a->flags);
+
+ if (doorbell & DRBL_PANIC_REASON_MASK) {
+ esas2r_hdebug("*** Firmware Panic ***");
+ esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
+ }
+
+ if (doorbell & DRBL_FW_RESET) {
+ set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
+ esas2r_local_reset_adapter(a);
+ }
+
+ if (!(doorbell & DRBL_FORCE_INT))
+ esas2r_trace_exit();
+}
+
+void esas2r_force_interrupt(struct esas2r_adapter *a)
+{
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
+ DRBL_DRV_VER);
+}
+
+
+static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
+ u16 target, u32 length)
+{
+ struct esas2r_target *t = a->targetdb + target;
+ u32 cplen = length;
+ unsigned long flags;
+
+ if (cplen > sizeof(t->lu_event))
+ cplen = sizeof(t->lu_event);
+
+ esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
+ esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ t->new_target_state = TS_INVALID;
+
+ if (ae->lu.dwevent & VDAAE_LU_LOST) {
+ t->new_target_state = TS_NOT_PRESENT;
+ } else {
+ switch (ae->lu.bystate) {
+ case VDAAE_LU_NOT_PRESENT:
+ case VDAAE_LU_OFFLINE:
+ case VDAAE_LU_DELETED:
+ case VDAAE_LU_FACTORY_DISABLED:
+ t->new_target_state = TS_NOT_PRESENT;
+ break;
+
+ case VDAAE_LU_ONLINE:
+ case VDAAE_LU_DEGRADED:
+ t->new_target_state = TS_PRESENT;
+ break;
+ }
+ }
+
+ if (t->new_target_state != TS_INVALID) {
+ memcpy(&t->lu_event, &ae->lu, cplen);
+
+ esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
+ }
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+}
+
+
+
+void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ union atto_vda_ae *ae =
+ (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
+ u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
+ union atto_vda_ae *last =
+ (union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
+ + length);
+
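+ /*
+ * The response buffer holds a packed sequence of variable-length
+ * AE records; each record's header carries its own length, which
+ * is used below to step to the next record.
+ */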
+ esas2r_trace_enter();
+ esas2r_trace("length: %d", length);
+
+ if (length > sizeof(struct atto_vda_ae_data)
+ || (length & 3) != 0
+ || length == 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "The AE request response length (%p) is too long: %d",
+ rq, length);
+
+ esas2r_hdebug("aereq->length (0x%x) too long", length);
+ esas2r_bugon();
+
+ last = ae;
+ }
+
+ while (ae < last) {
+ u16 target;
+
+ esas2r_trace("ae: %p", ae);
+ esas2r_trace("ae->hdr: %p", &(ae->hdr));
+
+ length = ae->hdr.bylength;
+
+ if (length > (u32)((u8 *)last - (u8 *)ae)
+ || (length & 3) != 0
+ || length == 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "the async event length is invalid (%p): %d",
+ ae, length);
+
+ esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
+ esas2r_bugon();
+
+ break;
+ }
+
+ esas2r_nuxi_ae_data(ae);
+
+ esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
+ sizeof(union atto_vda_ae));
+
+ switch (ae->hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+
+ if (ae->raid.dwflags & (VDAAE_GROUP_STATE
+ | VDAAE_RBLD_STATE
+ | VDAAE_MEMBER_CHG
+ | VDAAE_PART_CHG)) {
+ esas2r_log(ESAS2R_LOG_INFO,
+ "RAID event received - name:%s rebuild_state:%d group_state:%d",
+ ae->raid.acname,
+ ae->raid.byrebuild_state,
+ ae->raid.bygroup_state);
+ }
+
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+ esas2r_log(ESAS2R_LOG_INFO,
+ "LUN event received: event:%d target_id:%d LUN:%d state:%d",
+ ae->lu.dwevent,
+ ae->lu.id.tgtlun.wtarget_id,
+ ae->lu.id.tgtlun.bylun,
+ ae->lu.bystate);
+
+ target = ae->lu.id.tgtlun.wtarget_id;
+
+ if (target < ESAS2R_MAX_TARGETS)
+ esas2r_lun_event(a, ae, target, length);
+
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
+ break;
+
+ default:
+
+ /* Silently ignore the rest and let the apps deal with
+ * them.
+ */
+
+ break;
+ }
+
+ ae = (union atto_vda_ae *)((u8 *)ae + length);
+ }
+
+ /* Now requeue it. */
+ esas2r_start_ae_request(a, rq);
+ esas2r_trace_exit();
+}
+
+/* Send an asynchronous event for a chip reset or power management. */
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
+{
+ struct atto_vda_ae_hdr ae;
+
+ if (pwr_mgt)
+ ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
+ else
+ ae.bytype = VDAAE_HDR_TYPE_RESET;
+
+ ae.byversion = VDAAE_HDR_VER_0;
+ ae.byflags = 0;
+ ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
+
+ if (pwr_mgt)
+ esas2r_hdebug("*** sending power management AE ***");
+ else
+ esas2r_hdebug("*** sending reset AE ***");
+
+ esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
+ sizeof(union atto_vda_ae));
+}
+
+void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{}
+
+static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u8 snslen, snslen2;
+
+ snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
+
+ if (snslen > rq->sense_len)
+ snslen = rq->sense_len;
+
+ if (snslen) {
+ if (rq->sense_buf)
+ memcpy(rq->sense_buf, rq->data_buf, snslen);
+ else
+ rq->sense_buf = (u8 *)rq->data_buf;
+
+ /* See about possible sense data */
+ if (snslen2 > 0x0c) {
+ u8 *s = (u8 *)rq->data_buf;
+
+ esas2r_trace_enter();
+
+ /* ASC 0x3F / ASCQ 0x0E: REPORTED LUNS DATA HAS CHANGED */
+ if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
+ esas2r_trace("rq->target_id: %d",
+ rq->target_id);
+ esas2r_target_state_changed(a, rq->target_id,
+ TS_LUN_CHANGE);
+ }
+
+ esas2r_trace("add_sense_key=%x", s[0x0c]);
+ esas2r_trace("add_sense_qual=%x", s[0x0d]);
+ esas2r_trace_exit();
+ }
+ }
+
+ rq->sense_len = snslen;
+}
+
+
+void esas2r_complete_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ if (rq->vrq->scsi.function == VDA_FUNC_FLASH
+ && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+ clear_bit(AF_FLASHING, &a->flags);
+
+ /* See if we setup a callback to do special processing */
+
+ if (rq->interrupt_cb) {
+ (*rq->interrupt_cb)(a, rq);
+
+ if (rq->req_stat == RS_PENDING) {
+ esas2r_start_request(a, rq);
+ return;
+ }
+ }
+
+ if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
+ && unlikely(rq->req_stat != RS_SUCCESS)) {
+ esas2r_check_req_rsp_sense(a, rq);
+ esas2r_log_request_failure(a, rq);
+ }
+
+ (*rq->comp_cb)(a, rq);
+}
diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c
new file mode 100644
index 000000000000..a8df916cd57a
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_io.c
@@ -0,0 +1,877 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_io.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ struct esas2r_target *t = NULL;
+ struct esas2r_request *startrq = rq;
+ unsigned long flags;
+
+ if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
+ test_bit(AF_POWER_DOWN, &a->flags))) {
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
+ rq->req_stat = RS_SEL2;
+ else
+ rq->req_stat = RS_DEGRADED;
+ } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+ t = a->targetdb + rq->target_id;
+
+ if (unlikely(t >= a->targetdb_end
+ || !(t->flags & TF_USED))) {
+ rq->req_stat = RS_SEL;
+ } else {
+ /* copy in the target ID. */
+ rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
+
+ /*
+			 * Test if we want to report RS_SEL for a missing
+			 * target. Note that if AF_DISC_PENDING is set then
+			 * this request will go on the defer queue.
+ */
+ if (unlikely(t->target_state != TS_PRESENT &&
+ !test_bit(AF_DISC_PENDING, &a->flags)))
+ rq->req_stat = RS_SEL;
+ }
+ }
+
+ if (unlikely(rq->req_stat != RS_PENDING)) {
+ esas2r_complete_request(a, rq);
+ return;
+ }
+
+ esas2r_trace("rq=%p", rq);
+ esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ esas2r_trace("rq->target_id=%d", rq->target_id);
+ esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
+ }
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ if (likely(list_empty(&a->defer_list) &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_FLASHING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)))
+ esas2r_local_start_request(a, startrq);
+ else
+ list_add_tail(&startrq->req_list, &a->defer_list);
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+/*
+ * Start the specified request. All requests have RS_PENDING set when this
+ * routine is called. The caller is usually esas2r_start_request(), but
+ * esas2r_do_deferred_processes() will start requests that were deferred.
+ *
+ * The caller must ensure that requests can be started.
+ *
+ * esas2r_start_request() will defer a request if there are already requests
+ * waiting or there is a chip reset pending. Once the reset condition clears,
+ * esas2r_do_deferred_processes() will call this function to start the request.
+ *
+ * When a request is started, it is placed on the active list and queued to
+ * the controller.
+ */
+void esas2r_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_trace_enter();
+ esas2r_trace("rq=%p", rq);
+ esas2r_trace("rq->vrq:%p", rq->vrq);
+ esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
+
+ if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
+ && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
+ set_bit(AF_FLASHING, &a->flags);
+
+ list_add_tail(&rq->req_list, &a->active_list);
+ esas2r_start_vda_request(a, rq);
+ esas2r_trace_exit();
+}
+
+void esas2r_start_vda_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_inbound_list_source_entry *element;
+ u32 dw;
+
+ rq->req_stat = RS_STARTED;
+ /*
+ * Calculate the inbound list entry location and the current state of
+ * toggle bit.
+ */
+ a->last_write++;
+ if (a->last_write >= a->list_size) {
+ a->last_write = 0;
+ /* update the toggle bit */
+ if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
+ clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+ else
+ set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+ }
+
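+	/*
+	 * Point at slot 'last_write' of the circular inbound list.  The
+	 * toggle bit OR'd into the write pointer below presumably lets the
+	 * firmware detect wrap-around of this list.
+	 */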
+	element = (struct esas2r_inbound_list_source_entry *)
+		  a->inbound_list_md.virt_addr + a->last_write;
+
+ /* Set the VDA request size if it was never modified */
+ if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
+ rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
+
+ element->address = cpu_to_le64(rq->vrq_md->phys_addr);
+ element->length = cpu_to_le32(rq->vda_req_sz);
+
+ /* Update the write pointer */
+ dw = a->last_write;
+
+ if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
+ dw |= MU_ILW_TOGGLE;
+
+ esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
+ esas2r_trace("dw:%x", dw);
+ esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
+ esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
+}
+
+/*
+ * Build the scatter/gather list for an I/O request according to the
+ * specifications placed in the s/g context. The caller must initialize
+ * context prior to the initial call by calling esas2r_sgc_init().
+ */
+bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ union atto_vda_req *vrq = rq->vrq;
+
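+	/*
+	 * Each pass through this loop consumes one physical region from the
+	 * OS-supplied S/G list and emits one or more 64-bit SGEs, chaining
+	 * in a freshly allocated SGL page whenever the current one fills.
+	 */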
+ while (sgc->length) {
+ u32 rem = 0;
+ u64 addr;
+ u32 len;
+
+ len = (*sgc->get_phys_addr)(sgc, &addr);
+
+ if (unlikely(len == 0))
+ return false;
+
+ /* if current length is more than what's left, stop there */
+ if (unlikely(len > sgc->length))
+ len = sgc->length;
+
+another_entry:
+ /* limit to a round number less than the maximum length */
+ if (len > SGE_LEN_MAX) {
+ /*
+ * Save the remainder of the split. Whenever we limit
+ * an entry we come back around to build entries out
+ * of the leftover. We do this to prevent multiple
+ * calls to the get_phys_addr() function for an SGE
+ * that is too large.
+ */
+ rem = len - SGE_LEN_MAX;
+ len = SGE_LEN_MAX;
+ }
+
+ /* See if we need to allocate a new SGL */
+ if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
+ u8 sgelen;
+ struct esas2r_mem_desc *sgl;
+
+ /*
+			 * If no SGLs are available, return failure. The
+ * caller can call us later with the current context
+ * to pick up here.
+ */
+ sgl = esas2r_alloc_sgl(a);
+
+ if (unlikely(sgl == NULL))
+ return false;
+
+ /* Calculate the length of the last SGE filled in */
+ sgelen = (u8)((u8 *)sgc->sge.a64.curr
+ - (u8 *)sgc->sge.a64.last);
+
+ /*
+ * Copy the last SGE filled in to the first entry of
+ * the new SGL to make room for the chain entry.
+ */
+ memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
+
+ /* Figure out the new curr pointer in the new segment */
+ sgc->sge.a64.curr =
+ (struct atto_vda_sge *)((u8 *)sgl->virt_addr +
+ sgelen);
+
+ /* Set the limit pointer and build the chain entry */
+ sgc->sge.a64.limit =
+ (struct atto_vda_sge *)((u8 *)sgl->virt_addr
+ + sgl_page_size
+ - sizeof(struct
+ atto_vda_sge));
+ sgc->sge.a64.last->length = cpu_to_le32(
+ SGE_CHAIN | SGE_ADDR_64);
+ sgc->sge.a64.last->address =
+ cpu_to_le64(sgl->phys_addr);
+
+ /*
+			 * Now, if there was a previous chain entry, then
+			 * update it to contain the length of this segment
+			 * and the size of this chain. Otherwise this is the
+			 * first SGL, so set the chain_offset in the request.
+ */
+ if (sgc->sge.a64.chain) {
+ sgc->sge.a64.chain->length |=
+ cpu_to_le32(
+ ((u8 *)(sgc->sge.a64.
+ last + 1)
+ - (u8 *)rq->sg_table->
+ virt_addr)
+ + sizeof(struct atto_vda_sge) *
+ LOBIT(SGE_CHAIN_SZ));
+ } else {
+ vrq->scsi.chain_offset = (u8)
+ ((u8 *)sgc->
+ sge.a64.last -
+ (u8 *)vrq);
+
+ /*
+ * This is the first SGL, so set the
+ * chain_offset and the VDA request size in
+ * the request.
+ */
+ rq->vda_req_sz =
+ (vrq->scsi.chain_offset +
+ sizeof(struct atto_vda_sge) +
+ 3)
+ / sizeof(u32);
+ }
+
+ /*
+ * Remember this so when we get a new SGL filled in we
+ * can update the length of this chain entry.
+ */
+ sgc->sge.a64.chain = sgc->sge.a64.last;
+
+ /* Now link the new SGL onto the primary request. */
+ list_add(&sgl->next_desc, &rq->sg_table_head);
+ }
+
+ /* Update last one filled in */
+ sgc->sge.a64.last = sgc->sge.a64.curr;
+
+ /* Build the new SGE and update the S/G context */
+ sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
+		sgc->sge.a64.curr->address = cpu_to_le64(addr);
+ sgc->sge.a64.curr++;
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /*
+ * Check if we previously split an entry. If so we have to
+ * pick up where we left off.
+ */
+ if (rem) {
+ addr += len;
+ len = rem;
+ rem = 0;
+ goto another_entry;
+ }
+ }
+
+ /* Mark the end of the SGL */
+ sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
+
+ /*
+ * If there was a previous chain entry, update the length to indicate
+ * the length of this last segment.
+ */
+ if (sgc->sge.a64.chain) {
+ sgc->sge.a64.chain->length |= cpu_to_le32(
+ ((u8 *)(sgc->sge.a64.curr) -
+ (u8 *)rq->sg_table->virt_addr));
+ } else {
+ u16 reqsize;
+
+ /*
+		 * The entire VDA request was not used, so set the size of
+		 * the VDA request that will be DMA'd to the firmware.
+ */
+ reqsize =
+ ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
+ + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
+
+ /*
+ * Only update the request size if it is bigger than what is
+ * already there. We can come in here twice for some management
+ * commands.
+ */
+ if (reqsize > rq->vda_req_sz)
+ rq->vda_req_sz = reqsize;
+ }
+ return true;
+}
+
+
+/*
+ * Create a PRD list for each I-block consumed by the command. This routine
+ * determines how much data is required from each I-block being consumed
+ * by the command. The first and last I-blocks can be partials and all of
+ * the I-blocks in between are for a full I-block of data.
+ *
+ * The interleave size is used to determine the number of bytes in the first
+ * I-block; the remaining I-blocks hold whatever remains of the transfer.
+ */
+static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ u64 addr;
+ u32 len;
+ struct esas2r_mem_desc *sgl;
+ u32 numchain = 1;
+ u32 rem = 0;
+
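+	/*
+	 * numchain stays 1 while the chain slot reserved at the end of the
+	 * current SGL is unused; it drops to 0 if the final data PRD is
+	 * placed in that reserved slot instead (see below).
+	 */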
+ while (sgc->length) {
+ /* Get the next address/length pair */
+
+ len = (*sgc->get_phys_addr)(sgc, &addr);
+
+ if (unlikely(len == 0))
+ return false;
+
+ /* If current length is more than what's left, stop there */
+
+ if (unlikely(len > sgc->length))
+ len = sgc->length;
+
+another_entry:
+ /* Limit to a round number less than the maximum length */
+
+ if (len > PRD_LEN_MAX) {
+ /*
+			 * Save the remainder of the split. Whenever we limit
+ * an entry we come back around to build entries out
+ * of the leftover. We do this to prevent multiple
+ * calls to the get_phys_addr() function for an SGE
+ * that is too large.
+ */
+ rem = len - PRD_LEN_MAX;
+ len = PRD_LEN_MAX;
+ }
+
+ /* See if we need to allocate a new SGL */
+ if (sgc->sge.prd.sge_cnt == 0) {
+ if (len == sgc->length) {
+ /*
+ * We only have 1 PRD entry left.
+ * It can be placed where the chain
+ * entry would have gone
+ */
+
+ /* Build the simple SGE */
+ sgc->sge.prd.curr->ctl_len = cpu_to_le32(
+ PRD_DATA | len);
+ sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+ /* Adjust length related fields */
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /* We use the reserved chain entry for data */
+ numchain = 0;
+
+ break;
+ }
+
+ if (sgc->sge.prd.chain) {
+ /*
+				 * The current SGL is full; record its entry
+				 * count in the previous SGL's chain entry,
+				 * which points to it.
+ */
+
+ sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
+ sgc->sge.prd.sgl_max_cnt);
+ }
+
+ /*
+			 * If no SGLs are available, return failure. The
+ * caller can call us later with the current context
+ * to pick up here.
+ */
+
+ sgl = esas2r_alloc_sgl(a);
+
+ if (unlikely(sgl == NULL))
+ return false;
+
+ /*
+ * Link the new SGL onto the chain
+ * They are in reverse order
+ */
+ list_add(&sgl->next_desc, &rq->sg_table_head);
+
+ /*
+ * An SGL was just filled in and we are starting
+ * a new SGL. Prime the chain of the ending SGL with
+ * info that points to the new SGL. The length gets
+ * filled in when the new SGL is filled or ended
+ */
+
+ sgc->sge.prd.chain = sgc->sge.prd.curr;
+
+ sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
+ sgc->sge.prd.chain->address =
+ cpu_to_le64(sgl->phys_addr);
+
+ /*
+ * Start a new segment.
+ * Take one away and save for chain SGE
+ */
+
+			sgc->sge.prd.curr =
+				(struct atto_physical_region_description *)
+				sgl->virt_addr;
+ sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
+ }
+
+ sgc->sge.prd.sge_cnt--;
+ /* Build the simple SGE */
+ sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
+ sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+ /* Used another element. Point to the next one */
+
+ sgc->sge.prd.curr++;
+
+ /* Adjust length related fields */
+
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /*
+ * Check if we previously split an entry. If so we have to
+ * pick up where we left off.
+ */
+
+ if (rem) {
+ addr += len;
+ len = rem;
+ rem = 0;
+ goto another_entry;
+ }
+ }
+
+ if (!list_empty(&rq->sg_table_head)) {
+ if (sgc->sge.prd.chain) {
+ sgc->sge.prd.chain->ctl_len |=
+ cpu_to_le32(sgc->sge.prd.sgl_max_cnt
+ - sgc->sge.prd.sge_cnt
+ - numchain);
+ }
+ }
+
+ return true;
+}
+
+bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ u32 len = sgc->length;
+ struct esas2r_target *t = a->targetdb + rq->target_id;
+ u8 is_i_o = 0;
+ u16 reqsize;
+ struct atto_physical_region_description *curr_iblk_chn;
+ u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];
+
+ /*
+ * extract LBA from command so we can determine
+ * the I-Block boundary
+ */
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && t->target_state == TS_PRESENT
+ && !(t->flags & TF_PASS_THRU)) {
+ u32 lbalo = 0;
+
+ switch (rq->vrq->scsi.cdb[0]) {
+ case READ_16:
+ case WRITE_16:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[9],
+ cdb[8]),
+ MAKEWORD(cdb[7],
+ cdb[6]));
+ is_i_o = 1;
+ break;
+ }
+
+ case READ_12:
+ case WRITE_12:
+ case READ_10:
+ case WRITE_10:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[5],
+ cdb[4]),
+ MAKEWORD(cdb[3],
+ cdb[2]));
+ is_i_o = 1;
+ break;
+ }
+
+ case READ_6:
+ case WRITE_6:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[3],
+ cdb[2]),
+ MAKEWORD(cdb[1] & 0x1F,
+ 0));
+ is_i_o = 1;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (is_i_o) {
+ u32 startlba;
+
+ rq->vrq->scsi.iblk_cnt_prd = 0;
+
+ /* Determine size of 1st I-block PRD list */
+ startlba = t->inter_block - (lbalo & (t->inter_block -
+ 1));
+ sgc->length = startlba * t->block_size;
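+			/*
+			 * Note: the mask arithmetic above assumes that
+			 * inter_block is a power of two; startlba is the LBA
+			 * distance from lbalo to the next I-block boundary.
+			 */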
+
+			/*
+			 * Check if the first I-block chain starts at the
+			 * base of an I-block.
+			 */
+ if ((lbalo & (t->inter_block - 1)) == 0)
+ rq->flags |= RF_1ST_IBLK_BASE;
+
+ if (sgc->length > len)
+ sgc->length = len;
+ } else {
+ sgc->length = len;
+ }
+ } else {
+ sgc->length = len;
+ }
+
+ /* get our starting chain address */
+
+ curr_iblk_chn =
+ (struct atto_physical_region_description *)sgc->sge.a64.curr;
+
+ sgc->sge.prd.sgl_max_cnt = sgl_page_size /
+ sizeof(struct
+ atto_physical_region_description);
+
+ /* create all of the I-block PRD lists */
+
+ while (len) {
+ sgc->sge.prd.sge_cnt = 0;
+ sgc->sge.prd.chain = NULL;
+ sgc->sge.prd.curr = curr_iblk_chn;
+
+ /* increment to next I-Block */
+
+ len -= sgc->length;
+
+ /* go build the next I-Block PRD list */
+
+ if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
+ return false;
+
+ curr_iblk_chn++;
+
+ if (is_i_o) {
+ rq->vrq->scsi.iblk_cnt_prd++;
+
+ if (len > t->inter_byte)
+ sgc->length = t->inter_byte;
+ else
+ sgc->length = len;
+ }
+ }
+
+ /* figure out the size used of the VDA request */
+
+ reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
+ / sizeof(u32);
+
+ /*
+	 * Only update the request size if it is bigger than what is
+	 * already there. We can come in here twice for some management
+ * commands.
+ */
+
+ if (reqsize > rq->vda_req_sz)
+ rq->vda_req_sz = reqsize;
+
+ return true;
+}
+
+static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
+{
+ u32 delta = currtime - a->chip_init_time;
+
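+	/*
+	 * Three windows follow a chip reset: one where it is too early to
+	 * touch the hardware, one where we poll the doorbell for firmware
+	 * readiness, and a timeout after which the reset is retried.
+	 */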
+ if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
+ /* Wait before accessing registers */
+ } else if (delta >= ESAS2R_CHPRST_TIME) {
+ /*
+ * The last reset failed so try again. Reset
+ * processing will give up after three tries.
+ */
+ esas2r_local_reset_adapter(a);
+ } else {
+ /* We can now see if the firmware is ready */
+ u32 doorbell;
+
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
+ esas2r_force_interrupt(a);
+ } else {
+ u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+ /* Driver supports API version 0 and 1 */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ if (ver == DRBL_FW_VER_0) {
+ set_bit(AF_CHPRST_DETECTED, &a->flags);
+ set_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+ } else if (ver == DRBL_FW_VER_1) {
+ set_bit(AF_CHPRST_DETECTED, &a->flags);
+ clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+ a->max_vdareq_size = 1024;
+ a->build_sgl = esas2r_build_sg_list_prd;
+ } else {
+ esas2r_local_reset_adapter(a);
+ }
+ }
+ }
+}
+
+
+/* This function must be called once per timer tick */
+void esas2r_timer_tick(struct esas2r_adapter *a)
+{
+ u32 currtime = jiffies_to_msecs(jiffies);
+ u32 deltatime = currtime - a->last_tick_time;
+
+ a->last_tick_time = currtime;
+
+ /* count down the uptime */
+ if (a->chip_uptime &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)) {
+ if (deltatime >= a->chip_uptime)
+ a->chip_uptime = 0;
+ else
+ a->chip_uptime -= deltatime;
+ }
+
+ if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
+ if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
+ !test_bit(AF_CHPRST_DETECTED, &a->flags))
+ esas2r_handle_pending_reset(a, currtime);
+ } else {
+ if (test_bit(AF_DISC_PENDING, &a->flags))
+ esas2r_disc_check_complete(a);
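+		/*
+		 * Heartbeat: force an interrupt and expect AF_HEARTBEAT to
+		 * be cleared (presumably by the interrupt path, not shown
+		 * here) before the timeout window expires; if the flag is
+		 * still set, the firmware is assumed hung and the adapter
+		 * is reset.
+		 */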
+ if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
+ if (test_bit(AF_HEARTBEAT, &a->flags)) {
+ if ((currtime - a->heartbeat_time) >=
+ ESAS2R_HEARTBEAT_TIME) {
+ clear_bit(AF_HEARTBEAT, &a->flags);
+ esas2r_hdebug("heartbeat failed");
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "heartbeat failed");
+ esas2r_bugon();
+ esas2r_local_reset_adapter(a);
+ }
+ } else {
+ set_bit(AF_HEARTBEAT, &a->flags);
+ a->heartbeat_time = currtime;
+ esas2r_force_interrupt(a);
+ }
+ }
+ }
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+/*
+ * Send the specified task management function to the target and LUN
+ * specified in rqaux. In addition, immediately abort any commands that
+ * are queued but not sent to the device according to the rules specified
+ * by the task management function.
+ */
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+ struct esas2r_request *rqaux, u8 task_mgt_func)
+{
+ u16 targetid = rqaux->target_id;
+ u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
+ bool ret = false;
+ struct esas2r_request *rq;
+ struct list_head *next, *element;
+ unsigned long flags;
+
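+	/*
+	 * Requests aborted off the defer queue are collected here and
+	 * completed after queue_lock is dropped.
+	 */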
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+ esas2r_trace("rqaux:%p", rqaux);
+ esas2r_trace("task_mgt_func:%x", task_mgt_func);
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* search the defer queue looking for requests for the device */
+ list_for_each_safe(element, next, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && rq->target_id == targetid
+ && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+ || task_mgt_func == 0x20)) { /* target reset */
+ /* Found a request affected by the task management */
+ if (rq->req_stat == RS_PENDING) {
+ /*
+				 * The request is pending or waiting. We can
+				 * safely complete the request now.
+ */
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list,
+ &comp_list);
+ }
+ }
+ }
+
+ /* Send the task management request to the firmware */
+ rqaux->sense_len = 0;
+ rqaux->vrq->scsi.length = 0;
+ rqaux->target_id = targetid;
+ rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
+ memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
+	rqaux->vrq->scsi.flags |=
+		cpu_to_le32(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
+
+ if (test_bit(AF_FLASHING, &a->flags)) {
+		/* Assume success; if there are active requests, return busy */
+ rqaux->req_stat = RS_SUCCESS;
+
+ list_for_each_safe(element, next, &a->active_list) {
+ rq = list_entry(element, struct esas2r_request,
+ req_list);
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && rq->target_id == targetid
+ && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+ || task_mgt_func == 0x20)) /* target reset */
+ rqaux->req_stat = RS_BUSY;
+ }
+
+ ret = true;
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ if (!test_bit(AF_FLASHING, &a->flags))
+ esas2r_start_request(a, rqaux);
+
+ esas2r_comp_list_drain(a, &comp_list);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ esas2r_trace_exit();
+
+ return ret;
+}
+
+void esas2r_reset_bus(struct esas2r_adapter *a)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
+
+ if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+ !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+ !test_bit(AF_DISC_PENDING, &a->flags)) {
+ set_bit(AF_BUSRST_NEEDED, &a->flags);
+ set_bit(AF_BUSRST_PENDING, &a->flags);
+ set_bit(AF_OS_RESET, &a->flags);
+
+ esas2r_schedule_tasklet(a);
+ }
+}
+
+bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
+ u8 status)
+{
+ esas2r_trace_enter();
+ esas2r_trace("rq:%p", rq);
+ list_del_init(&rq->req_list);
+ if (rq->timeout > RQ_MAX_TIMEOUT) {
+ /*
+ * The request timed out, but we could not abort it because a
+ * chip reset occurred. Return busy status.
+ */
+ rq->req_stat = RS_BUSY;
+ esas2r_trace_exit();
+ return true;
+ }
+
+ rq->req_stat = status;
+ esas2r_trace_exit();
+ return true;
+}
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
new file mode 100644
index 000000000000..d89a0277a8e1
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -0,0 +1,2110 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_ioctl.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/*
+ * Buffered ioctl handlers. A buffered ioctl is one which requires that we
+ * allocate a DMA-able memory area to communicate with the firmware. In
+ * order to prevent continually allocating and freeing consistent memory,
+ * we will allocate a global buffer the first time we need it and re-use
+ * it for subsequent ioctl calls that require it.
+ */
+
+u8 *esas2r_buffered_ioctl;
+dma_addr_t esas2r_buffered_ioctl_addr;
+u32 esas2r_buffered_ioctl_size;
+struct pci_dev *esas2r_buffered_ioctl_pcid;
+
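+/*
+ * A single global semaphore serializes every buffered ioctl in the driver,
+ * since the scratch buffer above is shared by all adapters.
+ */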
+static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
+typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
+ struct esas2r_request *,
+ struct esas2r_sg_context *,
+ void *);
+typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
+ struct esas2r_request *, void *);
+
+struct esas2r_buffered_ioctl {
+ struct esas2r_adapter *a;
+ void *ioctl;
+ u32 length;
+ u32 control_code;
+ u32 offset;
+	BUFFERED_IOCTL_CALLBACK callback;
+	void *context;
+	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
+	void *done_context;
+};
+
+static void complete_fm_api_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->fm_api_command_done = 1;
+ wake_up_interruptible(&a->fm_api_waiter);
+}
+
+/* Callbacks for building scatter/gather lists for FM API requests */
+static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = sgc->cur_offset - a->save_offset;
+
+ (*addr) = a->firmware.phys + offset;
+ return a->firmware.orig_len - offset;
+}
+
+static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = sgc->cur_offset - a->save_offset;
+
+ (*addr) = a->firmware.header_buff_phys + offset;
+ return sizeof(struct esas2r_flash_img) - offset;
+}
+
+/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
+static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_request *rq;
+
+ if (down_interruptible(&a->fm_api_semaphore)) {
+ fi->status = FI_STAT_BUSY;
+ return;
+ }
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ up(&a->fm_api_semaphore);
+ fi->status = FI_STAT_BUSY;
+ return;
+ }
+
+ if (fi == &a->firmware.header) {
+		a->firmware.header_buff =
+			dma_alloc_coherent(&a->pcid->dev,
+					   sizeof(struct esas2r_flash_img),
+					   (dma_addr_t *)&a->firmware.header_buff_phys,
+					   GFP_KERNEL);
+
+		if (a->firmware.header_buff == NULL) {
+			esas2r_debug("failed to allocate header buffer!");
+			fi->status = FI_STAT_BUSY;
+			esas2r_free_request(a, rq);
+			up(&a->fm_api_semaphore);
+			return;
+		}
+
+ memcpy(a->firmware.header_buff, fi,
+ sizeof(struct esas2r_flash_img));
+ a->save_offset = a->firmware.header_buff;
+ a->fm_api_sgc.get_phys_addr =
+ (PGETPHYSADDR)get_physaddr_fm_api_header;
+ } else {
+ a->save_offset = (u8 *)fi;
+ a->fm_api_sgc.get_phys_addr =
+ (PGETPHYSADDR)get_physaddr_fm_api;
+ }
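+	/*
+	 * save_offset anchors the S/G context: the get_physaddr_fm_api*
+	 * callbacks above turn (cur_offset - save_offset) into a DMA
+	 * address within the firmware image or the header buffer.
+	 */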
+
+ rq->comp_cb = complete_fm_api_req;
+ a->fm_api_command_done = 0;
+ a->fm_api_sgc.cur_offset = a->save_offset;
+
+ if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
+ &a->fm_api_sgc))
+ goto all_done;
+
+ /* Now wait around for it to complete. */
+ while (!a->fm_api_command_done)
+ wait_event_interruptible(a->fm_api_waiter,
+ a->fm_api_command_done);
+all_done:
+ if (fi == &a->firmware.header) {
+ memcpy(fi, a->firmware.header_buff,
+ sizeof(struct esas2r_flash_img));
+
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)sizeof(struct esas2r_flash_img),
+ a->firmware.header_buff,
+ (dma_addr_t)a->firmware.header_buff_phys);
+ }
+
+ up(&a->fm_api_semaphore);
+	esas2r_free_request(a, rq);
+}
+
+static void complete_nvr_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->nvram_command_done = 1;
+ wake_up_interruptible(&a->nvram_waiter);
+}
+
+/* Callback for building scatter/gather lists for buffered ioctls */
+static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
+ u64 *addr)
+{
+ int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;
+
+ (*addr) = esas2r_buffered_ioctl_addr + offset;
+ return esas2r_buffered_ioctl_size - offset;
+}
+
+static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->buffered_ioctl_done = 1;
+ wake_up_interruptible(&a->buffered_ioctl_waiter);
+}
+
+static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
+{
+ struct esas2r_adapter *a = bi->a;
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ u8 result = IOCTL_SUCCESS;
+
+ if (down_interruptible(&buffered_ioctl_semaphore))
+ return IOCTL_OUT_OF_RESOURCES;
+
+ /* allocate a buffer or use the existing buffer. */
+ if (esas2r_buffered_ioctl) {
+ if (esas2r_buffered_ioctl_size < bi->length) {
+ /* free the too-small buffer and get a new one */
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)esas2r_buffered_ioctl_size,
+ esas2r_buffered_ioctl,
+ esas2r_buffered_ioctl_addr);
+
+ goto allocate_buffer;
+ }
+ } else {
+allocate_buffer:
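+		/*
+		 * allocate_buffer is also reached via the goto above when
+		 * the existing buffer is too small; both paths share this
+		 * allocation.
+		 */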
+ esas2r_buffered_ioctl_size = bi->length;
+ esas2r_buffered_ioctl_pcid = a->pcid;
+		esas2r_buffered_ioctl =
+			dma_alloc_coherent(&a->pcid->dev,
+					   (size_t)esas2r_buffered_ioctl_size,
+					   &esas2r_buffered_ioctl_addr,
+					   GFP_KERNEL);
+ }
+
+ if (!esas2r_buffered_ioctl) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "could not allocate %d bytes of consistent memory "
+ "for a buffered ioctl!",
+ bi->length);
+
+ esas2r_debug("buffered ioctl alloc failure");
+ result = IOCTL_OUT_OF_RESOURCES;
+ goto exit_cleanly;
+ }
+
+ memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "could not allocate an internal request");
+
+ result = IOCTL_OUT_OF_RESOURCES;
+ esas2r_debug("buffered ioctl - no requests");
+ goto exit_cleanly;
+ }
+
+ a->buffered_ioctl_done = 0;
+ rq->comp_cb = complete_buffered_ioctl_req;
+ sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
+ sgc.length = esas2r_buffered_ioctl_size;
+ if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
+ /* completed immediately, no need to wait */
+ a->buffered_ioctl_done = 0;
+		goto free_and_exit_cleanly;
+ }
+
+ /* now wait around for it to complete. */
+ while (!a->buffered_ioctl_done)
+ wait_event_interruptible(a->buffered_ioctl_waiter,
+ a->buffered_ioctl_done);
+
+free_and_exit_cleanly:
+ if (result == IOCTL_SUCCESS && bi->done_callback)
+ (*bi->done_callback)(a, rq, bi->done_context);
+
+ esas2r_free_request(a, rq);
+
+exit_cleanly:
+ if (result == IOCTL_SUCCESS)
+ memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);
+
+ up(&buffered_ioctl_semaphore);
+ return result;
+}
+
+/* SMP ioctl support */
+static int smp_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc, void *context)
+{
+ struct atto_ioctl_smp *si =
+ (struct atto_ioctl_smp *)esas2r_buffered_ioctl;
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ si->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+
+ esas2r_start_request(a, rq);
+ return true;
+}
+
+static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = si;
+ bi.length = sizeof(struct atto_ioctl_smp)
+ + le32_to_cpu(si->req_length)
+ + le32_to_cpu(si->rsp_length);
+ bi.offset = 0;
+ bi.callback = smp_ioctl_callback;
+ return handle_buffered_ioctl(&bi);
+}
+
+
+/* CSMI ioctl support */
+static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
+ rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);
+
+ /* Now call the original completion callback. */
+ (*rq->aux_req_cb)(a, rq);
+}
+
+/* Tunnel a CSMI IOCTL to the back end driver for processing. */
+static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
+ union atto_ioctl_csmi *ci,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc,
+ u32 ctrl_code,
+ u16 target_id)
+{
+ struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return false;
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
+ ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
+ ioctl->csmi.target_id = cpu_to_le16(target_id);
+ ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);
+
+ /*
+ * Always usurp the completion callback since the interrupt callback
+ * mechanism may be used.
+ */
+ rq->aux_req_cx = ci;
+ rq->aux_req_cb = rq->comp_cb;
+ rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;
+
+ if (!esas2r_build_sg_list(a, rq, sgc))
+ return false;
+
+ esas2r_start_request(a, rq);
+ return true;
+}
+
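+/*
+ * Return true if this 8-byte SCSI LUN addresses a single-level LUN: every
+ * byte except byte 1 (the LUN number itself) must be zero.
+ */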
+static bool check_lun(struct scsi_lun lun)
+{
+ bool result;
+
+ result = ((lun.scsi_lun[7] == 0) &&
+		  (lun.scsi_lun[6] == 0) &&
+		  (lun.scsi_lun[5] == 0) &&
+		  (lun.scsi_lun[4] == 0) &&
+		  (lun.scsi_lun[3] == 0) &&
+		  (lun.scsi_lun[2] == 0) &&
+		  /* byte 1 (the LUN number) is intentionally skipped */
+		  (lun.scsi_lun[0] == 0));
+
+ return result;
+}
+
+static int csmi_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc, void *context)
+{
+ struct atto_csmi *ci = (struct atto_csmi *)context;
+ union atto_ioctl_csmi *ioctl_csmi =
+ (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
+ u8 path = 0;
+ u8 tid = 0;
+ u8 lun = 0;
+ u32 sts = CSMI_STS_SUCCESS;
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
+ struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;
+
+ path = gda->path_id;
+ tid = gda->target_id;
+ lun = gda->lun;
+ } else if (ci->control_code == CSMI_CC_TASK_MGT) {
+ struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;
+
+ path = tm->path_id;
+ tid = tm->target_id;
+ lun = tm->lun;
+ }
+
+ if (path > 0) {
+ rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
+ CSMI_STS_INV_PARAM);
+ return false;
+ }
+
+ rq->target_id = tid;
+ rq->vrq->scsi.flags |= cpu_to_le32(lun);
+
+ switch (ci->control_code) {
+ case CSMI_CC_GET_DRVR_INFO:
+ {
+ struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;
+
+ strcpy(gdi->description, esas2r_get_model_name(a));
+ gdi->csmi_major_rev = CSMI_MAJOR_REV;
+ gdi->csmi_minor_rev = CSMI_MINOR_REV;
+ break;
+ }
+
+ case CSMI_CC_GET_CNTLR_CFG:
+ {
+ struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;
+
+ gcc->base_io_addr = 0;
+ pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
+ &gcc->base_memaddr_lo);
+ pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
+ &gcc->base_memaddr_hi);
+ gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
+ a->pcid->subsystem_vendor);
+ gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
+ gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
+ gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
+ gcc->pci_addr.bus_num = a->pcid->bus->number;
+ gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
+ gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);
+
+ memset(gcc->serial_num, 0, sizeof(gcc->serial_num));
+
+ gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
+ gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
+ gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
+ gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
+ gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
+ gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
+ gcc->bios_build_rev = LOWORD(a->flash_ver);
+
+ if (test_bit(AF2_THUNDERLINK, &a->flags2))
+ gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
+ | CSMI_CNTLRF_SATA_HBA;
+ else
+ gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
+ | CSMI_CNTLRF_SATA_RAID;
+
+ gcc->rrom_major_rev = 0;
+ gcc->rrom_minor_rev = 0;
+ gcc->rrom_build_rev = 0;
+ gcc->rrom_release_rev = 0;
+ gcc->rrom_biosmajor_rev = 0;
+ gcc->rrom_biosminor_rev = 0;
+ gcc->rrom_biosbuild_rev = 0;
+ gcc->rrom_biosrelease_rev = 0;
+ break;
+ }
+
+ case CSMI_CC_GET_CNTLR_STS:
+ {
+ struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ gcs->status = CSMI_CNTLR_STS_FAILED;
+ else
+ gcs->status = CSMI_CNTLR_STS_GOOD;
+
+ gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
+ break;
+ }
+
+ case CSMI_CC_FW_DOWNLOAD:
+ case CSMI_CC_GET_RAID_INFO:
+ case CSMI_CC_GET_RAID_CFG:
+
+ sts = CSMI_STS_BAD_CTRL_CODE;
+ break;
+
+ case CSMI_CC_SMP_PASSTHRU:
+ case CSMI_CC_SSP_PASSTHRU:
+ case CSMI_CC_STP_PASSTHRU:
+ case CSMI_CC_GET_PHY_INFO:
+ case CSMI_CC_SET_PHY_INFO:
+ case CSMI_CC_GET_LINK_ERRORS:
+ case CSMI_CC_GET_SATA_SIG:
+ case CSMI_CC_GET_CONN_INFO:
+ case CSMI_CC_PHY_CTRL:
+
+ if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
+ ci->control_code,
+ ESAS2R_TARG_ID_INV)) {
+ sts = CSMI_STS_FAILED;
+ break;
+ }
+
+ return true;
+
+ case CSMI_CC_GET_SCSI_ADDR:
+ {
+ struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
+
+ struct scsi_lun lun;
+
+ memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));
+
+ if (!check_lun(lun)) {
+ sts = CSMI_STS_NO_SCSI_ADDR;
+ break;
+ }
+
+ /* make sure the device is present */
+ spin_lock_irqsave(&a->mem_lock, flags);
+ t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ if (t == NULL) {
+ sts = CSMI_STS_NO_SCSI_ADDR;
+ break;
+ }
+
+ gsa->host_index = 0xFF;
+ gsa->lun = gsa->sas_lun[1];
+ rq->target_id = esas2r_targ_get_id(t, a);
+ break;
+ }
+
+ case CSMI_CC_GET_DEV_ADDR:
+ {
+ struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;
+
+ /* make sure the target is present */
+ t = a->targetdb + rq->target_id;
+
+ if (t >= a->targetdb_end
+ || t->target_state != TS_PRESENT
+ || t->sas_addr == 0) {
+ sts = CSMI_STS_NO_DEV_ADDR;
+ break;
+ }
+
+ /* fill in the result */
+ *(u64 *)gda->sas_addr = t->sas_addr;
+ memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
+ gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
+ break;
+ }
+
+ case CSMI_CC_TASK_MGT:
+
+ /* make sure the target is present */
+ t = a->targetdb + rq->target_id;
+
+ if (t >= a->targetdb_end
+ || t->target_state != TS_PRESENT
+ || !(t->flags & TF_PASS_THRU)) {
+ sts = CSMI_STS_NO_DEV_ADDR;
+ break;
+ }
+
+ if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
+ ci->control_code,
+ t->phys_targ_id)) {
+ sts = CSMI_STS_FAILED;
+ break;
+ }
+
+ return true;
+
+ default:
+
+ sts = CSMI_STS_BAD_CTRL_CODE;
+ break;
+ }
+
+ rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);
+
+ return false;
+}
+
+
+static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq, void *context)
+{
+ struct atto_csmi *ci = (struct atto_csmi *)context;
+ union atto_ioctl_csmi *ioctl_csmi =
+ (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
+
+ switch (ci->control_code) {
+ case CSMI_CC_GET_DRVR_INFO:
+ {
+ struct atto_csmi_get_driver_info *gdi =
+ &ioctl_csmi->drvr_info;
+
+ strcpy(gdi->name, ESAS2R_VERSION_STR);
+
+ gdi->major_rev = ESAS2R_MAJOR_REV;
+ gdi->minor_rev = ESAS2R_MINOR_REV;
+ gdi->build_rev = 0;
+ gdi->release_rev = 0;
+ break;
+ }
+
+ case CSMI_CC_GET_SCSI_ADDR:
+ {
+ struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
+
+ if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
+ CSMI_STS_SUCCESS) {
+ gsa->target_id = rq->target_id;
+ gsa->path_id = 0;
+ }
+
+ break;
+ }
+ }
+
+ ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
+}
+
+
+static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = &ci->data;
+ bi.length = sizeof(union atto_ioctl_csmi);
+ bi.offset = 0;
+ bi.callback = csmi_ioctl_callback;
+ bi.context = ci;
+ bi.done_callback = csmi_ioctl_done_callback;
+ bi.done_context = ci;
+
+ return handle_buffered_ioctl(&bi);
+}
+
+/* ATTO HBA ioctl support */
+
+/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
+static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
+ struct atto_ioctl *hi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ hi->status = ATTO_STS_OUT_OF_RSRC;
+
+ return false;
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
+static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
+ struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
+ u8 sts = ATTO_SPT_RS_FAILED;
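+	/* any request status not mapped below stays at ATTO_SPT_RS_FAILED */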
+
+ spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
+ spt->sense_length = rq->sense_len;
+ spt->residual_length =
+ le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);
+
+ switch (rq->req_stat) {
+ case RS_SUCCESS:
+ case RS_SCSI_ERROR:
+ sts = ATTO_SPT_RS_SUCCESS;
+ break;
+ case RS_UNDERRUN:
+ sts = ATTO_SPT_RS_UNDERRUN;
+ break;
+ case RS_OVERRUN:
+ sts = ATTO_SPT_RS_OVERRUN;
+ break;
+ case RS_SEL:
+ case RS_SEL2:
+ sts = ATTO_SPT_RS_NO_DEVICE;
+ break;
+ case RS_NO_LUN:
+ sts = ATTO_SPT_RS_NO_LUN;
+ break;
+ case RS_TIMEOUT:
+ sts = ATTO_SPT_RS_TIMEOUT;
+ break;
+ case RS_DEGRADED:
+ sts = ATTO_SPT_RS_DEGRADED;
+ break;
+ case RS_BUSY:
+ sts = ATTO_SPT_RS_BUSY;
+ break;
+ case RS_ABORTED:
+ sts = ATTO_SPT_RS_ABORTED;
+ break;
+ case RS_RESET:
+ sts = ATTO_SPT_RS_BUS_RESET;
+ break;
+ }
+
+ spt->req_status = sts;
+
+ /* Update the target ID to the next one present. */
+ spt->target_id =
+ esas2r_targ_db_find_next_present(a, (u16)spt->target_id);
+
+ /* Done, call the completion callback. */
+ (*rq->aux_req_cb)(a, rq);
+}
+
+static int hba_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc,
+ void *context)
+{
+ struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;
+
+ hi->status = ATTO_STS_SUCCESS;
+
+ switch (hi->function) {
+ case ATTO_FUNC_GET_ADAP_INFO:
+ {
+ u8 *class_code = (u8 *)&a->pcid->class;
+
+ struct atto_hba_get_adapter_info *gai =
+ &hi->data.get_adap_info;
+ int pcie_cap_reg;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_ADAP_INFO0;
+ break;
+ }
+
+ memset(gai, 0, sizeof(*gai));
+
+ gai->pci.vendor_id = a->pcid->vendor;
+ gai->pci.device_id = a->pcid->device;
+ gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
+ gai->pci.ss_device_id = a->pcid->subsystem_device;
+ gai->pci.class_code[0] = class_code[0];
+ gai->pci.class_code[1] = class_code[1];
+ gai->pci.class_code[2] = class_code[2];
+ gai->pci.rev_id = a->pcid->revision;
+ gai->pci.bus_num = a->pcid->bus->number;
+ gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
+ gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
+
+ pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+ if (pcie_cap_reg) {
+ u16 stat;
+ u32 caps;
+
+ pci_read_config_word(a->pcid,
+ pcie_cap_reg + PCI_EXP_LNKSTA,
+ &stat);
+ pci_read_config_dword(a->pcid,
+ pcie_cap_reg + PCI_EXP_LNKCAP,
+ &caps);
+
+ gai->pci.link_speed_curr =
+ (u8)(stat & PCI_EXP_LNKSTA_CLS);
+ gai->pci.link_speed_max =
+ (u8)(caps & PCI_EXP_LNKCAP_SLS);
+ gai->pci.link_width_curr =
+ (u8)((stat & PCI_EXP_LNKSTA_NLW)
+ >> PCI_EXP_LNKSTA_NLW_SHIFT);
+ gai->pci.link_width_max =
+ (u8)((caps & PCI_EXP_LNKCAP_MLW)
+ >> 4);
+ }
+
+ gai->pci.msi_vector_cnt = 1;
+
+ if (a->pcid->msix_enabled)
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
+ else if (a->pcid->msi_enabled)
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
+ else
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;
+
+ gai->adap_type = ATTO_GAI_AT_ESASRAID2;
+
+ if (test_bit(AF2_THUNDERLINK, &a->flags2))
+ gai->adap_type = ATTO_GAI_AT_TLSASHBA;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
+
+ gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
+ ATTO_GAI_AF_DEVADDR_SUPP;
+
+ if (a->pcid->subsystem_device == ATTO_ESAS_R60F
+ || a->pcid->subsystem_device == ATTO_ESAS_R608
+ || a->pcid->subsystem_device == ATTO_ESAS_R644
+ || a->pcid->subsystem_device == ATTO_TSSC_3808E)
+ gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;
+
+ gai->num_ports = ESAS2R_NUM_PHYS;
+ gai->num_phys = ESAS2R_NUM_PHYS;
+
+ strcpy(gai->firmware_rev, a->fw_rev);
+ strcpy(gai->flash_rev, a->flash_rev);
+ strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
+ strcpy(gai->model_name, esas2r_get_model_name(a));
+
+ gai->num_targets = ESAS2R_MAX_TARGETS;
+
+ gai->num_busses = 1;
+ gai->num_targsper_bus = gai->num_targets;
+ gai->num_lunsper_targ = 256;
+
+ if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
+ || a->pcid->subsystem_device == ATTO_ESAS_R60F)
+ gai->num_connectors = 4;
+ else
+ gai->num_connectors = 2;
+
+ gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;
+
+ gai->num_targets_backend = a->num_targets_backend;
+
+ gai->tunnel_flags = a->ioctl_tunnel
+ & (ATTO_GAI_TF_MEM_RW
+ | ATTO_GAI_TF_TRACE
+ | ATTO_GAI_TF_SCSI_PASS_THRU
+ | ATTO_GAI_TF_GET_DEV_ADDR
+ | ATTO_GAI_TF_PHY_CTRL
+ | ATTO_GAI_TF_CONN_CTRL
+ | ATTO_GAI_TF_GET_DEV_INFO);
+ break;
+ }
+
+ case ATTO_FUNC_GET_ADAP_ADDR:
+ {
+ struct atto_hba_get_adapter_address *gaa =
+ &hi->data.get_adap_addr;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_ADAP_ADDR0;
+ } else if (gaa->addr_type == ATTO_GAA_AT_PORT
+ || gaa->addr_type == ATTO_GAA_AT_NODE) {
+ if (gaa->addr_type == ATTO_GAA_AT_PORT
+ && gaa->port_id >= ESAS2R_NUM_PHYS) {
+ hi->status = ATTO_STS_NOT_APPL;
+ } else {
+ memcpy((u64 *)gaa->address,
+ &a->nvram->sas_addr[0], sizeof(u64));
+ gaa->addr_len = sizeof(u64);
+ }
+ } else {
+ hi->status = ATTO_STS_INV_PARAM;
+ }
+
+ break;
+ }
+
+ case ATTO_FUNC_MEM_RW:
+ {
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ hi->status = ATTO_STS_UNSUPPORTED;
+
+ break;
+ }
+
+ case ATTO_FUNC_TRACE:
+ {
+ struct atto_hba_trace *trc = &hi->data.trace;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_TRACE1) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_TRACE1;
+ break;
+ }
+
+ if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
+ && hi->version >= ATTO_VER_TRACE1) {
+ if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
+ u32 len = hi->data_length;
+ u32 offset = trc->current_offset;
+ u32 total_len = ESAS2R_FWCOREDUMP_SZ;
+
+ /* Size is zero if a core dump isn't present */
+ if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
+ total_len = 0;
+
+ if (len > total_len)
+ len = total_len;
+
+ if (offset >= total_len
+ || offset + len > total_len
+ || len == 0) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ memcpy(trc + 1,
+ a->fw_coredump_buff + offset,
+ len);
+
+ hi->data_length = len;
+ } else if (trc->trace_func == ATTO_TRC_TF_RESET) {
+ memset(a->fw_coredump_buff, 0,
+ ESAS2R_FWCOREDUMP_SZ);
+
+ clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
+ } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ /* Always return all the info we can. */
+ trc->trace_mask = 0;
+ trc->current_offset = 0;
+ trc->total_length = ESAS2R_FWCOREDUMP_SZ;
+
+ /* Return zero length buffer if core dump not present */
+ if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
+ trc->total_length = 0;
+ } else {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ }
+
+ break;
+ }
+
+ case ATTO_FUNC_SCSI_PASS_THRU:
+ {
+ struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
+ struct scsi_lun lun;
+
+ memcpy(&lun, spt->lun, sizeof(struct scsi_lun));
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_SCSI_PASS_THRU0;
+ break;
+ }
+
+ if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ esas2r_sgc_init(sgc, a, rq, NULL);
+
+ sgc->length = hi->data_length;
+ sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
+ + sizeof(struct atto_hba_scsi_pass_thru);
+
+ /* Finish request initialization */
+ rq->target_id = (u16)spt->target_id;
+ rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
+ memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
+ rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
+ rq->sense_len = spt->sense_length;
+ rq->sense_buf = (u8 *)spt->sense_data;
+ /* NOTE: we ignore spt->timeout */
+
+ /*
+ * always usurp the completion callback since the interrupt
+ * callback mechanism may be used.
+ */
+
+ rq->aux_req_cx = hi;
+ rq->aux_req_cb = rq->comp_cb;
+ rq->comp_cb = scsi_passthru_comp_cb;
+
+ if (spt->flags & ATTO_SPTF_DATA_IN) {
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+ } else if (spt->flags & ATTO_SPTF_DATA_OUT) {
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+ } else {
+ if (sgc->length) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+ }
+
+ if (spt->flags & ATTO_SPTF_ORDERED_Q)
+ rq->vrq->scsi.flags |=
+ cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
+ else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
+
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ hi->status = ATTO_STS_OUT_OF_RSRC;
+ break;
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+ }
+
+ case ATTO_FUNC_GET_DEV_ADDR:
+ {
+ struct atto_hba_get_device_address *gda =
+ &hi->data.get_dev_addr;
+ struct esas2r_target *t;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_DEV_ADDR0;
+ break;
+ }
+
+ if (gda->target_id >= ESAS2R_MAX_TARGETS) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ t = a->targetdb + (u16)gda->target_id;
+
+ if (t->target_state != TS_PRESENT) {
+ hi->status = ATTO_STS_FAILED;
+ } else if (gda->addr_type == ATTO_GDA_AT_PORT) {
+ if (t->sas_addr == 0) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ } else {
+ *(u64 *)gda->address = t->sas_addr;
+
+ gda->addr_len = sizeof(u64);
+ }
+ } else if (gda->addr_type == ATTO_GDA_AT_NODE) {
+ hi->status = ATTO_STS_NOT_APPL;
+ } else {
+ hi->status = ATTO_STS_INV_PARAM;
+ }
+
+ /* update the target ID to the next one present. */
+
+ gda->target_id =
+ esas2r_targ_db_find_next_present(a,
+ (u16)gda->target_id);
+ break;
+ }
+
+ case ATTO_FUNC_PHY_CTRL:
+ case ATTO_FUNC_CONN_CTRL:
+ {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ case ATTO_FUNC_ADAP_CTRL:
+ {
+ struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_ADAP_CTRL0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_ADAP_CTRL0;
+ break;
+ }
+
+ if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
+ esas2r_reset_adapter(a);
+ } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (test_bit(AF_CHPRST_NEEDED, &a->flags))
+ ac->adap_state = ATTO_AC_AS_RST_SCHED;
+ else if (test_bit(AF_CHPRST_PENDING, &a->flags))
+ ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
+ else if (test_bit(AF_DISC_PENDING, &a->flags))
+ ac->adap_state = ATTO_AC_AS_RST_DISC;
+ else if (test_bit(AF_DISABLED, &a->flags))
+ ac->adap_state = ATTO_AC_AS_DISABLED;
+ else if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ ac->adap_state = ATTO_AC_AS_DEGRADED;
+ else
+ ac->adap_state = ATTO_AC_AS_OK;
+
+ break;
+ }
+
+ case ATTO_FUNC_GET_DEV_INFO:
+ {
+ struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
+ struct esas2r_target *t;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_DEV_INFO0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_DEV_INFO0;
+ break;
+ }
+
+ if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ t = a->targetdb + (u16)gdi->target_id;
+
+ /* update the target ID to the next one present. */
+
+ gdi->target_id =
+ esas2r_targ_db_find_next_present(a,
+ (u16)gdi->target_id);
+
+ if (t->target_state != TS_PRESENT) {
+ hi->status = ATTO_STS_FAILED;
+ break;
+ }
+
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ default:
+
+ hi->status = ATTO_STS_INV_FUNC;
+ break;
+ }
+
+ return false;
+}
+
+static void hba_ioctl_done_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq, void *context)
+{
+ struct atto_ioctl *ioctl_hba =
+ (struct atto_ioctl *)esas2r_buffered_ioctl;
+
+ esas2r_debug("hba_ioctl_done_callback %d", a->index);
+
+ if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
+ struct atto_hba_get_adapter_info *gai =
+ &ioctl_hba->data.get_adap_info;
+
+ esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");
+
+ gai->drvr_rev_major = ESAS2R_MAJOR_REV;
+ gai->drvr_rev_minor = ESAS2R_MINOR_REV;
+
+ strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
+ strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);
+
+ gai->num_busses = 1;
+ gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
+ gai->num_lunsper_targ = 1;
+ }
+}
+
+u8 handle_hba_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl *ioctl_hba)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = ioctl_hba;
+ bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
+ bi.callback = hba_ioctl_callback;
+ bi.context = NULL;
+ bi.done_callback = hba_ioctl_done_callback;
+ bi.done_context = NULL;
+ bi.offset = 0;
+
+ return handle_buffered_ioctl(&bi);
+}
+
+
+int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *data)
+{
+ int result = 0;
+
+ a->nvram_command_done = 0;
+ rq->comp_cb = complete_nvr_req;
+
+ if (esas2r_nvram_write(a, rq, data)) {
+ /* now wait around for it to complete. */
+ while (!a->nvram_command_done)
+ wait_event_interruptible(a->nvram_waiter,
+ a->nvram_command_done);
+ ;
+
+ /* done, check the status. */
+ if (rq->req_stat == RS_SUCCESS)
+ result = 1;
+ }
+ return result;
+}
+
+
+/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
+int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
+{
+ struct atto_express_ioctl *ioctl = NULL;
+ struct esas2r_adapter *a;
+ struct esas2r_request *rq;
+ u16 code;
+ int err;
+
+ esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);
+
+ if ((arg == NULL)
+ || (cmd < EXPRESS_IOCTL_MIN)
+ || (cmd > EXPRESS_IOCTL_MAX))
+ return -ENOTSUPP;
+
+ if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "ioctl_handler access_ok failed for cmd %d, "
+ "address %p", cmd,
+ arg);
+ return -EFAULT;
+ }
+
+ /* allocate a kernel memory buffer for the IOCTL data */
+ ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
+ if (ioctl == NULL) {
+ esas2r_log(ESAS2R_LOG_WARN,
+			   "ioctl_handler kzalloc failed for %zu bytes",
+ sizeof(struct atto_express_ioctl));
+ return -ENOMEM;
+ }
+
+ err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
+ if (err != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "copy_from_user didn't copy everything (err %d, cmd %d)",
+ err,
+ cmd);
+ kfree(ioctl);
+
+ return -EFAULT;
+ }
+
+ /* verify the signature */
+
+ if (memcmp(ioctl->header.signature,
+ EXPRESS_IOCTL_SIGNATURE,
+ EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
+ esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
+ kfree(ioctl);
+
+ return -ENOTSUPP;
+ }
+
+ /* assume success */
+
+ ioctl->header.return_code = IOCTL_SUCCESS;
+ err = 0;
+
+ /*
+ * handle EXPRESS_IOCTL_GET_CHANNELS
+ * without paying attention to channel
+ */
+
+ if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
+ int i = 0, k = 0;
+
+ ioctl->data.chanlist.num_channels = 0;
+
+ while (i < MAX_ADAPTERS) {
+ if (esas2r_adapters[i]) {
+ ioctl->data.chanlist.num_channels++;
+ ioctl->data.chanlist.channel[k] = i;
+ k++;
+ }
+ i++;
+ }
+
+ goto ioctl_done;
+ }
+
+ /* get the channel */
+
+ if (ioctl->header.channel == 0xFF) {
+ a = (struct esas2r_adapter *)hostdata;
+	} else {
+		if (ioctl->header.channel >= MAX_ADAPTERS
+		    || esas2r_adapters[ioctl->header.channel] == NULL) {
+			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
+			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
+			kfree(ioctl);
+
+			return -ENOTSUPP;
+		}
+
+		a = esas2r_adapters[ioctl->header.channel];
+	}
+
+ switch (cmd) {
+ case EXPRESS_IOCTL_RW_FIRMWARE:
+
+ if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
+ err = esas2r_write_fw(a,
+ (char *)ioctl->data.fwrw.image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+
+ if (err >= 0) {
+ err = esas2r_read_fw(a,
+ (char *)ioctl->data.fwrw.
+ image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+ }
+ } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
+ err = esas2r_write_fs(a,
+ (char *)ioctl->data.fwrw.image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+
+ if (err >= 0) {
+ err = esas2r_read_fs(a,
+ (char *)ioctl->data.fwrw.
+ image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+ }
+ } else {
+ ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
+ }
+
+ break;
+
+ case EXPRESS_IOCTL_READ_PARAMS:
+
+ memcpy(ioctl->data.prw.data_buffer, a->nvram,
+ sizeof(struct esas2r_sas_nvram));
+ ioctl->data.prw.code = 1;
+ break;
+
+ case EXPRESS_IOCTL_WRITE_PARAMS:
+
+ rq = esas2r_alloc_request(a);
+		if (rq == NULL) {
+			ioctl->data.prw.code = 0;
+			break;
+		}
+
+ code = esas2r_write_params(a, rq,
+ (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+ ioctl->data.prw.code = code;
+
+ esas2r_free_request(a, rq);
+
+ break;
+
+ case EXPRESS_IOCTL_DEFAULT_PARAMS:
+
+ esas2r_nvram_get_defaults(a,
+ (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+ ioctl->data.prw.code = 1;
+ break;
+
+ case EXPRESS_IOCTL_CHAN_INFO:
+
+ ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
+ ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
+ ioctl->data.chaninfo.IRQ = a->pcid->irq;
+ ioctl->data.chaninfo.device_id = a->pcid->device;
+ ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
+ ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
+ ioctl->data.chaninfo.revision_id = a->pcid->revision;
+ ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
+ ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
+ ioctl->data.chaninfo.core_rev = 0;
+ ioctl->data.chaninfo.host_no = a->host->host_no;
+ ioctl->data.chaninfo.hbaapi_rev = 0;
+ break;
+
+ case EXPRESS_IOCTL_SMP:
+ ioctl->header.return_code = handle_smp_ioctl(a,
+ &ioctl->data.
+ ioctl_smp);
+ break;
+
+ case EXPRESS_CSMI:
+ ioctl->header.return_code =
+ handle_csmi_ioctl(a, &ioctl->data.csmi);
+ break;
+
+ case EXPRESS_IOCTL_HBA:
+ ioctl->header.return_code = handle_hba_ioctl(a,
+ &ioctl->data.
+ ioctl_hba);
+ break;
+
+ case EXPRESS_IOCTL_VDA:
+ err = esas2r_write_vda(a,
+ (char *)&ioctl->data.ioctl_vda,
+ 0,
+ sizeof(struct atto_ioctl_vda) +
+ ioctl->data.ioctl_vda.data_length);
+
+ if (err >= 0) {
+ err = esas2r_read_vda(a,
+ (char *)&ioctl->data.ioctl_vda,
+ 0,
+ sizeof(struct atto_ioctl_vda) +
+ ioctl->data.ioctl_vda.data_length);
+ }
+
+ break;
+
+ case EXPRESS_IOCTL_GET_MOD_INFO:
+
+ ioctl->data.modinfo.adapter = a;
+ ioctl->data.modinfo.pci_dev = a->pcid;
+ ioctl->data.modinfo.scsi_host = a->host;
+ ioctl->data.modinfo.host_no = a->host->host_no;
+
+ break;
+
+ default:
+		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
+ ioctl->header.return_code = IOCTL_ERR_INVCMD;
+ }
+
+ioctl_done:
+
+ if (err < 0) {
+ esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
+ cmd);
+
+		switch (err) {
+		case -ENOMEM:
+		case -EBUSY:
+			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
+			break;
+
+		case -ENOSYS:
+		case -EINVAL:
+			ioctl->header.return_code = IOCTL_INVALID_PARAM;
+			break;
+
+		default:
+			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
+			break;
+		}
+ }
+
+ /* Always copy the buffer back, if only to pick up the status */
+ err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
+ if (err != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "ioctl_handler copy_to_user didn't copy "
+ "everything (err %d, cmd %d)", err,
+ cmd);
+ kfree(ioctl);
+
+ return -EFAULT;
+ }
+
+ kfree(ioctl);
+
+ return 0;
+}
+
+int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
+{
+ return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
+}
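+
+/*
+ * A minimal user-space sketch of driving this interface (illustrative
+ * only; the node path comes from the ATTOnode registration in
+ * esas2r_main.c, and error handling is omitted):
+ *
+ *	struct atto_express_ioctl ioc;
+ *	int fd = open("/proc/scsi/esas2r/ATTOnode", O_RDWR);
+ *
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	memcpy(ioc.header.signature, EXPRESS_IOCTL_SIGNATURE,
+ *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
+ *	ioc.header.channel = 0xFF;	// 0xFF = use this node's adapter
+ *
+ *	if (ioctl(fd, EXPRESS_IOCTL_CHAN_INFO, &ioc) == 0 &&
+ *	    ioc.header.return_code == IOCTL_SUCCESS)
+ *		printf("IRQ: %d\n", ioc.data.chaninfo.IRQ);
+ */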
+
+static void free_fw_buffers(struct esas2r_adapter *a)
+{
+ if (a->firmware.data) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->firmware.orig_len,
+ a->firmware.data,
+ (dma_addr_t)a->firmware.phys);
+
+ a->firmware.data = NULL;
+ }
+}
+
+static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
+{
+ free_fw_buffers(a);
+
+ a->firmware.orig_len = length;
+
+ a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+ (size_t)length,
+ (dma_addr_t *)&a->firmware.
+ phys,
+ GFP_KERNEL);
+
+ if (!a->firmware.data) {
+ esas2r_debug("buffer alloc failed!");
+ return 0;
+ }
+
+ return 1;
+}
+
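+/*
+ * The firmware interface below is a small state machine driven through
+ * the "fw" sysfs file: a write at offset zero delivers an
+ * esas2r_flash_img header that begins a command.  FI_ACT_DOWN caches
+ * the header plus the data chunks that follow and issues the flash
+ * command once the final chunk arrives, leaving a status header behind
+ * for the next read (FW_STATUS_ST).  FI_ACT_UP and FI_ACT_UPSZ cache
+ * only the header (FW_COMMAND_ST); the command is issued by the first
+ * read at offset zero, which then returns the result.
+ */
+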
+/* Handle a call to read firmware. */
+int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ esas2r_trace_enter();
+ /* if the cached header is a status, simply copy it over and return. */
+ if (a->firmware.state == FW_STATUS_ST) {
+ int size = min_t(int, count, sizeof(a->firmware.header));
+ esas2r_trace_exit();
+ memcpy(buf, &a->firmware.header, size);
+ esas2r_debug("esas2r_read_fw: STATUS size %d", size);
+ return size;
+ }
+
+ /*
+ * if the cached header is a command, do it if at
+ * offset 0, otherwise copy the pieces.
+ */
+
+ if (a->firmware.state == FW_COMMAND_ST) {
+ u32 length = a->firmware.header.length;
+ esas2r_trace_exit();
+
+		esas2r_debug("esas2r_read_fw: COMMAND length %u off %ld",
+			     length,
+			     off);
+
+ if (off == 0) {
+ if (a->firmware.header.action == FI_ACT_UP) {
+ if (!allocate_fw_buffers(a, length))
+ return -ENOMEM;
+
+ /* copy header over */
+
+ memcpy(a->firmware.data,
+ &a->firmware.header,
+ sizeof(a->firmware.header));
+
+ do_fm_api(a,
+ (struct esas2r_flash_img *)a->firmware.data);
+ } else if (a->firmware.header.action == FI_ACT_UPSZ) {
+ int size =
+ min((int)count,
+ (int)sizeof(a->firmware.header));
+ do_fm_api(a, &a->firmware.header);
+ memcpy(buf, &a->firmware.header, size);
+ esas2r_debug("FI_ACT_UPSZ size %d", size);
+ return size;
+ } else {
+ esas2r_debug("invalid action %d",
+ a->firmware.header.action);
+ return -ENOSYS;
+ }
+ }
+
+ if (count + off > length)
+ count = length - off;
+
+ if (count < 0)
+ return 0;
+
+ if (!a->firmware.data) {
+ esas2r_debug(
+ "read: nonzero offset but no buffer available!");
+ return -ENOMEM;
+ }
+
+		esas2r_debug("esas2r_read_fw: off %ld count %d length %u", off,
+			     count,
+			     length);
+
+ memcpy(buf, &a->firmware.data[off], count);
+
+ /* when done, release the buffer */
+
+ if (length <= off + count) {
+ esas2r_debug("esas2r_read_fw: freeing buffer!");
+
+ free_fw_buffers(a);
+ }
+
+ return count;
+ }
+
+ esas2r_trace_exit();
+ esas2r_debug("esas2r_read_fw: invalid firmware state %d",
+ a->firmware.state);
+
+ return -EINVAL;
+}
+
+/* Handle a call to write firmware. */
+int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ u32 length;
+
+ if (off == 0) {
+ struct esas2r_flash_img *header =
+ (struct esas2r_flash_img *)buf;
+
+ /* assume version 0 flash image */
+
+ int min_size = sizeof(struct esas2r_flash_img_v0);
+
+ a->firmware.state = FW_INVALID_ST;
+
+ /* validate the version field first */
+
+ if (count < 4
+ || header->fi_version > FI_VERSION_1) {
+ esas2r_debug(
+ "esas2r_write_fw: short header or invalid version");
+ return -EINVAL;
+ }
+
+		/* See if it's a version 1 flash image */
+
+ if (header->fi_version == FI_VERSION_1)
+ min_size = sizeof(struct esas2r_flash_img);
+
+ /* If this is the start, the header must be full and valid. */
+ if (count < min_size) {
+ esas2r_debug("esas2r_write_fw: short header, aborting");
+ return -EINVAL;
+ }
+
+ /* Make sure the size is reasonable. */
+ length = header->length;
+
+ if (length > 1024 * 1024) {
+ esas2r_debug(
+ "esas2r_write_fw: hosed, length %d fi_version %d",
+ length, header->fi_version);
+ return -EINVAL;
+ }
+
+ /*
+ * If this is a write command, allocate memory because
+ * we have to cache everything. otherwise, just cache
+ * the header, because the read op will do the command.
+ */
+
+ if (header->action == FI_ACT_DOWN) {
+ if (!allocate_fw_buffers(a, length))
+ return -ENOMEM;
+
+ /*
+ * Store the command, so there is context on subsequent
+ * calls.
+ */
+ memcpy(&a->firmware.header,
+ buf,
+ sizeof(*header));
+ } else if (header->action == FI_ACT_UP
+ || header->action == FI_ACT_UPSZ) {
+ /* Save the command, result will be picked up on read */
+ memcpy(&a->firmware.header,
+ buf,
+ sizeof(*header));
+
+ a->firmware.state = FW_COMMAND_ST;
+
+ esas2r_debug(
+ "esas2r_write_fw: COMMAND, count %d, action %d ",
+ count, header->action);
+
+ /*
+ * Pretend we took the whole buffer,
+ * so we don't get bothered again.
+ */
+
+ return count;
+ } else {
+ esas2r_debug("esas2r_write_fw: invalid action %d ",
+ a->firmware.header.action);
+ return -ENOSYS;
+ }
+ } else {
+ length = a->firmware.header.length;
+ }
+
+ /*
+ * We only get here on a download command, regardless of offset.
+ * the chunks written by the system need to be cached, and when
+ * the final one arrives, issue the fmapi command.
+ */
+
+ if (off + count > length)
+ count = length - off;
+
+ if (count > 0) {
+		esas2r_debug("esas2r_write_fw: off %ld count %d length %u", off,
+			     count,
+			     length);
+
+ /*
+ * On a full upload, the system tries sending the whole buffer.
+ * there's nothing to do with it, so just drop it here, before
+ * trying to copy over into unallocated memory!
+ */
+ if (a->firmware.header.action == FI_ACT_UP)
+ return count;
+
+ if (!a->firmware.data) {
+ esas2r_debug(
+ "write: nonzero offset but no buffer available!");
+ return -ENOMEM;
+ }
+
+ memcpy(&a->firmware.data[off], buf, count);
+
+ if (length == off + count) {
+ do_fm_api(a,
+ (struct esas2r_flash_img *)a->firmware.data);
+
+ /*
+ * Now copy the header result to be picked up by the
+ * next read
+ */
+ memcpy(&a->firmware.header,
+ a->firmware.data,
+ sizeof(a->firmware.header));
+
+ a->firmware.state = FW_STATUS_ST;
+
+ esas2r_debug("write completed");
+
+ /*
+ * Since the system has the data buffered, the only way
+ * this can leak is if a root user writes a program
+ * that writes a shorter buffer than it claims, and the
+ * copyin fails.
+ */
+ free_fw_buffers(a);
+ }
+ }
+
+ return count;
+}
+
+/* Callback for the completion of a VDA request. */
+static void vda_complete_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->vda_command_done = 1;
+ wake_up_interruptible(&a->vda_waiter);
+}
+
+/* Scatter/gather callback for VDA requests */
+static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
+
+ (*addr) = a->ppvda_buffer + offset;
+ return VDA_MAX_BUFFER_SIZE - offset;
+}
+
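+/*
+ * The VDA interface mirrors the firmware one: user space writes an
+ * atto_ioctl_vda request into the persistent DMA buffer through the
+ * "vda" sysfs file and then reads it back.  The read at offset zero is
+ * what actually submits the request to the chip and waits for
+ * completion; later reads simply copy out the rest of the response.
+ */
+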
+/* Handle a call to read a VDA command. */
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ if (!a->vda_buffer)
+ return -ENOMEM;
+
+ if (off == 0) {
+ struct esas2r_request *rq;
+ struct atto_ioctl_vda *vi =
+ (struct atto_ioctl_vda *)a->vda_buffer;
+ struct esas2r_sg_context sgc;
+ bool wait_for_completion;
+
+ /*
+		 * Presumably, someone has already written to the vda_buffer,
+		 * and is now reading back the response, so we will actually
+		 * issue the request to the chip and reply.
+ */
+
+ /* allocate a request */
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+			esas2r_debug("esas2r_read_vda: out of requests");
+ return -EBUSY;
+ }
+
+ rq->comp_cb = vda_complete_req;
+
+ sgc.first_req = rq;
+ sgc.adapter = a;
+ sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
+
+ a->vda_command_done = 0;
+
+ wait_for_completion =
+ esas2r_process_vda_ioctl(a, vi, rq, &sgc);
+
+ if (wait_for_completion) {
+ /* now wait around for it to complete. */
+
+ while (!a->vda_command_done)
+ wait_event_interruptible(a->vda_waiter,
+ a->vda_command_done);
+ }
+
+ esas2r_free_request(a, (struct esas2r_request *)rq);
+ }
+
+ if (off > VDA_MAX_BUFFER_SIZE)
+ return 0;
+
+ if (count + off > VDA_MAX_BUFFER_SIZE)
+ count = VDA_MAX_BUFFER_SIZE - off;
+
+ if (count < 0)
+ return 0;
+
+ memcpy(buf, a->vda_buffer + off, count);
+
+ return count;
+}
+
+/* Handle a call to write a VDA command. */
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ /*
+ * allocate memory for it, if not already done. once allocated,
+ * we will keep it around until the driver is unloaded.
+ */
+
+ if (!a->vda_buffer) {
+ dma_addr_t dma_addr;
+ a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+ (size_t)
+ VDA_MAX_BUFFER_SIZE,
+ &dma_addr,
+ GFP_KERNEL);
+
+ a->ppvda_buffer = dma_addr;
+ }
+
+ if (!a->vda_buffer)
+ return -ENOMEM;
+
+ if (off > VDA_MAX_BUFFER_SIZE)
+ return 0;
+
+ if (count + off > VDA_MAX_BUFFER_SIZE)
+ count = VDA_MAX_BUFFER_SIZE - off;
+
+ if (count < 1)
+ return 0;
+
+ memcpy(a->vda_buffer + off, buf, count);
+
+ return count;
+}
+
+/* Callback for the completion of an FS_API request.*/
+static void fs_api_complete_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->fs_api_command_done = 1;
+
+ wake_up_interruptible(&a->fs_api_waiter);
+}
+
+/* Scatter/gather callback for VDA requests */
+static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)a->fs_api_buffer;
+ u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
+
+ (*addr) = a->ppfs_api_buffer + offset;
+
+ return a->fs_api_buffer_size - offset;
+}
+
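+/*
+ * The FS_API files follow the same write-then-read pattern: the write
+ * caches an esas2r_ioctl_fs request (reallocating the DMA buffer if the
+ * current one is too small), and the read at offset zero submits it and
+ * waits.  fs_api_semaphore serializes these requests so only one flash
+ * operation can be in flight at a time.
+ */
+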
+/* Handle a call to read firmware via FS_API. */
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ if (!a->fs_api_buffer)
+ return -ENOMEM;
+
+ if (off == 0) {
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)a->fs_api_buffer;
+
+ /* If another flash request is already in progress, return. */
+ if (down_interruptible(&a->fs_api_semaphore)) {
+busy:
+ fs->status = ATTO_STS_OUT_OF_RSRC;
+ return -EBUSY;
+ }
+
+ /*
+		 * Presumably, someone has already written to the
+		 * fs_api_buffer, and is now reading back the response, so we
+		 * will actually issue the request to the chip and reply.
+		 * Allocate a request.
+ */
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ esas2r_debug("esas2r_read_fs: out of requests");
+ up(&a->fs_api_semaphore);
+ goto busy;
+ }
+
+ rq->comp_cb = fs_api_complete_req;
+
+		/* Set up the SGCONTEXT to build the s/g table */
+
+ sgc.cur_offset = fs->data;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
+
+ a->fs_api_command_done = 0;
+
+ if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
+ if (fs->status == ATTO_STS_OUT_OF_RSRC)
+ count = -EBUSY;
+
+ goto dont_wait;
+ }
+
+ /* Now wait around for it to complete. */
+
+		while (!a->fs_api_command_done)
+			wait_event_interruptible(a->fs_api_waiter,
+						 a->fs_api_command_done);
+dont_wait:
+ /* Free the request and keep going */
+ up(&a->fs_api_semaphore);
+ esas2r_free_request(a, (struct esas2r_request *)rq);
+
+ /* Pick up possible error code from above */
+ if (count < 0)
+ return count;
+ }
+
+ if (off > a->fs_api_buffer_size)
+ return 0;
+
+ if (count + off > a->fs_api_buffer_size)
+ count = a->fs_api_buffer_size - off;
+
+ if (count < 0)
+ return 0;
+
+ memcpy(buf, a->fs_api_buffer + off, count);
+
+ return count;
+}
+
+/* Handle a call to write firmware via FS_API. */
+int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ if (off == 0) {
+ struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
+ u32 length = fs->command.length + offsetof(
+ struct esas2r_ioctl_fs,
+ data);
+
+ /*
+ * Special case, for BEGIN commands, the length field
+ * is lying to us, so just get enough for the header.
+ */
+
+ if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
+ length = offsetof(struct esas2r_ioctl_fs, data);
+
+ /*
+ * Beginning a command. We assume we'll get at least
+ * enough in the first write so we can look at the
+ * header and see how much we need to alloc.
+ */
+
+ if (count < offsetof(struct esas2r_ioctl_fs, data))
+ return -EINVAL;
+
+ /* Allocate a buffer or use the existing buffer. */
+ if (a->fs_api_buffer) {
+ if (a->fs_api_buffer_size < length) {
+ /* Free too-small buffer and get a new one */
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ a->fs_api_buffer,
+ (dma_addr_t)a->ppfs_api_buffer);
+
+ goto re_allocate_buffer;
+ }
+ } else {
+re_allocate_buffer:
+ a->fs_api_buffer_size = length;
+
+ a->fs_api_buffer = (u8 *)dma_alloc_coherent(
+ &a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ (dma_addr_t *)&a->ppfs_api_buffer,
+ GFP_KERNEL);
+ }
+ }
+
+ if (!a->fs_api_buffer)
+ return -ENOMEM;
+
+ if (off > a->fs_api_buffer_size)
+ return 0;
+
+ if (count + off > a->fs_api_buffer_size)
+ count = a->fs_api_buffer_size - off;
+
+ if (count < 1)
+ return 0;
+
+ memcpy(a->fs_api_buffer + off, buf, count);
+
+ return count;
+}
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
new file mode 100644
index 000000000000..9bf285df58dd
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -0,0 +1,254 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_log.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/*
+ * this module within the driver is tasked with providing logging functionality.
+ * the event_log_level module parameter controls the level of messages that are
+ * written to the system log. the default level of messages that are written
+ * are critical and warning messages. if other types of messages are desired,
+ * one simply needs to load the module with the correct value for the
+ * event_log_level module parameter. for example:
+ *
+ * insmod <module> event_log_level=1
+ *
+ * will load the module and only critical events will be written by this module
+ * to the system log. if critical, warning, and information-level messages are
+ * desired, the correct value for the event_log_level module parameter
+ * would be as follows:
+ *
+ * insmod <module> event_log_level=3
+ */
+
+#define EVENT_LOG_BUFF_SIZE 1024
+
+static long event_log_level = ESAS2R_LOG_DFLT;
+
+module_param(event_log_level, long, S_IRUGO);
+MODULE_PARM_DESC(event_log_level,
+ "Specifies the level of events to report to the system log. Critical and warning level events are logged by default.");
+
+/* A shared buffer to use for formatting messages. */
+static char event_buffer[EVENT_LOG_BUFF_SIZE];
+
+/* A lock to protect the shared buffer used for formatting messages. */
+static DEFINE_SPINLOCK(event_buffer_lock);
+
+/**
+ * translates an esas2r-defined logging event level to a kernel logging level.
+ *
+ * @param [in] level the esas2r-defined logging event level to translate
+ *
+ * @return the corresponding kernel logging level.
+ */
+static const char *translate_esas2r_event_level_to_kernel(const long level)
+{
+ switch (level) {
+ case ESAS2R_LOG_CRIT:
+ return KERN_CRIT;
+
+ case ESAS2R_LOG_WARN:
+ return KERN_WARNING;
+
+ case ESAS2R_LOG_INFO:
+ return KERN_INFO;
+
+ case ESAS2R_LOG_DEBG:
+ case ESAS2R_LOG_TRCE:
+ default:
+ return KERN_DEBUG;
+ }
+}
+
+/**
+ * the master logging function. this function will format the message as
+ * outlined by the formatting string, the input device information and the
+ * substitution arguments and output the resulting string to the system log.
+ *
+ * @param [in] level the event log level of the message
+ * @param [in] dev the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] args the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+static int esas2r_log_master(const long level,
+ const struct device *dev,
+ const char *format,
+ va_list args)
+{
+ if (level <= event_log_level) {
+ unsigned long flags = 0;
+ int retval = 0;
+ char *buffer = event_buffer;
+ size_t buflen = EVENT_LOG_BUFF_SIZE;
+ const char *fmt_nodev = "%s%s: ";
+ const char *fmt_dev = "%s%s [%s, %s, %s]";
+ const char *slevel =
+ translate_esas2r_event_level_to_kernel(level);
+
+ spin_lock_irqsave(&event_buffer_lock, flags);
+
+ if (buffer == NULL) {
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ return -1;
+ }
+
+ memset(buffer, 0, buflen);
+
+ /*
+ * format the level onto the beginning of the string and do
+ * some pointer arithmetic to move the pointer to the point
+ * where the actual message can be inserted.
+ */
+
+ if (dev == NULL) {
+ snprintf(buffer, buflen, fmt_nodev, slevel,
+ ESAS2R_DRVR_NAME);
+ } else {
+ snprintf(buffer, buflen, fmt_dev, slevel,
+ ESAS2R_DRVR_NAME,
+ (dev->driver ? dev->driver->name : "unknown"),
+ (dev->bus ? dev->bus->name : "unknown"),
+ dev_name(dev));
+ }
+
+ buffer += strlen(event_buffer);
+ buflen -= strlen(event_buffer);
+
+ retval = vsnprintf(buffer, buflen, format, args);
+ if (retval < 0) {
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ return -1;
+ }
+
+ /*
+ * Put a line break at the end of the formatted string so that
+ * we don't wind up with run-on messages. only append if there
+ * is enough space in the buffer.
+ */
+ if (strlen(event_buffer) < buflen)
+ strcat(buffer, "\n");
+
+		printk("%s", event_buffer);
+
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * formats and logs a message to the system log.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] format the formatting string for the message
+ * @param [in] ... the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log(const long level, const char *format, ...)
+{
+ int retval = 0;
+ va_list args;
+
+ va_start(args, format);
+
+ retval = esas2r_log_master(level, NULL, format, args);
+
+ va_end(args);
+
+ return retval;
+}
+
+/**
+ * formats and logs a message to the system log. this message will include
+ * device information.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] dev the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] ... the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log_dev(const long level,
+ const struct device *dev,
+ const char *format,
+ ...)
+{
+ int retval = 0;
+ va_list args;
+
+ va_start(args, format);
+
+ retval = esas2r_log_master(level, dev, format, args);
+
+ va_end(args);
+
+ return retval;
+}
+
+/**
+ * logs a hexadecimal dump of a buffer to the system log.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] buf   the buffer to dump
+ * @param [in] len   the number of bytes to dump
+ *
+ * @return 1, unconditionally.
+ */
+int esas2r_log_hexdump(const long level,
+ const void *buf,
+ size_t len)
+{
+ if (level <= event_log_level) {
+ print_hex_dump(translate_esas2r_event_level_to_kernel(level),
+ "", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ }
+
+ return 1;
+}
diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
new file mode 100644
index 000000000000..7b6397bb5b94
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.h
@@ -0,0 +1,118 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_log.h
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef __esas2r_log_h__
+#define __esas2r_log_h__
+
+struct device;
+
+enum {
+ ESAS2R_LOG_NONE = 0, /* no events logged */
+ ESAS2R_LOG_CRIT = 1, /* critical events */
+ ESAS2R_LOG_WARN = 2, /* warning events */
+ ESAS2R_LOG_INFO = 3, /* info events */
+ ESAS2R_LOG_DEBG = 4, /* debugging events */
+ ESAS2R_LOG_TRCE = 5, /* tracing events */
+
+#ifdef ESAS2R_TRACE
+ ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE
+#else
+ ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN
+#endif
+};
+
+int esas2r_log(const long level, const char *format, ...);
+int esas2r_log_dev(const long level,
+ const struct device *dev,
+ const char *format,
+ ...);
+int esas2r_log_hexdump(const long level,
+ const void *buf,
+ size_t len);
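+
+/*
+ * example usage (illustrative; these calls mirror ones made elsewhere
+ * in the driver):
+ *
+ *	esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
+ *	esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
+ *		       "before pci_enable_device() enable_cnt: %d",
+ *		       pcid->enable_cnt.counter);
+ */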
+
+/*
+ * the following macros are provided specifically for debugging and tracing
+ * messages. esas2r_debug() is provided for generic non-hardware layer
+ * debugging and tracing events. esas2r_hdebug is provided specifically for
+ * hardware layer debugging and tracing events.
+ */
+
+#ifdef ESAS2R_DEBUG
+#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
+#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
+#else
+#define esas2r_debug(f, args ...)
+#define esas2r_hdebug(f, args ...)
+#endif /* ESAS2R_DEBUG */
+
+/*
+ * the following macros are provided in order to trace the driver and catch
+ * some more serious bugs. be warned, enabling these macros may *severely*
+ * impact performance.
+ */
+
+#ifdef ESAS2R_TRACE
+#define esas2r_bugon() \
+ do { \
+ esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \
+ " - dumping stack and stopping kernel", __func__, \
+ __LINE__); \
+ dump_stack(); \
+ BUG(); \
+ } while (0)
+
+#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \
+ __func__, __FILE__, __LINE__)
+#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \
+ __func__, __FILE__, __LINE__)
+#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \
+ f, __func__, __FILE__, __LINE__, \
+ ## args)
+#else
+#define esas2r_bugon()
+#define esas2r_trace_enter()
+#define esas2r_trace_exit()
+#define esas2r_trace(f, args ...)
+#endif /* ESAS2R_TRACE */
+
+#endif /* __esas2r_log_h__ */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
new file mode 100644
index 000000000000..f37f3e3dd5d5
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -0,0 +1,2032 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_main.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver");
+MODULE_AUTHOR("ATTO Technology, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ESAS2R_VERSION_STR);
+
+/* global definitions */
+
+static int found_adapters;
+struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS];
+
+#define ESAS2R_VDA_EVENT_PORT1 54414
+#define ESAS2R_VDA_EVENT_PORT2 54415
+#define ESAS2R_VDA_EVENT_SOCK_COUNT 2
+
+static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *host = class_to_shost(dev);
+
+ return (struct esas2r_adapter *)host->hostdata;
+}
+
+static ssize_t read_fw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_fw(a, buf, off, count);
+}
+
+static ssize_t write_fw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_write_fw(a, buf, off, count);
+}
+
+static ssize_t read_fs(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_fs(a, buf, off, count);
+}
+
+static ssize_t write_fs(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min(sizeof(struct esas2r_ioctl_fs), count);
+ int result = 0;
+
+ result = esas2r_write_fs(a, buf, off, count);
+
+ if (result < 0)
+ result = 0;
+
+ return length;
+}
+
+static ssize_t read_vda(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_vda(a, buf, off, count);
+}
+
+static ssize_t write_vda(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_write_vda(a, buf, off, count);
+}
+
+static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
+
+ memcpy(buf, a->nvram, length);
+ return length;
+}
+
+static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ struct esas2r_request *rq;
+ int result = -EFAULT;
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL)
+ return -ENOMEM;
+
+ if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
+ result = count;
+
+ esas2r_free_request(a, rq);
+
+ return result;
+}
+
+static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
+
+ return sizeof(struct esas2r_sas_nvram);
+}
+
+static ssize_t read_hw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
+
+ if (!a->local_atto_ioctl)
+ return -ENOMEM;
+
+ if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
+ return -ENOMEM;
+
+ memcpy(buf, a->local_atto_ioctl, length);
+
+ return length;
+}
+
+static ssize_t write_hw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min(sizeof(struct atto_ioctl), count);
+
+ if (!a->local_atto_ioctl) {
+ a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+ GFP_KERNEL);
+ if (a->local_atto_ioctl == NULL) {
+ esas2r_log(ESAS2R_LOG_WARN,
+				   "write_hw kzalloc failed for %zu bytes",
+ sizeof(struct atto_ioctl));
+ return -ENOMEM;
+ }
+ }
+
+ memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
+ memcpy(a->local_atto_ioctl, buf, length);
+
+ return length;
+}
+
+#define ESAS2R_RW_BIN_ATTR(_name) \
+ struct bin_attribute bin_attr_ ## _name = { \
+ .attr = \
+ { .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
+ .size = 0, \
+ .read = read_ ## _name, \
+ .write = write_ ## _name }
+
+ESAS2R_RW_BIN_ATTR(fw);
+ESAS2R_RW_BIN_ATTR(fs);
+ESAS2R_RW_BIN_ATTR(vda);
+ESAS2R_RW_BIN_ATTR(hw);
+ESAS2R_RW_BIN_ATTR(live_nvram);
+
+struct bin_attribute bin_attr_default_nvram = {
+ .attr = { .name = "default_nvram", .mode = S_IRUGO },
+ .size = 0,
+ .read = read_default_nvram,
+ .write = NULL
+};
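+
+/*
+ * The binary attributes above are registered against the Scsi_Host
+ * class device in esas2r_probe(), so they typically surface as files
+ * such as /sys/class/scsi_host/host<N>/fw (the exact location depends
+ * on where the kernel places shost_dev).  fw, fs and vda implement the
+ * write-then-read protocols from esas2r_ioctl.c; live_nvram and
+ * default_nvram expose the adapter's SAS NVRAM settings.
+ */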
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .show_info = esas2r_show_info,
+ .name = ESAS2R_LONGNAME,
+ .release = esas2r_release,
+ .info = esas2r_info,
+ .ioctl = esas2r_ioctl,
+ .queuecommand = esas2r_queuecommand,
+ .eh_abort_handler = esas2r_eh_abort,
+ .eh_device_reset_handler = esas2r_device_reset,
+ .eh_bus_reset_handler = esas2r_bus_reset,
+ .eh_host_reset_handler = esas2r_host_reset,
+ .eh_target_reset_handler = esas2r_target_reset,
+ .can_queue = 128,
+ .this_id = -1,
+ .sg_tablesize = SCSI_MAX_SG_SEGMENTS,
+ .cmd_per_lun =
+ ESAS2R_DEFAULT_CMD_PER_LUN,
+ .present = 0,
+ .unchecked_isa_dma = 0,
+ .use_clustering = ENABLE_CLUSTERING,
+ .emulated = 0,
+ .proc_name = ESAS2R_DRVR_NAME,
+ .slave_configure = esas2r_slave_configure,
+ .slave_alloc = esas2r_slave_alloc,
+ .slave_destroy = esas2r_slave_destroy,
+ .change_queue_depth = esas2r_change_queue_depth,
+ .change_queue_type = esas2r_change_queue_type,
+ .max_sectors = 0xFFFF,
+};
+
+int sgl_page_size = 512;
+module_param(sgl_page_size, int, 0);
+MODULE_PARM_DESC(sgl_page_size,
+ "Scatter/gather list (SGL) page size in number of S/G "
+ "entries. If your application is doing a lot of very large "
+ "transfers, you may want to increase the SGL page size. "
+ "Default 512.");
+
+int num_sg_lists = 1024;
+module_param(num_sg_lists, int, 0);
+MODULE_PARM_DESC(num_sg_lists,
+ "Number of scatter/gather lists. Default 1024.");
+
+int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
+module_param(sg_tablesize, int, 0);
+MODULE_PARM_DESC(sg_tablesize,
+ "Maximum number of entries in a scatter/gather table.");
+
+int num_requests = 256;
+module_param(num_requests, int, 0);
+MODULE_PARM_DESC(num_requests,
+ "Number of requests. Default 256.");
+
+int num_ae_requests = 4;
+module_param(num_ae_requests, int, 0);
+MODULE_PARM_DESC(num_ae_requests,
+		 "Number of VDA asynchronous event requests. Default 4.");
+
+int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
+module_param(cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+ "Maximum number of commands per LUN. Default "
+ DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
+
+int can_queue = 128;
+module_param(can_queue, int, 0);
+MODULE_PARM_DESC(can_queue,
+ "Maximum number of commands per adapter. Default 128.");
+
+int esas2r_max_sectors = 0xFFFF;
+module_param(esas2r_max_sectors, int, 0);
+MODULE_PARM_DESC(esas2r_max_sectors,
+ "Maximum number of disk sectors in a single data transfer. "
+ "Default 65535 (largest possible setting).");
+
+int interrupt_mode = 1;
+module_param(interrupt_mode, int, 0);
+MODULE_PARM_DESC(interrupt_mode,
+ "Defines the interrupt mode to use. 0 for legacy"
+ ", 1 for MSI. Default is MSI (1).");
+
+static struct pci_device_id
+ esas2r_pci_table[] = {
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D,
+ 0,
+ 0, 0 },
+ { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E,
+ 0,
+ 0, 0 },
+ { 0, 0, 0, 0,
+ 0,
+ 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
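+
+/*
+ * All supported boards share PCI device ID 0x0049; the subsystem device
+ * ID (0x0049 through 0x004E) distinguishes the individual ExpressSAS
+ * models.
+ */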
+
+static int
+esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);
+
+static void
+esas2r_remove(struct pci_dev *pcid);
+
+static struct pci_driver
+ esas2r_pci_driver = {
+ .name = ESAS2R_DRVR_NAME,
+ .id_table = esas2r_pci_table,
+ .probe = esas2r_probe,
+ .remove = esas2r_remove,
+ .suspend = esas2r_suspend,
+ .resume = esas2r_resume,
+};
+
+static int esas2r_probe(struct pci_dev *pcid,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *host = NULL;
+ struct esas2r_adapter *a;
+ int err;
+
+ size_t host_alloc_size = sizeof(struct esas2r_adapter)
+ + ((num_requests) +
+ 1) * sizeof(struct esas2r_request);
+
+ esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
+ "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
+ pcid->vendor,
+ pcid->device,
+ pcid->subsystem_vendor,
+ pcid->subsystem_device);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+ "before pci_enable_device() "
+ "enable_cnt: %d",
+ pcid->enable_cnt.counter);
+
+ err = pci_enable_device(pcid);
+ if (err != 0) {
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
+ "pci_enable_device() FAIL (%d)",
+ err);
+ return -ENODEV;
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+ "pci_enable_device() OK");
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "after pci_enable_device() enable_cnt: %d",
+ pcid->enable_cnt.counter);
+
+ host = scsi_host_alloc(&driver_template, host_alloc_size);
+ if (host == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
+ return -ENODEV;
+ }
+
+ memset(host->hostdata, 0, host_alloc_size);
+
+ a = (struct esas2r_adapter *)host->hostdata;
+
+ esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
+
+ /* override max LUN and max target id */
+
+ host->max_id = ESAS2R_MAX_ID + 1;
+ host->max_lun = 255;
+
+ /* we can handle 16-byte CDbs */
+
+ host->max_cmd_len = 16;
+
+ host->can_queue = can_queue;
+ host->cmd_per_lun = cmd_per_lun;
+ host->this_id = host->max_id + 1;
+ host->max_channel = 0;
+ host->unique_id = found_adapters;
+ host->sg_tablesize = sg_tablesize;
+ host->max_sectors = esas2r_max_sectors;
+
+ /* set to bus master for BIOses that don't do it for us */
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
+
+ pci_set_master(pcid);
+
+ if (!esas2r_init_adapter(host, pcid, found_adapters)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to initialize device at PCI bus %x:%x",
+ pcid->bus->number,
+ pcid->devfn);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(host);
+
+		return -ENODEV;
+	}
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
+ host->hostdata);
+
+ pci_set_drvdata(pcid, host);
+
+ esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
+
+ err = scsi_add_host(host, &pcid->dev);
+
+ if (err) {
+ esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
+ "scsi_add_host() FAIL");
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(host);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "pci_set_drvdata(%p, NULL) called",
+ pcid);
+
+ pci_set_drvdata(pcid, NULL);
+
+ return -ENODEV;
+ }
+
+ esas2r_fw_event_on(a);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_scan_host() called");
+
+ scsi_scan_host(host);
+
+ /* Add sysfs binary files */
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: fw");
+ else
+ a->sysfs_fw_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: fs");
+ else
+ a->sysfs_fs_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: vda");
+ else
+ a->sysfs_vda_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: hw");
+ else
+ a->sysfs_hw_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: live_nvram");
+ else
+ a->sysfs_live_nvram_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj,
+ &bin_attr_default_nvram))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: default_nvram");
+ else
+ a->sysfs_default_nvram_created = 1;
+
+ found_adapters++;
+
+ return 0;
+}
+
+static void esas2r_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host;
+ int index;
+
+ if (pdev == NULL) {
+ esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL");
+ return;
+ }
+
+ host = pci_get_drvdata(pdev);
+
+ if (host == NULL) {
+ /*
+ * this can happen if pci_set_drvdata was already called
+ * to clear the host pointer. if this is the case, we
+ * are okay; this channel has already been cleaned up.
+ */
+
+ return;
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "esas2r_remove(%p) called; "
+ "host:%p", pdev,
+ host);
+
+ index = esas2r_cleanup(host);
+
+ if (index < 0)
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev),
+ "unknown host in %s",
+ __func__);
+
+ found_adapters--;
+
+ /* if this was the last adapter, clean up the rest of the driver */
+
+ if (found_adapters == 0)
+ esas2r_cleanup(NULL);
+}
+
+static int __init esas2r_init(void)
+{
+ int i;
+
+ esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+ /* verify valid parameters */
+
+ if (can_queue < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: can_queue must be at least 1, value "
+ "forced.");
+ can_queue = 1;
+ } else if (can_queue > 2048) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: can_queue must be no larger than 2048, "
+ "value forced.");
+ can_queue = 2048;
+ }
+
+ if (cmd_per_lun < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: cmd_per_lun must be at least 1, value "
+ "forced.");
+ cmd_per_lun = 1;
+ } else if (cmd_per_lun > 2048) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: cmd_per_lun must be no larger than "
+ "2048, value forced.");
+ cmd_per_lun = 2048;
+ }
+
+ if (sg_tablesize < 32) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: sg_tablesize must be at least 32, "
+ "value forced.");
+ sg_tablesize = 32;
+ }
+
+ if (esas2r_max_sectors < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: esas2r_max_sectors must be at least "
+ "1, value forced.");
+ esas2r_max_sectors = 1;
+ } else if (esas2r_max_sectors > 0xffff) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: esas2r_max_sectors must be no larger "
+ "than 0xffff, value forced.");
+ esas2r_max_sectors = 0xffff;
+ }
+
+ sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1);
+
+ if (sgl_page_size < SGL_PG_SZ_MIN)
+ sgl_page_size = SGL_PG_SZ_MIN;
+ else if (sgl_page_size > SGL_PG_SZ_MAX)
+ sgl_page_size = SGL_PG_SZ_MAX;
+
+ if (num_sg_lists < NUM_SGL_MIN)
+ num_sg_lists = NUM_SGL_MIN;
+ else if (num_sg_lists > NUM_SGL_MAX)
+ num_sg_lists = NUM_SGL_MAX;
+
+ if (num_requests < NUM_REQ_MIN)
+ num_requests = NUM_REQ_MIN;
+ else if (num_requests > NUM_REQ_MAX)
+ num_requests = NUM_REQ_MAX;
+
+ if (num_ae_requests < NUM_AE_MIN)
+ num_ae_requests = NUM_AE_MIN;
+ else if (num_ae_requests > NUM_AE_MAX)
+ num_ae_requests = NUM_AE_MAX;
+
+ /* set up other globals */
+
+ for (i = 0; i < MAX_ADAPTERS; i++)
+ esas2r_adapters[i] = NULL;
+
+ /* initialize */
+
+ driver_template.module = THIS_MODULE;
+
+ if (pci_register_driver(&esas2r_pci_driver) != 0)
+ esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED");
+ else
+ esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK");
+
+ if (!found_adapters) {
+ pci_unregister_driver(&esas2r_pci_driver);
+ esas2r_cleanup(NULL);
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "driver will not be loaded because no ATTO "
+ "%s devices were found",
+ ESAS2R_DRVR_NAME);
+		return -ENODEV;
+ } else {
+ esas2r_log(ESAS2R_LOG_INFO, "found %d adapters",
+ found_adapters);
+ }
+
+ return 0;
+}
+
+/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
+static const struct file_operations esas2r_proc_fops = {
+ .compat_ioctl = esas2r_proc_ioctl,
+ .unlocked_ioctl = esas2r_proc_ioctl,
+};
+
+static struct Scsi_Host *esas2r_proc_host;
+static int esas2r_proc_major;
+
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
+ (int)cmd, (void __user *)arg);
+}
+
+static void __exit esas2r_exit(void)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+ if (esas2r_proc_major > 0) {
+ esas2r_log(ESAS2R_LOG_INFO, "unregister proc");
+
+ remove_proc_entry(ATTONODE_NAME,
+ esas2r_proc_host->hostt->proc_dir);
+ unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME);
+
+ esas2r_proc_major = 0;
+ }
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called");
+
+ pci_unregister_driver(&esas2r_pci_driver);
+}
+
+int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
+
+ struct esas2r_target *t;
+ int dev_count = 0;
+
+ esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no);
+
+ seq_printf(m, ESAS2R_LONGNAME "\n"
+ "Driver version: "ESAS2R_VERSION_STR "\n"
+ "Flash version: %s\n"
+ "Firmware version: %s\n"
+ "Copyright "ESAS2R_COPYRIGHT_YEARS "\n"
+ "http://www.attotech.com\n"
+ "\n",
+ a->flash_rev,
+ a->fw_rev[0] ? a->fw_rev : "(none)");
+
+
+ seq_printf(m, "Adapter information:\n"
+ "--------------------\n"
+ "Model: %s\n"
+ "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n",
+ esas2r_get_model_name(a),
+ a->nvram->sas_addr[0],
+ a->nvram->sas_addr[1],
+ a->nvram->sas_addr[2],
+ a->nvram->sas_addr[3],
+ a->nvram->sas_addr[4],
+ a->nvram->sas_addr[5],
+ a->nvram->sas_addr[6],
+ a->nvram->sas_addr[7]);
+
+ seq_puts(m, "\n"
+ "Discovered devices:\n"
+ "\n"
+ " # Target ID\n"
+ "---------------\n");
+
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->buffered_target_state == TS_PRESENT) {
+ seq_printf(m, " %3d %3d\n",
+ ++dev_count,
+ (u16)(uintptr_t)(t - a->targetdb));
+ }
+
+ if (dev_count == 0)
+ seq_puts(m, "none\n");
+
+ seq_puts(m, "\n");
+ return 0;
+}
+
+int esas2r_release(struct Scsi_Host *sh)
+{
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
+ "esas2r_release() called");
+
+ esas2r_cleanup(sh);
+ if (sh->irq)
+ free_irq(sh->irq, NULL);
+ scsi_unregister(sh);
+ return 0;
+}
+
+const char *esas2r_info(struct Scsi_Host *sh)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
+ static char esas2r_info_str[512];
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
+ "esas2r_info() called");
+
+ /*
+ * if we haven't done so already, register as a char driver
+ * and stick a node under "/proc/scsi/esas2r/ATTOnode"
+ */
+
+ if (esas2r_proc_major <= 0) {
+ esas2r_proc_host = sh;
+
+ esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME,
+ &esas2r_proc_fops);
+
+ esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev),
+ "register_chrdev (major %d)",
+ esas2r_proc_major);
+
+ if (esas2r_proc_major > 0) {
+ struct proc_dir_entry *pde;
+
+ pde = proc_create(ATTONODE_NAME, 0,
+ sh->hostt->proc_dir,
+ &esas2r_proc_fops);
+
+ if (!pde) {
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(sh->shost_gendev),
+ "failed to create_proc_entry");
+ esas2r_proc_major = -1;
+ }
+ }
+ }
+
+ sprintf(esas2r_info_str,
+ ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)"
+ " driver version: "ESAS2R_VERSION_STR " firmware version: "
+ "%s\n",
+ a->pcid->bus->number, a->pcid->devfn, a->pcid->irq,
+ a->fw_rev[0] ? a->fw_rev : "(none)");
+
+ return esas2r_info_str;
+}
+
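+/*
+ * Contract for the callback below, as used by esas2r_build_sg_list():
+ * the builder passes the offset it wants mapped in sgc->cur_offset and
+ * gets back the physical address plus the number of contiguous bytes
+ * available there.  sgc->exp_offset tracks how far the current
+ * scatterlist element extends, so a caller that consumed only part of
+ * the previous return is handed the remainder of the same element
+ * before the walk advances.
+ */
+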
+/* Callback for building a request scatter/gather list */
+static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ u32 len;
+
+ if (likely(sgc->cur_offset == sgc->exp_offset)) {
+ /*
+ * the normal case: caller used all bytes from previous call, so
+ * expected offset is the same as the current offset.
+ */
+
+ if (sgc->sgel_count < sgc->num_sgel) {
+ /* retrieve next segment, except for first time */
+			if (sgc->exp_offset != NULL) {
+ /* advance current segment */
+ sgc->cur_sgel = sg_next(sgc->cur_sgel);
+ ++(sgc->sgel_count);
+ }
+
+ len = sg_dma_len(sgc->cur_sgel);
+ (*addr) = sg_dma_address(sgc->cur_sgel);
+
+ /* save the total # bytes returned to caller so far */
+ sgc->exp_offset += len;
+
+ } else {
+ len = 0;
+ }
+ } else if (sgc->cur_offset < sgc->exp_offset) {
+ /*
+ * caller did not use all bytes from previous call. need to
+ * compute the address based on current segment.
+ */
+
+ len = sg_dma_len(sgc->cur_sgel);
+ (*addr) = sg_dma_address(sgc->cur_sgel);
+
+ sgc->exp_offset -= len;
+
+ /* calculate PA based on prev segment address and offsets */
+ *addr = *addr +
+ (sgc->cur_offset - sgc->exp_offset);
+
+ sgc->exp_offset += len;
+
+ /* re-calculate length based on offset */
+ len = lower_32_bits(
+ sgc->exp_offset - sgc->cur_offset);
+ } else { /* if ( sgc->cur_offset > sgc->exp_offset ) */
+ /*
+ * we don't expect the caller to skip ahead.
+ * cur_offset will never exceed the len we return
+ */
+ len = 0;
+ }
+
+ return len;
+}
+
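+/*
+ * queuecommand: map the command's scatterlist, build a VDA SCSI request
+ * around it and start it on the adapter.  Allocation or mapping
+ * failures are reported as SCSI_MLQUEUE_HOST_BUSY so the midlayer will
+ * retry later.
+ */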
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ unsigned bufflen;
+
+ /* Assume success, if it fails we will fix the result later. */
+ cmd->result = DID_OK << 16;
+
+ if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
+ cmd->result = DID_NO_CONNECT << 16;
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+
+ rq = esas2r_alloc_request(a);
+ if (unlikely(rq == NULL)) {
+ esas2r_debug("esas2r_alloc_request failed");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ rq->cmd = cmd;
+ bufflen = scsi_bufflen(cmd);
+
+ if (likely(bufflen != 0)) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+ }
+
+ memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
+ rq->vrq->scsi.length = cpu_to_le32(bufflen);
+ rq->target_id = cmd->device->id;
+ rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+ rq->sense_buf = cmd->sense_buffer;
+ rq->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ esas2r_sgc_init(&sgc, a, rq, NULL);
+
+ sgc.length = bufflen;
+ sgc.cur_offset = NULL;
+
+ sgc.cur_sgel = scsi_sglist(cmd);
+ sgc.exp_offset = NULL;
+ sgc.num_sgel = scsi_dma_map(cmd);
+ sgc.sgel_count = 0;
+
+ if (unlikely(sgc.num_sgel < 0)) {
+ esas2r_free_request(a, rq);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;
+
+ if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
+ scsi_dma_unmap(cmd);
+ esas2r_free_request(a, rq);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
+ (int)cmd->device->lun);
+
+ esas2r_start_request(a, rq);
+
+ return 0;
+}
+
+static void complete_task_management_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ (*rq->task_management_status_ptr) = rq->req_stat;
+ esas2r_free_request(a, rq);
+}
+
+/**
+ * Searches the specified queue for the command to abort.
+ *
+ * @param [in] a              the adapter
+ * @param [in,out] abort_request  receives the abort request, if one
+ *                                must be sent to the firmware
+ * @param [in] cmd            the SCSI command to abort
+ * @param [in] queue          the queue to search
+ *
+ * @return 0 on failure, 1 if command was not found, 2 if command was found
+ */
+static int esas2r_check_active_queue(struct esas2r_adapter *a,
+ struct esas2r_request **abort_request,
+ struct scsi_cmnd *cmd,
+ struct list_head *queue)
+{
+ bool found = false;
+ struct esas2r_request *ar = *abort_request;
+ struct esas2r_request *rq;
+ struct list_head *element, *next;
+
+ list_for_each_safe(element, next, queue) {
+
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->cmd == cmd) {
+
+ /* Found the request. See what to do with it. */
+ if (queue == &a->active_list) {
+ /*
+ * We are searching the active queue, which
+ * means that we need to send an abort request
+ * to the firmware.
+ */
+ ar = esas2r_alloc_request(a);
+ if (ar == NULL) {
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(a->host->shost_gendev),
+ "unable to allocate an abort request for cmd %p",
+ cmd);
+ return 0; /* Failure */
+ }
+
+ /*
+ * Task management request must be formatted
+ * with a lock held.
+ */
+ ar->sense_len = 0;
+ ar->vrq->scsi.length = 0;
+ ar->target_id = rq->target_id;
+ ar->vrq->scsi.flags |= cpu_to_le32(
+ (u8)le32_to_cpu(rq->vrq->scsi.flags));
+
+ memset(ar->vrq->scsi.cdb, 0,
+ sizeof(ar->vrq->scsi.cdb));
+
+ ar->vrq->scsi.flags |= cpu_to_le32(
+ FCP_CMND_TRM);
+ ar->vrq->scsi.u.abort_handle =
+ rq->vrq->scsi.handle;
+ } else {
+ /*
+ * The request is pending but not active on
+ * the firmware. Just free it now and we'll
+ * report the successful abort below.
+ */
+ list_del_init(&rq->req_list);
+ esas2r_free_request(a, rq);
+ }
+
+ found = true;
+ break;
+ }
+
+ }
+
+ if (!found)
+ return 1; /* Not found */
+
+ return 2; /* found */
+}
+
+int esas2r_eh_abort(struct scsi_cmnd *cmd)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *abort_request = NULL;
+ unsigned long flags;
+ struct list_head *queue;
+ int result;
+
+ esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
+ cmd->result = DID_ABORT << 16;
+
+ scsi_set_resid(cmd, 0);
+
+ cmd->scsi_done(cmd);
+
+ return SUCCESS;
+ }
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /*
+ * Run through the defer and active queues looking for the request
+ * to abort.
+ */
+
+ queue = &a->defer_list;
+
+check_active_queue:
+
+ result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
+
+ if (!result) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ return FAILED;
+ } else if (result == 1 && (queue == &a->defer_list)) {
+ queue = &a->active_list;
+ goto check_active_queue;
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ if (abort_request) {
+ u8 task_management_status = RS_PENDING;
+
+ /*
+ * the request is already active, so we need to tell
+ * the firmware to abort it and wait for the response.
+ */
+
+ abort_request->comp_cb = complete_task_management_request;
+ abort_request->task_management_status_ptr =
+ &task_management_status;
+
+ esas2r_start_request(a, abort_request);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ while (task_management_status == RS_PENDING)
+ msleep(10);
+
+ /*
+ * Once we get here, the original request will have been
+ * completed by the firmware and the abort request will have
+ * been cleaned up. we're done!
+ */
+
+ return SUCCESS;
+ }
+
+ /*
+ * If we get here, either we found the inactive request and
+ * freed it, or we didn't find it at all. Either way, success!
+ */
+
+ cmd->result = DID_ABORT << 16;
+
+ scsi_set_resid(cmd, 0);
+
+ cmd->scsi_done(cmd);
+
+ return SUCCESS;
+}
+
+static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+
+ if (host_reset)
+ esas2r_reset_adapter(a);
+ else
+ esas2r_reset_bus(a);
+
+ /* above call sets the AF_OS_RESET flag. wait for it to clear. */
+
+ while (test_bit(AF_OS_RESET, &a->flags)) {
+ msleep(10);
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+
+ return SUCCESS;
+}
+
+int esas2r_host_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd);
+
+ return esas2r_host_bus_reset(cmd, true);
+}
+
+int esas2r_bus_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd);
+
+ return esas2r_host_bus_reset(cmd, false);
+}
+
+static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *rq;
+ u8 task_management_status = RS_PENDING;
+ bool completed;
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+
+retry:
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ if (target_reset) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to allocate a request for a "
+ "target reset (%d)!",
+ cmd->device->id);
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to allocate a request for a "
+ "device reset (%d:%d)!",
+ cmd->device->id,
+ cmd->device->lun);
+ }
+
+ return FAILED;
+ }
+
+ rq->target_id = cmd->device->id;
+ rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+ rq->req_stat = RS_PENDING;
+
+ rq->comp_cb = complete_task_management_request;
+ rq->task_management_status_ptr = &task_management_status;
+
+ if (target_reset) {
+ esas2r_debug("issuing target reset (%p) to id %d", rq,
+ cmd->device->id);
+ completed = esas2r_send_task_mgmt(a, rq, 0x20);
+ } else {
+ esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
+ cmd->device->id, cmd->device->lun);
+ completed = esas2r_send_task_mgmt(a, rq, 0x10);
+ }
+
+ if (completed) {
+ /* Task management cmd completed right away, need to free it. */
+
+ esas2r_free_request(a, rq);
+ } else {
+ /*
+ * Wait for firmware to complete the request. Completion
+ * callback will free it.
+ */
+ while (task_management_status == RS_PENDING)
+ msleep(10);
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags))
+ return FAILED;
+
+ if (task_management_status == RS_BUSY) {
+ /*
+ * Busy, probably because we are flashing. Wait a bit and
+ * try again.
+ */
+ msleep(100);
+ goto retry;
+ }
+
+ return SUCCESS;
+}
+
+int esas2r_device_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd);
+
+ return esas2r_dev_targ_reset(cmd, false);
+}
+
+int esas2r_target_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd);
+
+ return esas2r_dev_targ_reset(cmd, true);
+}
+
+int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
+
+ scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);
+
+ return dev->queue_depth;
+}
+
+int esas2r_change_queue_type(struct scsi_device *dev, int type)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type);
+
+ if (dev->tagged_supported) {
+ scsi_set_tag_type(dev, type);
+
+ if (type)
+ scsi_activate_tcq(dev, dev->queue_depth);
+ else
+ scsi_deactivate_tcq(dev, dev->queue_depth);
+ } else {
+ type = 0;
+ }
+
+ return type;
+}
+
+int esas2r_slave_alloc(struct scsi_device *dev)
+{
+ return 0;
+}
+
+int esas2r_slave_configure(struct scsi_device *dev)
+{
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
+ "esas2r_slave_configure()");
+
+ if (dev->tagged_supported) {
+ scsi_set_tag_type(dev, MSG_SIMPLE_TAG);
+ scsi_activate_tcq(dev, cmd_per_lun);
+ } else {
+ scsi_set_tag_type(dev, 0);
+ scsi_deactivate_tcq(dev, cmd_per_lun);
+ }
+
+ return 0;
+}
+
+void esas2r_slave_destroy(struct scsi_device *dev)
+{
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
+ "esas2r_slave_destroy()");
+}
+
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u8 reqstatus = rq->req_stat;
+
+ if (reqstatus == RS_SUCCESS)
+ return;
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ if (reqstatus == RS_SCSI_ERROR) {
+ if (rq->func_rsp.scsi_rsp.sense_len >= 14) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x",
+ rq->sense_buf[2], rq->sense_buf[12],
+ rq->sense_buf[13],
+ rq->vrq->scsi.cdb[0]);
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - SCSI error CDB:%x\n",
+ rq->vrq->scsi.cdb[0]);
+ }
+ } else if ((rq->vrq->scsi.cdb[0] != INQUIRY
+ && rq->vrq->scsi.cdb[0] != REPORT_LUNS)
+ || (reqstatus != RS_SEL
+ && reqstatus != RS_SEL2)) {
+ if ((reqstatus == RS_UNDERRUN) &&
+ (rq->vrq->scsi.cdb[0] == INQUIRY)) {
+ /* Don't log inquiry underruns */
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - cdb:%x reqstatus:%d target:%d",
+ rq->vrq->scsi.cdb[0], reqstatus,
+ rq->target_id);
+ }
+ }
+ }
+}
+
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ u32 starttime;
+ u32 timeout;
+
+ starttime = jiffies_to_msecs(jiffies);
+ timeout = rq->timeout ? rq->timeout : 5000;
+
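+ /*
+ * Poll the adapter in lieu of interrupts until the request
+ * leaves the RS_STARTED state or the timeout expires.
+ */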
+ while (true) {
+ esas2r_polled_interrupt(a);
+
+ if (rq->req_stat != RS_STARTED)
+ break;
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+ esas2r_hdebug("request TMO");
+ esas2r_bugon();
+
+ rq->req_stat = RS_TIMEOUT;
+
+ esas2r_local_reset_adapter(a);
+ return;
+ }
+ }
+}
+
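+/*
+ * Slide the PCI data window so that addr_lo falls inside it and return
+ * the offset of addr_lo within the window.
+ */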
+u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo)
+{
+ u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1);
+ u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE;
+
+ if (a->window_base != base) {
+ esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP,
+ base | MVRPW1R_ENABLE);
+ esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP);
+ a->window_base = base;
+ }
+
+ return offset;
+}
+
+/* Read a block of data from chip memory */
+bool esas2r_read_mem_block(struct esas2r_adapter *a,
+ void *to,
+ u32 from,
+ u32 size)
+{
+ u8 *end = (u8 *)to;
+
+ while (size) {
+ u32 len;
+ u32 offset;
+ u32 iatvr;
+
+ iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE);
+
+ esas2r_map_data_window(a, iatvr);
+
+ offset = from & (MW_DATA_WINDOW_SIZE - 1);
+ len = size;
+
+ if (len > MW_DATA_WINDOW_SIZE - offset)
+ len = MW_DATA_WINDOW_SIZE - offset;
+
+ from += len;
+ size -= len;
+
+ while (len--) {
+ *end++ = esas2r_read_data_byte(a, offset);
+ offset++;
+ }
+ }
+
+ return true;
+}
+
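+/*
+ * The "nuxi" helpers below convert the little-endian multi-byte fields
+ * of VDA firmware structures to CPU byte order in place, selecting the
+ * fields to swap based on the function code.
+ */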
+void esas2r_nuxi_mgt_data(u8 function, void *data)
+{
+ struct atto_vda_grp_info *g;
+ struct atto_vda_devinfo *d;
+ struct atto_vdapart_info *p;
+ struct atto_vda_dh_info *h;
+ struct atto_vda_metrics_info *m;
+ struct atto_vda_schedule_info *s;
+ struct atto_vda_buzzer_info *b;
+ u8 i;
+
+ switch (function) {
+ case VDAMGT_BUZZER_INFO:
+ case VDAMGT_BUZZER_SET:
+
+ b = (struct atto_vda_buzzer_info *)data;
+
+ b->duration = le32_to_cpu(b->duration);
+ break;
+
+ case VDAMGT_SCHEDULE_INFO:
+ case VDAMGT_SCHEDULE_EVENT:
+
+ s = (struct atto_vda_schedule_info *)data;
+
+ s->id = le32_to_cpu(s->id);
+
+ break;
+
+ case VDAMGT_DEV_INFO:
+ case VDAMGT_DEV_CLEAN:
+ case VDAMGT_DEV_PT_INFO:
+ case VDAMGT_DEV_FEATURES:
+ case VDAMGT_DEV_PT_FEATURES:
+ case VDAMGT_DEV_OPERATION:
+
+ d = (struct atto_vda_devinfo *)data;
+
+ d->capacity = le64_to_cpu(d->capacity);
+ d->block_size = le32_to_cpu(d->block_size);
+ d->ses_dev_index = le16_to_cpu(d->ses_dev_index);
+ d->target_id = le16_to_cpu(d->target_id);
+ d->lun = le16_to_cpu(d->lun);
+ d->features = le16_to_cpu(d->features);
+ break;
+
+ case VDAMGT_GRP_INFO:
+ case VDAMGT_GRP_CREATE:
+ case VDAMGT_GRP_DELETE:
+ case VDAMGT_ADD_STORAGE:
+ case VDAMGT_MEMBER_ADD:
+ case VDAMGT_GRP_COMMIT:
+ case VDAMGT_GRP_REBUILD:
+ case VDAMGT_GRP_COMMIT_INIT:
+ case VDAMGT_QUICK_RAID:
+ case VDAMGT_GRP_FEATURES:
+ case VDAMGT_GRP_COMMIT_INIT_AUTOMAP:
+ case VDAMGT_QUICK_RAID_INIT_AUTOMAP:
+ case VDAMGT_SPARE_LIST:
+ case VDAMGT_SPARE_ADD:
+ case VDAMGT_SPARE_REMOVE:
+ case VDAMGT_LOCAL_SPARE_ADD:
+ case VDAMGT_GRP_OPERATION:
+
+ g = (struct atto_vda_grp_info *)data;
+
+ g->capacity = le64_to_cpu(g->capacity);
+ g->block_size = le32_to_cpu(g->block_size);
+ g->interleave = le32_to_cpu(g->interleave);
+ g->features = le16_to_cpu(g->features);
+
+ for (i = 0; i < 32; i++)
+ g->members[i] = le16_to_cpu(g->members[i]);
+
+ break;
+
+ case VDAMGT_PART_INFO:
+ case VDAMGT_PART_MAP:
+ case VDAMGT_PART_UNMAP:
+ case VDAMGT_PART_AUTOMAP:
+ case VDAMGT_PART_SPLIT:
+ case VDAMGT_PART_MERGE:
+
+ p = (struct atto_vdapart_info *)data;
+
+ p->part_size = le64_to_cpu(p->part_size);
+ p->start_lba = le32_to_cpu(p->start_lba);
+ p->block_size = le32_to_cpu(p->block_size);
+ p->target_id = le16_to_cpu(p->target_id);
+ break;
+
+ case VDAMGT_DEV_HEALTH_REQ:
+
+ h = (struct atto_vda_dh_info *)data;
+
+ h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
+ h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
+ break;
+
+ case VDAMGT_DEV_METRICS:
+
+ m = (struct atto_vda_metrics_info *)data;
+
+ for (i = 0; i < 32; i++)
+ m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+void esas2r_nuxi_cfg_data(u8 function, void *data)
+{
+ struct atto_vda_cfg_init *ci;
+
+ switch (function) {
+ case VDA_CFG_INIT:
+ case VDA_CFG_GET_INIT:
+ case VDA_CFG_GET_INIT2:
+
+ ci = (struct atto_vda_cfg_init *)data;
+
+ ci->date_time.year = le16_to_cpu(ci->date_time.year);
+ ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size);
+ ci->vda_version = le32_to_cpu(ci->vda_version);
+ ci->epoch_time = le32_to_cpu(ci->epoch_time);
+ ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel);
+ ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae)
+{
+ struct atto_vda_ae_raid *r = &ae->raid;
+ struct atto_vda_ae_lu *l = &ae->lu;
+
+ switch (ae->hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+
+ r->dwflags = le32_to_cpu(r->dwflags);
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+
+ l->dwevent = le32_to_cpu(l->dwevent);
+ l->wphys_target_id = le16_to_cpu(l->wphys_target_id);
+ l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id);
+
+ if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+ + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) {
+ l->id.tgtlun_raid.dwinterleave
+ = le32_to_cpu(l->id.tgtlun_raid.dwinterleave);
+ l->id.tgtlun_raid.dwblock_size
+ = le32_to_cpu(l->id.tgtlun_raid.dwblock_size);
+ }
+
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ default:
+ break;
+ }
+}
+
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+ esas2r_rq_destroy_request(rq, a);
+ spin_lock_irqsave(&a->request_lock, flags);
+ list_add(&rq->comp_list, &a->avail_request);
+ spin_unlock_irqrestore(&a->request_lock, flags);
+}
+
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->request_lock, flags);
+
+ if (unlikely(list_empty(&a->avail_request))) {
+ spin_unlock_irqrestore(&a->request_lock, flags);
+ return NULL;
+ }
+
+ rq = list_first_entry(&a->avail_request, struct esas2r_request,
+ comp_list);
+ list_del(&rq->comp_list);
+ spin_unlock_irqrestore(&a->request_lock, flags);
+ esas2r_rq_init_request(rq, a);
+
+ return rq;
+}
+
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_debug("completing request %p\n", rq);
+
+ scsi_dma_unmap(rq->cmd);
+
+ if (unlikely(rq->req_stat != RS_SUCCESS)) {
+ esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id,
+ rq->req_stat,
+ rq->func_rsp.scsi_rsp.scsi_stat,
+ rq->cmd);
+
+ rq->cmd->result =
+ ((esas2r_req_status_to_error(rq->req_stat) << 16)
+ | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+
+ if (rq->req_stat == RS_UNDERRUN)
+ scsi_set_resid(rq->cmd,
+ le32_to_cpu(rq->func_rsp.scsi_rsp.
+ residual_length));
+ else
+ scsi_set_resid(rq->cmd, 0);
+ }
+
+ rq->cmd->scsi_done(rq->cmd);
+
+ esas2r_free_request(a, rq);
+}
+
+/* Tasklet handler: process deferred adapter work outside of hard interrupt context. */
+void esas2r_adapter_tasklet(unsigned long context)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+ if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) {
+ clear_bit(AF2_TIMER_TICK, &a->flags2);
+ esas2r_timer_tick(a);
+ }
+
+ if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) {
+ clear_bit(AF2_INT_PENDING, &a->flags2);
+ esas2r_adapter_interrupt(a);
+ }
+
+ if (esas2r_is_tasklet_pending(a))
+ esas2r_do_tasklet_tasks(a);
+
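+ /*
+ * If more work arrived while the handlers ran, clear the
+ * scheduled flag and re-arm the tasklet; otherwise just
+ * clear the flag.
+ */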
+ if (esas2r_is_tasklet_pending(a)
+ || (test_bit(AF2_INT_PENDING, &a->flags2))
+ || (test_bit(AF2_TIMER_TICK, &a->flags2))) {
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ esas2r_schedule_tasklet(a);
+ } else {
+ clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
+ }
+}
+
+static void esas2r_timer_callback(unsigned long context);
+
+void esas2r_kickoff_timer(struct esas2r_adapter *a)
+{
+ init_timer(&a->timer);
+
+ a->timer.function = esas2r_timer_callback;
+ a->timer.data = (unsigned long)a;
+ a->timer.expires = jiffies +
+ msecs_to_jiffies(100);
+
+ add_timer(&a->timer);
+}
+
+static void esas2r_timer_callback(unsigned long context)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+ set_bit(AF2_TIMER_TICK, &a->flags2);
+
+ esas2r_schedule_tasklet(a);
+
+ esas2r_kickoff_timer(a);
+}
+
+/*
+ * Firmware events need to be handled outside of interrupt context
+ * so we schedule a delayed_work to handle them.
+ */
+
+static void
+esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event)
+{
+ unsigned long flags;
+ struct esas2r_adapter *a = fw_event->a;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ list_del(&fw_event->list);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_off(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ a->fw_events_off = 1;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_on(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ a->fw_events_off = 0;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id)
+{
+ int ret;
+ struct scsi_device *scsi_dev;
+
+ scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+ if (scsi_dev) {
+ esas2r_log_dev(
+ ESAS2R_LOG_WARN,
+ &(scsi_dev->
+ sdev_gendev),
+ "scsi device already exists at id %d", target_id);
+
+ scsi_device_put(scsi_dev);
+ } else {
+ esas2r_log_dev(
+ ESAS2R_LOG_INFO,
+ &(a->host->
+ shost_gendev),
+ "scsi_add_device() called for 0:%d:0",
+ target_id);
+
+ ret = scsi_add_device(a->host, 0, target_id, 0);
+ if (ret) {
+ esas2r_log_dev(
+ ESAS2R_LOG_CRIT,
+ &(a->host->
+ shost_gendev),
+ "scsi_add_device failed with %d for id %d",
+ ret, target_id);
+ }
+ }
+}
+
+static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id)
+{
+ struct scsi_device *scsi_dev;
+
+ scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+ if (scsi_dev) {
+ scsi_device_set_state(scsi_dev, SDEV_OFFLINE);
+
+ esas2r_log_dev(
+ ESAS2R_LOG_INFO,
+ &(scsi_dev->
+ sdev_gendev),
+ "scsi_remove_device() called for 0:%d:0",
+ target_id);
+
+ scsi_remove_device(scsi_dev);
+
+ esas2r_log_dev(
+ ESAS2R_LOG_INFO,
+ &(scsi_dev->
+ sdev_gendev),
+ "scsi_device_put() called");
+
+ scsi_device_put(scsi_dev);
+ } else {
+ esas2r_log_dev(
+ ESAS2R_LOG_WARN,
+ &(a->host->shost_gendev),
+ "no target found at id %d",
+ target_id);
+ }
+}
+
+/*
+ * Logs a firmware asynchronous event, translating the event type into
+ * a readable description and hex-dumping the event contents.
+ */
+static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event)
+{
+ struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data;
+ char *type;
+
+ switch (ae->vda_ae.hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+ type = "RAID group state change";
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+ type = "Mapped destination LU change";
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ type = "Physical disk inventory change";
+ break;
+
+ case VDAAE_HDR_TYPE_RESET:
+ type = "Firmware reset";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_INFO:
+ type = "Event Log message (INFO level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_WARN:
+ type = "Event Log message (WARN level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_CRIT:
+ type = "Event Log message (CRIT level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_FAIL:
+ type = "Event Log message (FAIL level)";
+ break;
+
+ case VDAAE_HDR_TYPE_NVC:
+ type = "NVCache change";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_INFO:
+ type = "Time stamped log message (INFO level)";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_WARN:
+ type = "Time stamped log message (WARN level)";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_CRIT:
+ type = "Time stamped log message (CRIT level)";
+ break;
+
+ case VDAAE_HDR_TYPE_PWRMGT:
+ type = "Power management";
+ break;
+
+ case VDAAE_HDR_TYPE_MUTE:
+ type = "Mute button pressed";
+ break;
+
+ case VDAAE_HDR_TYPE_DEV:
+ type = "Device attribute change";
+ break;
+
+ default:
+ type = "Unknown";
+ break;
+ }
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "An async event of type \"%s\" was received from the firmware. The event contents are:",
+ type);
+ esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae,
+ ae->vda_ae.hdr.bylength);
+}
+
+static void
+esas2r_firmware_event_work(struct work_struct *work)
+{
+ struct esas2r_fw_event_work *fw_event =
+ container_of(work, struct esas2r_fw_event_work, work.work);
+
+ struct esas2r_adapter *a = fw_event->a;
+
+ u16 target_id = *(u16 *)&fw_event->data[0];
+
+ if (a->fw_events_off)
+ goto done;
+
+ switch (fw_event->type) {
+ case fw_event_null:
+ break; /* do nothing */
+
+ case fw_event_lun_change:
+ esas2r_remove_device(a, target_id);
+ esas2r_add_device(a, target_id);
+ break;
+
+ case fw_event_present:
+ esas2r_add_device(a, target_id);
+ break;
+
+ case fw_event_not_present:
+ esas2r_remove_device(a, target_id);
+ break;
+
+ case fw_event_vda_ae:
+ esas2r_send_ae_event(fw_event);
+ break;
+ }
+
+done:
+ esas2r_free_fw_event(fw_event);
+}
+
+void esas2r_queue_fw_event(struct esas2r_adapter *a,
+ enum fw_event_type type,
+ void *data,
+ int data_sz)
+{
+ struct esas2r_fw_event_work *fw_event;
+ unsigned long flags;
+
+ fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC);
+ if (!fw_event) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "esas2r_queue_fw_event failed to alloc");
+ return;
+ }
+
+ if (type == fw_event_vda_ae) {
+ struct esas2r_vda_ae *ae =
+ (struct esas2r_vda_ae *)fw_event->data;
+
+ ae->signature = ESAS2R_VDA_EVENT_SIG;
+ ae->bus_number = a->pcid->bus->number;
+ ae->devfn = a->pcid->devfn;
+ memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae));
+ } else {
+ memcpy(fw_event->data, data, data_sz);
+ }
+
+ fw_event->type = type;
+ fw_event->a = a;
+
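+ /*
+ * Queue the work on the current CPU with a minimal delay so the
+ * event is handled in process context shortly after it arrived.
+ */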
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ list_add_tail(&fw_event->list, &a->fw_event_list);
+ INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work);
+ queue_delayed_work_on(
+ smp_processor_id(), a->fw_event_q, &fw_event->work,
+ msecs_to_jiffies(1));
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id,
+ u8 state)
+{
+ if (state == TS_LUN_CHANGE)
+ esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id,
+ sizeof(targ_id));
+ else if (state == TS_PRESENT)
+ esas2r_queue_fw_event(a, fw_event_present, &targ_id,
+ sizeof(targ_id));
+ else if (state == TS_NOT_PRESENT)
+ esas2r_queue_fw_event(a, fw_event_not_present, &targ_id,
+ sizeof(targ_id));
+}
+
+/* Translate status to a Linux SCSI mid-layer error code */
+int esas2r_req_status_to_error(u8 req_stat)
+{
+ switch (req_stat) {
+ case RS_OVERRUN:
+ case RS_UNDERRUN:
+ case RS_SUCCESS:
+ /*
+ * NOTE: SCSI mid-layer wants a good status for a SCSI error, because
+ * it will check the scsi_stat value in the completion anyway.
+ */
+ case RS_SCSI_ERROR:
+ return DID_OK;
+
+ case RS_SEL:
+ case RS_SEL2:
+ return DID_NO_CONNECT;
+
+ case RS_RESET:
+ return DID_RESET;
+
+ case RS_ABORTED:
+ return DID_ABORT;
+
+ case RS_BUSY:
+ return DID_BUS_BUSY;
+ }
+
+ /* everything else is just an error. */
+
+ return DID_ERROR;
+}
+
+module_init(esas2r_init);
+module_exit(esas2r_exit);
diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c
new file mode 100644
index 000000000000..bf45beaad439
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_targdb.c
@@ -0,0 +1,306 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_targdb.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_targ_db_initialize(struct esas2r_adapter *a)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ memset(t, 0, sizeof(struct esas2r_target));
+
+ t->target_state = TS_NOT_PRESENT;
+ t->buffered_target_state = TS_NOT_PRESENT;
+ t->new_target_state = TS_INVALID;
+ }
+}
+
+void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify)
+{
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->target_state != TS_PRESENT)
+ continue;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_targ_db_remove(a, t);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ if (notify) {
+ esas2r_trace("remove id:%d", esas2r_targ_get_id(t,
+ a));
+ esas2r_target_state_changed(a, esas2r_targ_get_id(t,
+ a),
+ TS_NOT_PRESENT);
+ }
+ }
+}
+
+void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
+{
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ if (test_bit(AF_DISC_PENDING, &a->flags)) {
+ esas2r_trace_exit();
+ return;
+ }
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ u8 state = TS_INVALID;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ if (t->buffered_target_state != t->target_state)
+ state = t->buffered_target_state = t->target_state;
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ if (state != TS_INVALID) {
+ esas2r_trace("targ_db_report_changes:%d",
+ esas2r_targ_get_id(
+ t,
+ a));
+ esas2r_trace("state:%d", state);
+
+ esas2r_target_state_changed(a,
+ esas2r_targ_get_id(t,
+ a),
+ state);
+ }
+ }
+
+ esas2r_trace_exit();
+}
+
+struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
+ struct esas2r_disc_context *
+ dc)
+{
+ struct esas2r_target *t;
+
+ esas2r_trace_enter();
+
+ if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
+ esas2r_bugon();
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+ t = a->targetdb + dc->curr_virt_id;
+
+ if (t->target_state == TS_PRESENT) {
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+ esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name,
+ esas2r_targ_get_id(
+ t,
+ a));
+
+ if (dc->interleave == 0
+ || dc->block_size == 0) {
+ /* these are invalid values, don't create the target entry. */
+
+ esas2r_hdebug("invalid RAID group dimensions");
+
+ esas2r_trace_exit();
+
+ return NULL;
+ }
+
+ t->block_size = dc->block_size;
+ t->inter_byte = dc->interleave;
+ t->inter_block = dc->interleave / dc->block_size;
+ t->virt_targ_id = dc->curr_virt_id;
+ t->phys_targ_id = ESAS2R_TARG_ID_INV;
+
+ t->flags &= ~TF_PASS_THRU;
+ t->flags |= TF_USED;
+
+ t->identifier_len = 0;
+
+ t->target_state = TS_PRESENT;
+
+ return t;
+}
+
+struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc,
+ u8 *ident,
+ u8 ident_len)
+{
+ struct esas2r_target *t;
+
+ esas2r_trace_enter();
+
+ if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
+ esas2r_bugon();
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+ /* see if we found this device before. */
+
+ t = esas2r_targ_db_find_by_ident(a, ident, ident_len);
+
+ if (t == NULL) {
+ t = a->targetdb + dc->curr_virt_id;
+
+ if (ident_len > sizeof(t->identifier)
+ || t->target_state == TS_PRESENT) {
+ esas2r_trace_exit();
+ return NULL;
+ }
+ }
+
+ esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a),
+ dc->curr_virt_id,
+ dc->curr_phys_id);
+
+ t->block_size = 0;
+ t->inter_byte = 0;
+ t->inter_block = 0;
+ t->virt_targ_id = dc->curr_virt_id;
+ t->phys_targ_id = dc->curr_phys_id;
+ t->identifier_len = ident_len;
+
+ memcpy(t->identifier, ident, ident_len);
+
+ t->flags |= TF_PASS_THRU | TF_USED;
+
+ t->target_state = TS_PRESENT;
+
+ return t;
+}
+
+void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t)
+{
+ esas2r_trace_enter();
+
+ t->target_state = TS_NOT_PRESENT;
+
+ esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a));
+
+ esas2r_trace_exit();
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
+ u64 *sas_addr)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->sas_addr == *sas_addr)
+ return t;
+
+ return NULL;
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
+ void *identifier,
+ u8 ident_len)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (ident_len == t->identifier_len
+ && memcmp(&t->identifier[0], identifier,
+ ident_len) == 0)
+ return t;
+ }
+
+ return NULL;
+}
+
+u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id)
+{
+ u16 id = target_id + 1;
+
+ while (id < ESAS2R_MAX_TARGETS) {
+ struct esas2r_target *t = a->targetdb + id;
+
+ if (t->target_state == TS_PRESENT)
+ break;
+
+ id++;
+ }
+
+ return id;
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
+ u16 virt_id)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->target_state != TS_PRESENT)
+ continue;
+
+ if (t->virt_targ_id == virt_id)
+ return t;
+ }
+
+ return NULL;
+}
+
+u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a)
+{
+ u16 devcnt = 0;
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->target_state == TS_PRESENT)
+ devcnt++;
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ return devcnt;
+}
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
new file mode 100644
index 000000000000..30028e56df63
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -0,0 +1,524 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_vda.c
+ * esas2r driver VDA firmware interface functions
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+static u8 esas2r_vdaioctl_versions[] = {
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_FLASH_VER,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_CLI_VER,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_CFG_VER,
+ ATTO_VDA_MGT_VER,
+ ATTO_VDA_GSV_VER
+};
+
+static void clear_vda_request(struct esas2r_request *rq);
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
+/* Prepare a VDA IOCTL request to be sent to the firmware. */
+bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl_vda *vi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ u32 datalen = 0;
+ struct atto_vda_sge *firstsg = NULL;
+ u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions);
+
+ vi->status = ATTO_STS_SUCCESS;
+ vi->vda_status = RS_PENDING;
+
+ if (vi->function >= vercnt) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (vi->version > esas2r_vdaioctl_versions[vi->function]) {
+ vi->status = ATTO_STS_INV_VERSION;
+ return false;
+ }
+
+ if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
+ vi->status = ATTO_STS_DEGRADED;
+ return false;
+ }
+
+ if (vi->function != VDA_FUNC_SCSI)
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = vi->function;
+ rq->interrupt_cb = esas2r_complete_vda_ioctl;
+ rq->interrupt_cx = vi;
+
+ switch (vi->function) {
+ case VDA_FUNC_FLASH:
+
+ if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD
+ && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE
+ && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO)
+ datalen = vi->data_length;
+
+ rq->vrq->flash.length = cpu_to_le32(datalen);
+ rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
+
+ memcpy(rq->vrq->flash.data.file.file_name,
+ vi->cmd.flash.data.file.file_name,
+ sizeof(vi->cmd.flash.data.file.file_name));
+
+ firstsg = rq->vrq->flash.data.file.sge;
+ break;
+
+ case VDA_FUNC_CLI:
+
+ datalen = vi->data_length;
+
+ rq->vrq->cli.cmd_rsp_len =
+ cpu_to_le32(vi->cmd.cli.cmd_rsp_len);
+ rq->vrq->cli.length = cpu_to_le32(datalen);
+
+ firstsg = rq->vrq->cli.sge;
+ break;
+
+ case VDA_FUNC_MGT:
+ {
+ u8 *cmdcurr_offset = sgc->cur_offset
+ - offsetof(struct atto_ioctl_vda, data)
+ + offsetof(struct atto_ioctl_vda, cmd)
+ + offsetof(struct atto_ioctl_vda_mgt_cmd,
+ data);
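+ /*
+ * cmdcurr_offset rebases the caller's offset from the
+ * ioctl data area onto the management command's
+ * embedded data buffer.
+ */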
+ /*
+ * build the data payload SGL here first since
+ * esas2r_sgc_init() will modify the S/G list offset for the
+ * management SGL (which is built below where the data SGL is
+ * usually built).
+ */
+
+ if (vi->data_length) {
+ u32 payldlen = 0;
+
+ if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ
+ || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) {
+ rq->vrq->mgt.payld_sglst_offset =
+ (u8)offsetof(struct atto_vda_mgmt_req,
+ payld_sge);
+
+ payldlen = vi->data_length;
+ datalen = vi->cmd.mgt.data_length;
+ } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2
+ || vi->cmd.mgt.mgt_func ==
+ VDAMGT_DEV_INFO2_BYADDR) {
+ datalen = vi->data_length;
+ cmdcurr_offset = sgc->cur_offset;
+ } else {
+ vi->status = ATTO_STS_INV_PARAM;
+ return false;
+ }
+
+ /* Setup the length so building the payload SGL works */
+ rq->vrq->mgt.length = cpu_to_le32(datalen);
+
+ if (payldlen) {
+ rq->vrq->mgt.payld_length =
+ cpu_to_le32(payldlen);
+
+ esas2r_sgc_init(sgc, a, rq,
+ rq->vrq->mgt.payld_sge);
+ sgc->length = payldlen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ vi->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+ } else {
+ datalen = vi->cmd.mgt.data_length;
+
+ rq->vrq->mgt.length = cpu_to_le32(datalen);
+ }
+
+ /*
+ * Now that the payload SGL is built, if any, setup to build
+ * the management SGL.
+ */
+ firstsg = rq->vrq->mgt.sge;
+ sgc->cur_offset = cmdcurr_offset;
+
+ /* Finish initializing the management request. */
+ rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
+ rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
+ rq->vrq->mgt.dev_index =
+ cpu_to_le32(vi->cmd.mgt.dev_index);
+
+ esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
+ break;
+ }
+
+ case VDA_FUNC_CFG:
+
+ if (vi->data_length
+ || vi->cmd.cfg.data_length == 0) {
+ vi->status = ATTO_STS_INV_PARAM;
+ return false;
+ }
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
+ rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
+ memcpy(&rq->vrq->cfg.data,
+ &vi->cmd.cfg.data,
+ vi->cmd.cfg.data_length);
+
+ esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
+ &rq->vrq->cfg.data);
+ } else {
+ vi->status = ATTO_STS_INV_FUNC;
+
+ return false;
+ }
+
+ break;
+
+ case VDA_FUNC_GSV:
+
+ vi->cmd.gsv.rsp_len = vercnt;
+
+ memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions,
+ vercnt);
+
+ vi->vda_status = RS_SUCCESS;
+ break;
+
+ default:
+
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (datalen) {
+ esas2r_sgc_init(sgc, a, rq, firstsg);
+ sgc->length = datalen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ vi->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;
+
+ vi->vda_status = rq->req_stat;
+
+ switch (vi->function) {
+ case VDA_FUNC_FLASH:
+
+ if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO
+ || vi->cmd.flash.sub_func == VDA_FLASH_FREAD)
+ vi->cmd.flash.data.file.file_size =
+ le32_to_cpu(rq->func_rsp.flash_rsp.file_size);
+
+ break;
+
+ case VDA_FUNC_MGT:
+
+ vi->cmd.mgt.scan_generation =
+ rq->func_rsp.mgt_rsp.scan_generation;
+ vi->cmd.mgt.dev_index = le16_to_cpu(
+ rq->func_rsp.mgt_rsp.dev_index);
+
+ if (vi->data_length == 0)
+ vi->cmd.mgt.data_length =
+ le32_to_cpu(rq->func_rsp.mgt_rsp.length);
+
+ esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
+ break;
+
+ case VDA_FUNC_CFG:
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
+ struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
+ struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+ char buf[sizeof(cfg->data.init.fw_release) + 1];
+
+ cfg->data_length =
+ cpu_to_le32(sizeof(struct atto_vda_cfg_init));
+ cfg->data.init.vda_version =
+ le32_to_cpu(rsp->vda_version);
+ cfg->data.init.fw_build = rsp->fw_build;
+
+ snprintf(buf, sizeof(buf), "%1.1u.%2.2u",
+ (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
+ (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+
+ memcpy(&cfg->data.init.fw_release, buf,
+ sizeof(cfg->data.init.fw_release));
+
+ if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
+ cfg->data.init.fw_version =
+ cfg->data.init.fw_build;
+ else
+ cfg->data.init.fw_version =
+ cfg->data.init.fw_release;
+ } else {
+ esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
+ &vi->cmd.cfg.data);
+ }
+
+ break;
+
+ case VDA_FUNC_CLI:
+
+ vi->cmd.cli.cmd_rsp_len =
+ le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
+ break;
+
+ default:
+
+ break;
+ }
+}
+
+/* Build a flash VDA request. */
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 cksum,
+ u32 addr,
+ u32 length)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_FLASH;
+
+ if (sub_func == VDA_FLASH_BEGINW
+ || sub_func == VDA_FLASH_WRITE
+ || sub_func == VDA_FLASH_READ)
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
+ data.sge);
+
+ vrq->length = cpu_to_le32(length);
+ vrq->flash_addr = cpu_to_le32(addr);
+ vrq->checksum = cksum;
+ vrq->sub_func = sub_func;
+}
+
+/* Build a VDA management request. */
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 scan_gen,
+ u16 dev_index,
+ u32 length,
+ void *data)
+{
+ struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_MGT;
+
+ vrq->mgt_func = sub_func;
+ vrq->scan_generation = scan_gen;
+ vrq->dev_index = cpu_to_le16(dev_index);
+ vrq->length = cpu_to_le32(length);
+
+ if (vrq->length) {
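+ /*
+ * The management data sits in the DMA region immediately
+ * after the VDA request; describe it with a legacy SGE or
+ * a PRDE depending on the adapter's SGE mode.
+ */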
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
+ vrq->sg_list_offset = (u8)offsetof(
+ struct atto_vda_mgmt_req, sge);
+
+ vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
+ vrq->sge[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ } else {
+ vrq->sg_list_offset = (u8)offsetof(
+ struct atto_vda_mgmt_req, prde);
+
+ vrq->prde[0].ctl_len = cpu_to_le32(length);
+ vrq->prde[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ }
+ }
+
+ if (data) {
+ esas2r_nuxi_mgt_data(sub_func, data);
+
+ memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
+ length);
+ }
+}
+
+/* Build a VDA asynchronous event (AE) request. */
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ struct atto_vda_ae_req *vrq = &rq->vrq->ae;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_AE;
+
+ vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
+
+ if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
+ vrq->sg_list_offset =
+ (u8)offsetof(struct atto_vda_ae_req, sge);
+ vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
+ vrq->sge[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ } else {
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
+ prde);
+ vrq->prde[0].ctl_len = cpu_to_le32(vrq->length);
+ vrq->prde[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ }
+}
+
+/* Build a VDA CLI request. */
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u32 cmd_rsp_len)
+{
+ struct atto_vda_cli_req *vrq = &rq->vrq->cli;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_CLI;
+
+ vrq->length = cpu_to_le32(length);
+ vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
+}
+
+/* Build a VDA IOCTL request. */
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u8 sub_func)
+{
+ struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_IOCTL;
+
+ vrq->length = cpu_to_le32(length);
+ vrq->sub_func = sub_func;
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
+}
+
+/* Build a VDA configuration request. */
+void esas2r_build_cfg_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u32 length,
+ void *data)
+{
+ struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_CFG;
+
+ vrq->sub_func = sub_func;
+ vrq->length = cpu_to_le32(length);
+
+ if (data) {
+ esas2r_nuxi_cfg_data(sub_func, data);
+
+ memcpy(&vrq->data, data, length);
+ }
+}
+
+static void clear_vda_request(struct esas2r_request *rq)
+{
+ u32 handle = rq->vrq->scsi.handle;
+
+ memset(rq->vrq, 0, sizeof(*rq->vrq));
+
+ rq->vrq->scsi.handle = handle;
+
+ rq->req_stat = RS_PENDING;
+
+ /* since the data buffer is separate clear that too */
+
+ memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
+
+ /*
+ * Setup next and prev pointer in case the request is not going through
+ * esas2r_start_request().
+ */
+
+ INIT_LIST_HEAD(&rq->req_list);
+}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 34552bf1c023..55548dc5cec3 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp)
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
struct esp_lun_data *lp)
{
- if (!ent->tag[0]) {
+ if (!ent->orig_tag[0]) {
/* Non-tagged, slot already taken? */
if (lp->non_tagged_cmd)
return -EBUSY;
@@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
return -EBUSY;
}
- BUG_ON(lp->tagged_cmds[ent->tag[1]]);
+ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
- lp->tagged_cmds[ent->tag[1]] = ent;
+ lp->tagged_cmds[ent->orig_tag[1]] = ent;
lp->num_tagged++;
return 0;
@@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
static void esp_free_lun_tag(struct esp_cmd_entry *ent,
struct esp_lun_data *lp)
{
- if (ent->tag[0]) {
- BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
- lp->tagged_cmds[ent->tag[1]] = NULL;
+ if (ent->orig_tag[0]) {
+ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
+ lp->tagged_cmds[ent->orig_tag[1]] = NULL;
lp->num_tagged--;
} else {
BUG_ON(lp->non_tagged_cmd != ent);
@@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
ent->tag[0] = 0;
ent->tag[1] = 0;
}
+ ent->orig_tag[0] = ent->tag[0];
+ ent->orig_tag[1] = ent->tag[1];
if (esp_alloc_lun_tag(ent, lp) < 0)
continue;
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 28e22acf87ea..cd68805e8d78 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -271,6 +271,7 @@ struct esp_cmd_entry {
#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
u8 tag[2];
+ u8 orig_tag[2];
u8 status;
u8 message;
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index c18c68150e9f..528d43b7b569 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,6 +27,7 @@
#include "fnic_io.h"
#include "fnic_res.h"
#include "fnic_trace.h"
+#include "fnic_stats.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
@@ -38,11 +39,13 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.5.0.23"
+#define DRV_VERSION "1.5.0.45"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
#define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
+#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 32
@@ -154,6 +157,9 @@ do { \
FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
+#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \
+ shost_printk(kern_level, host, fmt, ##args)
+
extern const char *fnic_state_str[];
enum fnic_intx_intr_index {
@@ -215,16 +221,25 @@ struct fnic {
struct vnic_stats *stats;
unsigned long stats_time; /* time of stats update */
+ unsigned long stats_reset_time; /* time of stats reset */
struct vnic_nic_cfg *nic_cfg;
char name[IFNAMSIZ];
struct timer_list notify_timer; /* used for MSI interrupts */
+ unsigned int fnic_max_tag_id;
unsigned int err_intr_offset;
unsigned int link_intr_offset;
unsigned int wq_count;
unsigned int cq_count;
+ struct dentry *fnic_stats_debugfs_host;
+ struct dentry *fnic_stats_debugfs_file;
+ struct dentry *fnic_reset_debugfs_file;
+ unsigned int reset_stats;
+ atomic64_t io_cmpl_skip;
+ struct fnic_stats fnic_stats;
+
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
@@ -359,4 +374,5 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
return ((fnic->state_flags & st_flags) == st_flags);
}
void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
+void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index cbcb0121c84d..b6073f875761 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -23,6 +23,58 @@
static struct dentry *fnic_trace_debugfs_root;
static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable;
+static struct dentry *fnic_stats_debugfs_root;
+
+/*
+ * fnic_debugfs_init - Initialize debugfs for fnic debug logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory and statistics directory for trace buffer and
+ * stats logging.
+ */
+int fnic_debugfs_init(void)
+{
+ int rc = -1;
+ fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create debugfs root\n");
+ return rc;
+ }
+
+ fnic_stats_debugfs_root = debugfs_create_dir("statistics",
+ fnic_trace_debugfs_root);
+ if (!fnic_stats_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create Statistics directory\n");
+ return rc;
+ }
+
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic.
+ */
+void fnic_debugfs_terminate(void)
+{
+ debugfs_remove(fnic_stats_debugfs_root);
+ fnic_stats_debugfs_root = NULL;
+
+ debugfs_remove(fnic_trace_debugfs_root);
+ fnic_trace_debugfs_root = NULL;
+}
/*
* fnic_trace_ctrl_open - Open the trace_enable file
@@ -241,16 +293,16 @@ static const struct file_operations fnic_trace_debugfs_fops = {
* Description:
* When Debugfs is configured this routine sets up the fnic debugfs
* file system. If not already created, this routine will create the
- * fnic directory. It will create file trace to log fnic trace buffer
- * output into debugfs and it will also create file trace_enable to
- * control enable/disable of trace logging into trace buffer.
+ * file trace to log fnic trace buffer output into debugfs, and it
+ * will also create the file trace_enable to control enable/disable
+ * of trace logging into the trace buffer.
*/
int fnic_trace_debugfs_init(void)
{
int rc = -1;
- fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
if (!fnic_trace_debugfs_root) {
- printk(KERN_DEBUG "Cannot create debugfs root\n");
+ printk(KERN_DEBUG
+ "FNIC Debugfs root directory doesn't exist\n");
return rc;
}
fnic_trace_enable = debugfs_create_file("tracing_enable",
@@ -259,8 +311,8 @@ int fnic_trace_debugfs_init(void)
NULL, &fnic_trace_ctrl_fops);
if (!fnic_trace_enable) {
- printk(KERN_DEBUG "Cannot create trace_enable file"
- " under debugfs");
+ printk(KERN_DEBUG
+ "Cannot create trace_enable file under debugfs\n");
return rc;
}
@@ -271,7 +323,8 @@ int fnic_trace_debugfs_init(void)
&fnic_trace_debugfs_fops);
if (!fnic_trace_debugfs_file) {
- printk(KERN_DEBUG "Cannot create trace file under debugfs");
+ printk(KERN_DEBUG
+ "Cannot create trace file under debugfs\n");
return rc;
}
rc = 0;
@@ -295,8 +348,323 @@ void fnic_trace_debugfs_terminate(void)
debugfs_remove(fnic_trace_enable);
fnic_trace_enable = NULL;
}
- if (fnic_trace_debugfs_root) {
- debugfs_remove(fnic_trace_debugfs_root);
- fnic_trace_debugfs_root = NULL;
+}
+
+/*
+ * fnic_reset_stats_open - Open the reset_stats file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the stats reset flag.
+ *
+ * Description:
+ * This routine opens a debugfs file reset_stats and stores the
+ * i_private data in the debug structure so it can be retrieved later
+ * while performing other file operations.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_reset_stats_open(struct inode *inode, struct file *file)
+{
+ struct stats_debug_info *debug;
+
+ debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->i_private = inode->i_private;
+
+ file->private_data = debug;
+
+ return 0;
+}
+
+/*
+ * fnic_reset_stats_read - Read a reset_stats debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the reset_stats variable into a
+ * local buffer @buf. It starts reading the file at @ppos and copies
+ * up to @cnt bytes from @buf to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_reset_stats_read(struct file *file,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ struct fnic *fnic = (struct fnic *)debug->i_private;
+ char buf[64];
+ int len;
+
+ len = sprintf(buf, "%u\n", fnic->reset_stats);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_reset_stats_write - Write to reset_stats debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine copies data from the user buffer @ubuf into @buf and
+ * resets the cumulative fnic stats.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_reset_stats_write(struct file *file,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ struct fnic *fnic = (struct fnic *)debug->i_private;
+ struct fnic_stats *stats = &fnic->fnic_stats;
+ u64 *io_stats_p = (u64 *)&stats->io_stats;
+ u64 *fw_stats_p = (u64 *)&stats->fw_stats;
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ fnic->reset_stats = val;
+
+ if (fnic->reset_stats) {
+ /* The skip count avoids discrepancies between the Num IOs
+ * and IO Completions stats: completions for IOs that were
+ * already active when the stats were reset are not counted.
+ */
+ atomic64_set(&fnic->io_cmpl_skip,
+ atomic64_read(&stats->io_stats.active_ios));
+ memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
+ memset(&stats->term_stats, 0,
+ sizeof(struct terminate_stats));
+ memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
+ memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
+ memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
+ memset(io_stats_p+1, 0,
+ sizeof(struct io_path_stats) - sizeof(u64));
+ memset(fw_stats_p+1, 0,
+ sizeof(struct fw_stats) - sizeof(u64));
}
+
+ (*ppos)++;
+ return cnt;
+}
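
The memset() calls above deliberately start at io_stats_p + 1 and fw_stats_p + 1 so that the first u64-sized member of each struct (active_ios and active_fw_reqs) survives the reset while everything behind it is zeroed. A minimal user-space sketch of that pointer arithmetic, using a simplified stand-in struct (illustrative only, not driver code):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct io_path_stats_sketch {		/* simplified stand-in struct */
		uint64_t active_ios;		/* must survive a stats reset */
		uint64_t max_active_ios;	/* zeroed */
		uint64_t io_completions;	/* zeroed */
	};

	int main(void)
	{
		struct io_path_stats_sketch s = { 7, 42, 1000 };
		uint64_t *p = (uint64_t *)&s;

		/* skip the first u64, clear the remainder of the struct */
		memset(p + 1, 0, sizeof(s) - sizeof(uint64_t));

		printf("active_ios=%llu max=%llu compls=%llu\n",
		       (unsigned long long)s.active_ios,
		       (unsigned long long)s.max_active_ios,
		       (unsigned long long)s.io_completions);
		return 0;	/* prints: active_ios=7 max=0 compls=0 */
	}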
+
+/*
+ * fnic_reset_stats_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_reset_stats_release(struct inode *inode,
+ struct file *file)
+{
+ struct stats_debug_info *debug = file->private_data;
+ kfree(debug);
+ return 0;
+}
+
+/*
+ * fnic_stats_debugfs_open - Open the stats file for a specific host
+ * and snapshot its fnic stats.
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the specific host statistics.
+ *
+ * Description:
+ * This routine opens the debugfs stats file of a specific host and
+ * formats the fnic stats into the file's buffer.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_stats_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ struct fnic *fnic = inode->i_private;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct stats_debug_info *debug;
+ int buf_size = 2 * PAGE_SIZE;
+
+ debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->debug_buffer = vmalloc(buf_size);
+ if (!debug->debug_buffer) {
+ kfree(debug);
+ return -ENOMEM;
+ }
+
+ debug->buf_size = buf_size;
+ memset((void *)debug->debug_buffer, 0, buf_size);
+ debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+
+ file->private_data = debug;
+
+ return 0;
+}
+
+/*
+ * fnic_stats_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_stats_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ struct stats_debug_info *debug = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ debug->debug_buffer,
+ debug->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_stats_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_stats_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ struct stats_debug_info *debug = file->private_data;
+ vfree(debug->debug_buffer);
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations fnic_stats_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_stats_debugfs_open,
+ .read = fnic_stats_debugfs_read,
+ .release = fnic_stats_debugfs_release,
+};
+
+static const struct file_operations fnic_reset_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_reset_stats_open,
+ .read = fnic_reset_stats_read,
+ .write = fnic_reset_stats_write,
+ .release = fnic_reset_stats_release,
+};
+
+/*
+ * fnic_stats_debugfs_init - Initialize debugfs for fnic stats logging
+ *
+ * Description:
+ * When debugfs is configured this routine sets up the per-fnic stats
+ * files. It creates the files stats and reset_stats under the
+ * statistics/host# directory to log per-fnic stats.
+ */
+int fnic_stats_debugfs_init(struct fnic *fnic)
+{
+ int rc = -1;
+ char name[16];
+
+ snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+
+ if (!fnic_stats_debugfs_root) {
+ printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
+ return rc;
+ }
+ fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
+ fnic_stats_debugfs_root);
+ if (!fnic->fnic_stats_debugfs_host) {
+ printk(KERN_DEBUG "Cannot create host directory\n");
+ return rc;
+ }
+
+ fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic->fnic_stats_debugfs_host,
+ fnic,
+ &fnic_stats_debugfs_fops);
+ if (!fnic->fnic_stats_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create host stats file\n");
+ return rc;
+ }
+
+ fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic->fnic_stats_debugfs_host,
+ fnic,
+ &fnic_reset_debugfs_fops);
+ if (!fnic->fnic_reset_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create host stats file\n");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
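
Once fnic_stats_debugfs_init() has run, user space can read the snapshot and trigger a reset through the two files it creates. A hypothetical example follows; the /sys/kernel/debug mount point and the fnic/statistics directory layout are assumptions based on the naming in this patch, not guaranteed paths:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		/* assumed path: debugfs mounted at /sys/kernel/debug with
		 * the fnic root directory named as in this patch */
		fd = open("/sys/kernel/debug/fnic/statistics/host0/stats",
			  O_RDONLY);
		if (fd < 0) {
			perror("open stats");
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);	/* dump the snapshot */
		close(fd);

		fd = open("/sys/kernel/debug/fnic/statistics/host0/reset_stats",
			  O_WRONLY);
		if (fd < 0) {
			perror("open reset_stats");
			return 1;
		}
		if (write(fd, "1\n", 2) != 2)	/* kstrtoul() parses "1" */
			perror("write reset_stats");
		close(fd);
		return 0;
	}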
+
+/*
+ * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic stats.
+ */
+void fnic_stats_debugfs_remove(struct fnic *fnic)
+{
+ if (!fnic)
+ return;
+
+ debugfs_remove(fnic->fnic_stats_debugfs_file);
+ fnic->fnic_stats_debugfs_file = NULL;
+
+ debugfs_remove(fnic->fnic_reset_debugfs_file);
+ fnic->fnic_reset_debugfs_file = NULL;
+
+ debugfs_remove(fnic->fnic_stats_debugfs_host);
+ fnic->fnic_stats_debugfs_host = NULL;
}
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 902520c65387..1671325aec7f 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -302,6 +302,7 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
struct fcoe_ctlr *fip = &fnic->ctlr;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct sk_buff *skb;
char *eth_fr;
int fr_len;
@@ -337,6 +338,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+ atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
skb_put(skb, sizeof(*vlan));
skb->protocol = htons(ETH_P_FIP);
@@ -354,6 +356,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
struct fcoe_ctlr *fip = &fnic->ctlr;
struct fip_header *fiph;
struct fip_desc *desc;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u16 vid;
size_t rlen;
size_t dlen;
@@ -402,6 +405,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
/* any VLAN descriptors present ? */
if (list_empty(&fnic->vlans)) {
/* retry from timer */
+ atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"No VLAN descriptors in FIP VLAN response\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
@@ -533,6 +537,7 @@ drop:
void fnic_handle_fip_frame(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags;
struct sk_buff *skb;
struct ethhdr *eh;
@@ -567,6 +572,8 @@ void fnic_handle_fip_frame(struct work_struct *work)
* fcf's & restart from scratch
*/
if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+ atomic64_inc(
+ &fnic_stats->vlan_stats.flogi_rejects);
shost_printk(KERN_INFO, fnic->lport->host,
"Trigger a Link down - VLAN Disc\n");
fcoe_ctlr_link_down(&fnic->ctlr);
@@ -753,6 +760,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb;
struct fc_frame *fp;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned int eth_hdrs_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof;
@@ -803,6 +811,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
eth_hdrs_stripped = 0;
skb_trim(skb, bytes_written);
if (!fcs_ok) {
+ atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fcs error. dropping packet.\n");
goto drop;
@@ -818,6 +827,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
}
if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+ atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fnic rq_cmpl fcoe x%x fcsok x%x"
" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
@@ -1205,6 +1215,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
{
unsigned long flags;
struct fcoe_vlan *vlan;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u64 sol_time;
spin_lock_irqsave(&fnic->fnic_lock, flags);
@@ -1273,6 +1284,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
vlan->state = FIP_VLAN_SENT; /* sent now */
}
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
vlan->sol_count++;
sol_time = jiffies + msecs_to_jiffies
(FCOE_CTLR_START_DELAY);
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 5c1f223cabce..7d9b54ae7f62 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -37,6 +37,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
if (!pba)
return IRQ_NONE;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
if (pba & (1 << FNIC_INTX_NOTIFY)) {
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
fnic_handle_link_event(fnic);
@@ -66,6 +69,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
struct fnic *fnic = data;
unsigned long work_done = 0;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
@@ -83,6 +89,9 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
struct fnic *fnic = data;
unsigned long rq_work_done = 0;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
rq_work_done,
@@ -97,6 +106,9 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
struct fnic *fnic = data;
unsigned long wq_work_done = 0;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
wq_work_done,
@@ -110,6 +122,9 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
struct fnic *fnic = data;
unsigned long wq_copy_work_done = 0;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
wq_copy_work_done,
@@ -122,6 +137,9 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{
struct fnic *fnic = data;
+ fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+ atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
fnic_log_q_error(fnic);
fnic_handle_link_event(fnic);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 42e15ee6e1bb..be09b101b4a1 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -74,6 +74,10 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
"for fnic trace buffer");
+static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
+module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
+
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
.lport_set_port_id = fnic_set_port_id,
@@ -91,7 +95,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
- scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
+ scsi_activate_tcq(sdev, fnic_max_qdepth);
return 0;
}
@@ -126,6 +130,7 @@ fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
static void fnic_get_host_speed(struct Scsi_Host *shost);
static struct scsi_transport_template *fnic_fc_transport;
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+static void fnic_reset_host_stats(struct Scsi_Host *);
static struct fc_function_template fnic_fc_functions = {
@@ -153,6 +158,7 @@ static struct fc_function_template fnic_fc_functions = {
.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
.issue_fc_host_lip = fnic_reset,
.get_fc_host_stats = fnic_get_stats,
+ .reset_fc_host_stats = fnic_reset_host_stats,
.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
.terminate_rport_io = fnic_terminate_rport_io,
.bsg_request = fc_lport_bsg_request,
@@ -206,13 +212,116 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
stats->invalid_crc_count = vs->rx.rx_crc_errors;
- stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
+ stats->seconds_since_last_reset =
+ (jiffies - fnic->stats_reset_time) / HZ;
stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
return stats;
}
+/*
+ * fnic_dump_fchost_stats
+ * note: dumps the fc_host statistics into the system logs
+ */
+void fnic_dump_fchost_stats(struct Scsi_Host *host,
+ struct fc_host_statistics *stats)
+{
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: seconds since last reset = %llu\n",
+ stats->seconds_since_last_reset);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: tx frames = %llu\n",
+ stats->tx_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: tx words = %llu\n",
+ stats->tx_words);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: rx frames = %llu\n",
+ stats->rx_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: rx words = %llu\n",
+ stats->rx_words);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: lip count = %llu\n",
+ stats->lip_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: nos count = %llu\n",
+ stats->nos_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: error frames = %llu\n",
+ stats->error_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: dumped frames = %llu\n",
+ stats->dumped_frames);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: link failure count = %llu\n",
+ stats->link_failure_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: loss of sync count = %llu\n",
+ stats->loss_of_sync_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: loss of signal count = %llu\n",
+ stats->loss_of_signal_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: prim seq protocol err count = %llu\n",
+ stats->prim_seq_protocol_err_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: invalid tx word count= %llu\n",
+ stats->invalid_tx_word_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: invalid crc count = %llu\n",
+ stats->invalid_crc_count);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp input requests = %llu\n",
+ stats->fcp_input_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp output requests = %llu\n",
+ stats->fcp_output_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp control requests = %llu\n",
+ stats->fcp_control_requests);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp input megabytes = %llu\n",
+ stats->fcp_input_megabytes);
+ FNIC_MAIN_NOTE(KERN_NOTICE, host,
+ "fnic: fcp output megabytes = %llu\n",
+ stats->fcp_output_megabytes);
+ return;
+}
+
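fnic_dump_fchost_stats() repeats the same FNIC_MAIN_NOTE() call for every field. A hypothetical table-driven alternative (not part of the patch) that would shrink the function, shown here for three of the fields and assuming the driver's FNIC_MAIN_NOTE() and the kernel's ARRAY_SIZE() are in scope:

	#include <linux/kernel.h>		/* ARRAY_SIZE() */
	#include <linux/stddef.h>		/* offsetof() */
	#include <scsi/scsi_transport_fc.h>	/* struct fc_host_statistics */

	struct fc_stat_desc {
		const char *name;
		size_t offset;	/* offsetof() into struct fc_host_statistics */
	};

	static const struct fc_stat_desc fc_stat_tbl[] = {
		{ "tx frames", offsetof(struct fc_host_statistics, tx_frames) },
		{ "rx frames", offsetof(struct fc_host_statistics, rx_frames) },
		{ "error frames",
		  offsetof(struct fc_host_statistics, error_frames) },
		/* remaining fields would follow the same pattern */
	};

	static void fnic_dump_fchost_stats_sketch(struct Scsi_Host *host,
						  struct fc_host_statistics *st)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(fc_stat_tbl); i++)
			FNIC_MAIN_NOTE(KERN_NOTICE, host, "fnic: %s = %llu\n",
				       fc_stat_tbl[i].name,
				       *(u64 *)((char *)st +
						fc_stat_tbl[i].offset));
	}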
+/*
+ * fnic_reset_host_stats : clears host stats
+ * note: called when reset_statistics is written under the sysfs dir
+ */
+static void fnic_reset_host_stats(struct Scsi_Host *host)
+{
+ int ret;
+ struct fc_lport *lp = shost_priv(host);
+ struct fnic *fnic = lport_priv(lp);
+ struct fc_host_statistics *stats;
+ unsigned long flags;
+
+ /* dump current stats, before clearing them */
+ stats = fnic_get_stats(host);
+ fnic_dump_fchost_stats(host, stats);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ ret = vnic_dev_stats_clear(fnic->vdev);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (ret) {
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic: Reset vnic stats failed"
+ " 0x%x", ret);
+ return;
+ }
+ fnic->stats_reset_time = jiffies;
+ memset(stats, 0, sizeof(*stats));
+
+ return;
+}
+
void fnic_log_q_error(struct fnic *fnic)
{
unsigned int i;
@@ -447,11 +556,11 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
host->transportt = fnic_fc_transport;
- err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
+ err = fnic_stats_debugfs_init(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to alloc shared tag map\n");
- goto err_out_free_hba;
+ "Failed to initialize debugfs for stats\n");
+ fnic_stats_debugfs_remove(fnic);
}
/* Setup PCI resources */
@@ -476,10 +585,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 40-bit first, and
+ * limitation for the device. Try 64-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
@@ -496,10 +605,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to obtain 40-bit DMA "
+ "Unable to obtain 64-bit DMA "
"for consistent allocations, aborting.\n");
goto err_out_release_regions;
}
@@ -566,6 +675,22 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"aborting.\n");
goto err_out_dev_close;
}
+
+ /* Configure maximum outstanding IO requests */
+ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
+ host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+ max_t(u32, FNIC_MIN_IO_REQ,
+ fnic->config.io_throttle_count));
+ }
+ fnic->fnic_max_tag_id = host->can_queue;
+
+ err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to alloc shared tag map\n");
+ goto err_out_dev_close;
+ }
+
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
host->max_cmd_len = FCOE_MAX_CMD_LEN;
@@ -719,6 +844,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
fc_lport_init_stats(lp);
+ fnic->stats_reset_time = jiffies;
fc_lport_config(lp);
@@ -798,6 +924,7 @@ err_out_release_regions:
err_out_disable_device:
pci_disable_device(pdev);
err_out_free_hba:
+ fnic_stats_debugfs_remove(fnic);
scsi_host_put(lp->host);
err_out:
return err;
@@ -850,6 +977,7 @@ static void fnic_remove(struct pci_dev *pdev)
fcoe_ctlr_destroy(&fnic->ctlr);
fc_lport_destroy(lp);
+ fnic_stats_debugfs_remove(fnic);
/*
* This stops the fnic device, masks all interrupts. Completed
@@ -895,6 +1023,14 @@ static int __init fnic_init_module(void)
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ /* Create debugfs entries for fnic */
+ err = fnic_debugfs_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Failed to create fnic directory "
+ "for tracing and stats logging\n");
+ fnic_debugfs_terminate();
+ }
+
/* Allocate memory for trace buffer */
err = fnic_trace_buf_init();
if (err < 0) {
@@ -983,6 +1119,7 @@ err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
fnic_trace_free();
+ fnic_debugfs_terminate();
return err;
}
@@ -999,6 +1136,7 @@ static void __exit fnic_cleanup_module(void)
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
fnic_trace_free();
+ fnic_debugfs_terminate();
}
module_init(fnic_init_module);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index a97e6e584f8c..0521436d05d6 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
return &fnic->io_req_lock[hash];
}
+static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
+ int tag)
+{
+ return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
+}
+
/*
* Unmap the data buffer and sense buffer for an io_req,
* also unmap and free the device-private scatter/gather list.
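
The fnic_io_lock_tag() helper added above lets the cleanup and reset paths take the per-IO lock from the tag alone, before scsi_host_find_tag() is called, instead of deriving it from the command afterwards as fnic_io_lock_hash() does. The mask only works if FNIC_IO_LOCKS is a power of two; a quick user-space check of that assumption (the lock count here is a placeholder):

	#include <assert.h>

	#define IO_LOCKS 64	/* placeholder; assumed to be a power of two */

	static unsigned int io_lock_index(int tag)
	{
		return tag & (IO_LOCKS - 1);	/* cheap modulo for 2^n sizes */
	}

	int main(void)
	{
		int tag;

		for (tag = 0; tag < 4096; tag++)
			assert(io_lock_index(tag) ==
			       (unsigned int)(tag % IO_LOCKS));
		return 0;
	}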
@@ -220,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN;
- else
+ else {
fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(
+ &fnic->fnic_stats.fw_stats.active_fw_reqs));
+ }
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
- if (!ret)
+ if (!ret) {
+ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
- else {
+ } else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
@@ -285,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
fc_id, fnic->ctlr.map_dest, gw_mac);
}
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
flogi_reg_ioreq_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
return ret;
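
The read/compare/set sequence used above to maintain max_fw_reqs recurs at every submission site in this patch. A hypothetical helper capturing the pattern (not in the patch); note that the sequence is not atomic as a whole, so the high-water mark is best-effort under concurrent updaters:

	#include <linux/atomic.h>

	/* hypothetical helper, mirroring the open-coded pattern above */
	static inline void fnic_track_max(atomic64_t *cur, atomic64_t *max)
	{
		if (atomic64_read(cur) > atomic64_read(max))
			atomic64_set(max, atomic64_read(cur));
	}

Each call site would then reduce to atomic64_inc(&fw_stats->active_fw_reqs) followed by fnic_track_max(&fw_stats->active_fw_reqs, &fw_stats->max_fw_reqs).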
@@ -304,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct host_sg_desc *desc;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
u8 pri_tag = 0;
unsigned int i;
unsigned long intr_flags;
@@ -352,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"fnic_queue_wq_copy_desc failure - no descriptors\n");
+ atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -380,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
rport->maxframe_size, rp->r_a_tov,
rp->e_d_tov);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
return 0;
}
@@ -395,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
struct fnic *fnic = lport_priv(lp);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq;
int ret;
u64 cmd_trace;
@@ -408,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
done(sc);
return 0;
@@ -430,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -456,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
GFP_ATOMIC);
if (!io_req->sgl_list) {
+ atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
scsi_dma_unmap(sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -503,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
mempool_free(io_req, fnic->io_req_pool);
}
} else {
+ atomic64_inc(&fnic_stats->io_stats.active_ios);
+ atomic64_inc(&fnic_stats->io_stats.num_ios);
+ if (atomic64_read(&fnic_stats->io_stats.active_ios) >
+ atomic64_read(&fnic_stats->io_stats.max_active_ios))
+ atomic64_set(&fnic_stats->io_stats.max_active_ios,
+ atomic64_read(&fnic_stats->io_stats.active_ios));
+
/* REVISIT: Use per IO lock in the final code */
CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
}
@@ -536,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
struct fcpio_tag tag;
int ret = 0;
unsigned long flags;
+ struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
+ atomic64_inc(&reset_stats->fw_reset_completions);
+
/* Clean up all outstanding io requests */
fnic_cleanup_io(fnic, SCSI_NO_TAG);
+ atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
+ atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+
spin_lock_irqsave(&fnic->fnic_lock, flags);
/* fnic should be in FC_TRANS_ETH_MODE */
@@ -565,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
* reset the firmware. Free the cached flogi
*/
fnic->state = FNIC_IN_FC_MODE;
+ atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
} else {
@@ -572,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
fnic->lport->host,
"Unexpected state %s while processing"
" reset cmpl\n", fnic_state_to_str(fnic->state));
+ atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
@@ -695,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+ fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
if (is_ack_index_in_range(wq, request_out)) {
fnic->fw_ack_index[0] = request_out;
fnic->fw_ack_recd[0] = 1;
- }
+ } else
+ atomic64_inc(
+ &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
FNIC_TRACE(fnic_fcpio_ack_handler,
fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
@@ -720,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct fcpio_icmnd_cmpl *icmnd_cmpl;
struct fnic_io_req *io_req;
struct scsi_cmnd *sc;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags;
spinlock_t *io_lock;
u64 cmd_trace;
@@ -730,7 +782,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
fcpio_tag_id_dec(&tag, &id);
icmnd_cmpl = &desc->u.icmnd_cmpl;
- if (id >= FNIC_MAX_IO_REQ) {
+ if (id >= fnic->fnic_max_tag_id) {
shost_printk(KERN_ERR, fnic->lport->host,
"Tag out of range tag %x hdr status = %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
@@ -740,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
if (!sc) {
+ atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n",
@@ -760,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
shost_printk(KERN_ERR, fnic->lport->host,
@@ -818,63 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
xfer_len -= icmnd_cmpl->residual;
- /*
- * If queue_full, then try to reduce queue depth for all
- * LUNS on the target. Todo: this should be accompanied
- * by a periodic queue_depth rampup based on successful
- * IO completion.
- */
- if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
- struct scsi_device *t_sdev;
- int qd = 0;
-
- shost_for_each_device(t_sdev, sc->device->host) {
- if (t_sdev->id != sc->device->id)
- continue;
-
- if (t_sdev->queue_depth > 1) {
- qd = scsi_track_queue_full
- (t_sdev,
- t_sdev->queue_depth - 1);
- if (qd == -1)
- qd = t_sdev->host->cmd_per_lun;
- shost_printk(KERN_INFO,
- fnic->lport->host,
- "scsi[%d:%d:%d:%d"
- "] queue full detected,"
- "new depth = %d\n",
- t_sdev->host->host_no,
- t_sdev->channel,
- t_sdev->id, t_sdev->lun,
- t_sdev->queue_depth);
- }
- }
- }
+ if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+ atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
break;
case FCPIO_TIMEOUT: /* request was timed out */
+ atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_ABORTED: /* request was aborted */
+ atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+ atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
scsi_set_resid(sc, icmnd_cmpl->residual);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
+ atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
break;
- case FCPIO_INVALID_HEADER: /* header contains invalid data */
- case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
- case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
+
case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
+ atomic64_inc(&fnic_stats->io_stats.io_not_found);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
- case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
+ atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
case FCPIO_FW_ERR: /* request was terminated due fw error */
+ atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
+ atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_INVALID_HEADER: /* header contains invalid data */
+ case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
+ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
default:
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status));
@@ -882,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
break;
}
+ if (hdr_status != FCPIO_SUCCESS) {
+ atomic64_inc(&fnic_stats->io_stats.io_failures);
+ shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ }
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
CMD_FLAGS(sc) |= FNIC_IO_DONE;
@@ -915,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
} else
fnic->lport->host_stats.fcp_control_requests++;
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
@@ -932,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
u32 id;
struct scsi_cmnd *sc;
struct fnic_io_req *io_req;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
spinlock_t *io_lock;
unsigned long start_time;
@@ -939,7 +999,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
- if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+ if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
shost_printk(KERN_ERR, fnic->lport->host,
"Tag out of range tag %x hdr status = %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
@@ -949,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
if (!sc) {
+ atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), id);
@@ -959,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host,
@@ -983,15 +1045,48 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(io_lock, flags);
} else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ break;
+ case FCPIO_TIMEOUT:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_fw_timeouts);
+ else
+ atomic64_inc(
+ &term_stats->terminate_fw_timeouts);
+ break;
+ case FCPIO_IO_NOT_FOUND:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_io_not_found);
+ else
+ atomic64_inc(
+ &term_stats->terminate_io_not_found);
+ break;
+ default:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_failures);
+ else
+ atomic64_inc(
+ &term_stats->terminate_failures);
+ break;
+ }
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
spin_unlock_irqrestore(io_lock, flags);
return;
}
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_ABTS_STATUS(sc) = hdr_status;
-
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+ atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1095,6 +1190,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
struct fnic *fnic = vnic_dev_priv(vdev);
switch (desc->hdr.type) {
+ case FCPIO_ICMND_CMPL: /* fw completed a command */
+ case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+ case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+ case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+ case FCPIO_RESET_CMPL: /* fw completed reset */
+ atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ break;
+ default:
+ break;
+ }
+
+ switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
fnic_fcpio_ack_handler(fnic, cq_index, desc);
break;
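
The first switch above releases one active_fw_reqs credit for every descriptor type that represents a firmware completion, before the second switch dispatches to the per-type handler; FCPIO_ACK is excluded because it only acknowledges copy-WQ descriptors. A condensed kernel-style sketch of that classification, assuming the driver's FCPIO_* enums are in scope (hypothetical helper, not in the patch):

	#include <linux/types.h>

	/* hypothetical predicate mirroring the first switch above */
	static bool fcpio_is_fw_completion(u8 type)
	{
		switch (type) {
		case FCPIO_ICMND_CMPL:
		case FCPIO_ITMF_CMPL:
		case FCPIO_FLOGI_REG_CMPL:
		case FCPIO_FLOGI_FIP_REG_CMPL:
		case FCPIO_RESET_CMPL:
			return true;
		default:
			return false;
		}
	}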
@@ -1148,23 +1255,26 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
- unsigned int i;
+ int i;
struct fnic_io_req *io_req;
unsigned long flags = 0;
struct scsi_cmnd *sc;
spinlock_t *io_lock;
unsigned long start_time = 0;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
+ for (i = 0; i < fnic->fnic_max_tag_id; i++) {
if (i == exclude_id)
continue;
+ io_lock = fnic_io_lock_tag(fnic, i);
+ spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, i);
- if (!sc)
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
+ }
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
@@ -1205,6 +1315,11 @@ cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
" DID_TRANSPORT_DISRUPTED\n");
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
/* Complete the command to SCSI */
if (sc->scsi_done) {
FNIC_TRACE(fnic_cleanup_io,
@@ -1236,7 +1351,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
fcpio_tag_id_dec(&desc->hdr.tag, &id);
id &= FNIC_TAG_MASK;
- if (id >= FNIC_MAX_IO_REQ)
+ if (id >= fnic->fnic_max_tag_id)
return;
sc = scsi_host_find_tag(fnic->lport->host, id);
@@ -1288,6 +1403,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
spin_lock_irqsave(host->host_lock, flags);
@@ -1309,12 +1425,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
atomic_dec(&fnic->in_flight);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_queue_abort_io_req: failure: no descriptors\n");
+ atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
0, task_req, tag, fc_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
atomic_dec(&fnic->in_flight);
@@ -1325,10 +1448,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
int tag;
int abt_tag;
+ int term_cnt = 0;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_cmnd *sc;
+ struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
@@ -1340,14 +1466,15 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
if (fnic->in_remove)
return;
- for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag;
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
- if (!sc)
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
-
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ }
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1391,6 +1518,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst sc 0x%p\n",
@@ -1427,8 +1555,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ term_cnt++;
}
}
+ if (term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, term_cnt);
}
@@ -1436,17 +1568,37 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
int abt_tag;
+ int term_cnt = 0;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_cmnd *sc;
struct scsi_lun fc_lun;
- struct fc_rport_libfc_priv *rdata = rport->dd_data;
- struct fc_lport *lport = rdata->local_port;
- struct fnic *fnic = lport_priv(lport);
+ struct fc_rport_libfc_priv *rdata;
+ struct fc_lport *lport;
+ struct fnic *fnic;
struct fc_rport *cmd_rport;
+ struct reset_stats *reset_stats;
+ struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;
+ if (!rport) {
+ printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+ return;
+ }
+ rdata = rport->dd_data;
+
+ if (!rdata) {
+ printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
+ return;
+ }
+ lport = rdata->local_port;
+
+ if (!lport) {
+ printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
+ return;
+ }
+ fnic = lport_priv(lport);
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "fnic_terminate_rport_io called"
" wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
@@ -1456,18 +1608,24 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
if (fnic->in_remove)
return;
- for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ reset_stats = &fnic->fnic_stats.reset_stats;
+ term_stats = &fnic->fnic_stats.term_stats;
+
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag;
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
- if (!sc)
+ if (!sc) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
+ }
cmd_rport = starget_to_rport(scsi_target(sc->device));
- if (rport != cmd_rport)
+ if (rport != cmd_rport) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
-
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ }
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1509,6 +1667,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
@@ -1545,8 +1704,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ term_cnt++;
}
}
+ if (term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, term_cnt);
}
@@ -1567,6 +1730,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
int ret = SUCCESS;
u32 task_req = 0;
struct scsi_lun fc_lun;
+ struct fnic_stats *fnic_stats;
+ struct abort_stats *abts_stats;
+ struct terminate_stats *term_stats;
int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1577,6 +1743,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host);
fnic = lport_priv(lp);
+ fnic_stats = &fnic->fnic_stats;
+ abts_stats = &fnic->fnic_stats.abts_stats;
+ term_stats = &fnic->fnic_stats.term_stats;
+
rport = starget_to_rport(scsi_target(sc->device));
tag = sc->request->tag;
FNIC_SCSI_DBG(KERN_DEBUG,
@@ -1635,8 +1805,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
*/
if (fc_remote_port_chkready(rport) == 0)
task_req = FCPIO_ITMF_ABT_TASK;
- else
+ else {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
task_req = FCPIO_ITMF_ABT_TASK_TERM;
+ }
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
@@ -1651,10 +1823,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
ret = FAILED;
goto fnic_abort_cmd_end;
}
- if (task_req == FCPIO_ITMF_ABT_TASK)
+ if (task_req == FCPIO_ITMF_ABT_TASK) {
CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
- else
+ atomic64_inc(&fnic_stats->abts_stats.aborts);
+ } else {
CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+ atomic64_inc(&fnic_stats->term_stats.terminates);
+ }
/*
* We queued an abort IO, wait for its completion.
@@ -1672,6 +1847,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
@@ -1680,13 +1856,24 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req->abts_done = NULL;
/* fw did not complete abort, timed out */
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
+ if (task_req == FCPIO_ITMF_ABT_TASK) {
+ FNIC_SCSI_DBG(KERN_INFO,
+ fnic->lport->host, "Abort Driver Timeout\n");
+ atomic64_inc(&abts_stats->abort_drv_timeouts);
+ } else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Terminate Driver Timeout\n");
+ atomic64_inc(&term_stats->terminate_drv_timeouts);
+ }
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+
/*
* firmware completed the abort, check the status,
* free the io_req irrespective of failure or success
@@ -1724,6 +1911,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
@@ -1745,6 +1933,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
if (!vnic_wq_copy_desc_avail(wq)) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"queue_dr_io_req failure - no descriptors\n");
+ atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN;
goto lr_io_req_end;
}
@@ -1757,6 +1946,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
atomic_dec(&fnic->in_flight);
@@ -1784,17 +1979,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
DECLARE_COMPLETION_ONSTACK(tm_done);
enum fnic_ioreq_state old_ioreq_state;
- for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+ io_lock = fnic_io_lock_tag(fnic, tag);
+ spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
/*
* ignore this lun reset cmd or cmds that do not belong to
* this lun
*/
- if (!sc || sc == lr_sc || sc->device != lun_dev)
+ if (!sc || sc == lr_sc || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
continue;
-
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ }
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1823,6 +2019,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
old_ioreq_state = CMD_STATE(sc);
/*
* Any pending IO issued prior to reset is expected to be
@@ -1833,11 +2034,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
*/
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- if (io_req->abts_done)
- shost_printk(KERN_ERR, fnic->lport->host,
- "%s: io_req->abts_done is set state is %s\n",
- __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
-
BUG_ON(io_req->abts_done);
abt_tag = tag;
@@ -1890,12 +2086,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
io_req->abts_done = NULL;
/* if abort is still pending with fw, fail */
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
ret = 1;
goto clean_pending_aborts_end;
}
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -1989,6 +2186,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
unsigned long flags;
unsigned long start_time = 0;
struct scsi_lun fc_lun;
+ struct fnic_stats *fnic_stats;
+ struct reset_stats *reset_stats;
int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
@@ -2000,6 +2199,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host);
fnic = lport_priv(lp);
+ fnic_stats = &fnic->fnic_stats;
+ reset_stats = &fnic->fnic_stats.reset_stats;
+
+ atomic64_inc(&reset_stats->device_resets);
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2010,8 +2213,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
goto fnic_device_reset_end;
/* Check if remote port up */
- if (fc_remote_port_chkready(rport))
+ if (fc_remote_port_chkready(rport)) {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
goto fnic_device_reset_end;
+ }
CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
/* Allocate tag if not present */
@@ -2087,14 +2292,15 @@ int fnic_device_reset(struct scsi_cmnd *sc)
* gets cleaned up during higher levels of EH
*/
if (status == FCPIO_INVALID_CODE) {
+ atomic64_inc(&reset_stats->device_reset_timeouts);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
spin_unlock_irqrestore(io_lock, flags);
int_to_scsilun(sc->device->lun, &fc_lun);
/*
- * Issue abort and terminate on the device reset request.
- * If q'ing of the abort fails, retry issue it after a delay.
+ * Issue abort and terminate on device reset request.
+ * If queueing the terminate fails, retry it after a delay.
*/
while (1) {
spin_lock_irqsave(io_lock, flags);
@@ -2200,6 +2406,10 @@ fnic_device_reset_end:
"Returning from device reset %s\n",
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
+
+ if (ret == FAILED)
+ atomic64_inc(&reset_stats->device_reset_failures);
+
return ret;
}
@@ -2208,26 +2418,34 @@ int fnic_reset(struct Scsi_Host *shost)
{
struct fc_lport *lp;
struct fnic *fnic;
- int ret = SUCCESS;
+ int ret = 0;
+ struct reset_stats *reset_stats;
lp = shost_priv(shost);
fnic = lport_priv(lp);
+ reset_stats = &fnic->fnic_stats.reset_stats;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_reset called\n");
+ atomic64_inc(&reset_stats->fnic_resets);
+
/*
* Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi
*/
- if (lp->tt.lport_reset(lp))
- ret = FAILED;
+ ret = lp->tt.lport_reset(lp);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from fnic reset %s\n",
- (ret == SUCCESS) ?
+ (ret == 0) ?
"SUCCESS" : "FAILED");
+ if (ret == 0)
+ atomic64_inc(&reset_stats->fnic_reset_completions);
+ else
+ atomic64_inc(&reset_stats->fnic_reset_failures);
+
return ret;
}
@@ -2252,7 +2470,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
* scsi-ml tries to send a TUR to every device if host reset is
* successful, so before returning to scsi, fabric should be up
*/
- ret = fnic_reset(shost);
+ ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
if (ret == SUCCESS) {
wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
ret = FAILED;
@@ -2405,7 +2623,7 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
lun_dev = lr_sc->device;
/* walk again to check, if IOs are still pending in fw */
- for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
sc = scsi_host_find_tag(fnic->lport->host, tag);
/*
* ignore this lun reset cmd or cmds that do not belong to
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
new file mode 100644
index 000000000000..540cceb843cd
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_STATS_H_
+#define _FNIC_STATS_H_
+struct io_path_stats {
+ atomic64_t active_ios;
+ atomic64_t max_active_ios;
+ atomic64_t io_completions;
+ atomic64_t io_failures;
+ atomic64_t ioreq_null;
+ atomic64_t alloc_failures;
+ atomic64_t sc_null;
+ atomic64_t io_not_found;
+ atomic64_t num_ios;
+};
+
+struct abort_stats {
+ atomic64_t aborts;
+ atomic64_t abort_failures;
+ atomic64_t abort_drv_timeouts;
+ atomic64_t abort_fw_timeouts;
+ atomic64_t abort_io_not_found;
+};
+
+struct terminate_stats {
+ atomic64_t terminates;
+ atomic64_t max_terminates;
+ atomic64_t terminate_drv_timeouts;
+ atomic64_t terminate_fw_timeouts;
+ atomic64_t terminate_io_not_found;
+ atomic64_t terminate_failures;
+};
+
+struct reset_stats {
+ atomic64_t device_resets;
+ atomic64_t device_reset_failures;
+ atomic64_t device_reset_aborts;
+ atomic64_t device_reset_timeouts;
+ atomic64_t device_reset_terminates;
+ atomic64_t fw_resets;
+ atomic64_t fw_reset_completions;
+ atomic64_t fw_reset_failures;
+ atomic64_t fnic_resets;
+ atomic64_t fnic_reset_completions;
+ atomic64_t fnic_reset_failures;
+};
+
+struct fw_stats {
+ atomic64_t active_fw_reqs;
+ atomic64_t max_fw_reqs;
+ atomic64_t fw_out_of_resources;
+ atomic64_t io_fw_errs;
+};
+
+struct vlan_stats {
+ atomic64_t vlan_disc_reqs;
+ atomic64_t resp_withno_vlanID;
+ atomic64_t sol_expiry_count;
+ atomic64_t flogi_rejects;
+};
+
+struct misc_stats {
+ u64 last_isr_time;
+ u64 last_ack_time;
+ atomic64_t isr_count;
+ atomic64_t max_cq_entries;
+ atomic64_t ack_index_out_of_range;
+ atomic64_t data_count_mismatch;
+ atomic64_t fcpio_timeout;
+ atomic64_t fcpio_aborted;
+ atomic64_t sgl_invalid;
+ atomic64_t mss_invalid;
+ atomic64_t abts_cpwq_alloc_failures;
+ atomic64_t devrst_cpwq_alloc_failures;
+ atomic64_t io_cpwq_alloc_failures;
+ atomic64_t no_icmnd_itmf_cmpls;
+ atomic64_t queue_fulls;
+ atomic64_t rport_not_ready;
+ atomic64_t frame_errors;
+};
+
+struct fnic_stats {
+ struct io_path_stats io_stats;
+ struct abort_stats abts_stats;
+ struct terminate_stats term_stats;
+ struct reset_stats reset_stats;
+ struct fw_stats fw_stats;
+ struct vlan_stats vlan_stats;
+ struct misc_stats misc_stats;
+};
+
+struct stats_debug_info {
+ char *debug_buffer;
+ void *i_private;
+ int buf_size;
+ int buffer_len;
+};
+
+int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
+int fnic_stats_debugfs_init(struct fnic *);
+void fnic_stats_debugfs_remove(struct fnic *);
+#endif /* _FNIC_STATS_H_ */
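
One layout invariant in this header is worth noting: the reset_stats write handler preserves only the first u64 of io_path_stats and fw_stats, so active_ios and active_fw_reqs must remain the first members of their structs. A hypothetical compile-time guard for that assumption, assuming fnic_stats.h is in scope (not part of the patch):

	#include <linux/bug.h>		/* BUILD_BUG_ON() */
	#include <linux/stddef.h>	/* offsetof() */

	static inline void fnic_stats_layout_check(void)
	{
		/* the reset path memsets from the second u64 onwards */
		BUILD_BUG_ON(offsetof(struct io_path_stats, active_ios) != 0);
		BUILD_BUG_ON(offsetof(struct fw_stats, active_fw_reqs) != 0);
	}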
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 23a60e3d8527..e002e7187dc0 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -189,6 +189,191 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
}
/*
+ * fnic_get_stats_data - Copy fnic stats into a memory buffer
+ * @debug: pointer to the stats_debug_info buffer to fill
+ * @stats: pointer to the fnic_stats struct to read from
+ *
+ * Description:
+ * This routine gathers the fnic stats debugfs data from the fnic_stats
+ * struct and dumps it into stats_debug_info.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into
+ * stats_debug_info.
+ */
+int fnic_get_stats_data(struct stats_debug_info *debug,
+ struct fnic_stats *stats)
+{
+ int len = 0;
+ int buf_size = debug->buf_size;
+ struct timespec val1, val2;
+
+ len = snprintf(debug->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\tIO Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
+ "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
+ "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
+ "Number of Memory alloc Failures: %lld\n"
+ "Number of IOREQ Null: %lld\n"
+ "Number of SCSI cmd pointer Null: %lld\n",
+ (u64)atomic64_read(&stats->io_stats.active_ios),
+ (u64)atomic64_read(&stats->io_stats.max_active_ios),
+ (u64)atomic64_read(&stats->io_stats.num_ios),
+ (u64)atomic64_read(&stats->io_stats.io_completions),
+ (u64)atomic64_read(&stats->io_stats.io_failures),
+ (u64)atomic64_read(&stats->io_stats.io_not_found),
+ (u64)atomic64_read(&stats->io_stats.alloc_failures),
+ (u64)atomic64_read(&stats->io_stats.ioreq_null),
+ (u64)atomic64_read(&stats->io_stats.sc_null));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tAbort Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Aborts: %lld\n"
+ "Number of Abort Failures: %lld\n"
+ "Number of Abort Driver Timeouts: %lld\n"
+ "Number of Abort FW Timeouts: %lld\n"
+ "Number of Abort IO NOT Found: %lld\n",
+ (u64)atomic64_read(&stats->abts_stats.aborts),
+ (u64)atomic64_read(&stats->abts_stats.abort_failures),
+ (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
+ (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
+ (u64)atomic64_read(&stats->abts_stats.abort_io_not_found));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tTerminate Statistics\n"
+ "------------------------------------------\n");
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Terminates: %lld\n"
+ "Maximum Terminates: %lld\n"
+ "Number of Terminate Driver Timeouts: %lld\n"
+ "Number of Terminate FW Timeouts: %lld\n"
+ "Number of Terminate IO NOT Found: %lld\n"
+ "Number of Terminate Failures: %lld\n",
+ (u64)atomic64_read(&stats->term_stats.terminates),
+ (u64)atomic64_read(&stats->term_stats.max_terminates),
+ (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
+ (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
+ (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
+ (u64)atomic64_read(&stats->term_stats.terminate_failures));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tReset Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Device Resets: %lld\n"
+ "Number of Device Reset Failures: %lld\n"
+ "Number of Device Reset Aborts: %lld\n"
+ "Number of Device Reset Timeouts: %lld\n"
+ "Number of Device Reset Terminates: %lld\n"
+ "Number of FW Resets: %lld\n"
+ "Number of FW Reset Completions: %lld\n"
+ "Number of FW Reset Failures: %lld\n"
+ "Number of Fnic Reset: %lld\n"
+ "Number of Fnic Reset Completions: %lld\n"
+ "Number of Fnic Reset Failures: %lld\n",
+ (u64)atomic64_read(&stats->reset_stats.device_resets),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
+ (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
+ (u64)atomic64_read(
+ &stats->reset_stats.device_reset_terminates),
+ (u64)atomic64_read(&stats->reset_stats.fw_resets),
+ (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
+ (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
+ (u64)atomic64_read(&stats->reset_stats.fnic_resets),
+ (u64)atomic64_read(
+ &stats->reset_stats.fnic_reset_completions),
+ (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tFirmware Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Active FW Requests %lld\n"
+ "Maximum FW Requests: %lld\n"
+ "Number of FW out of resources: %lld\n"
+ "Number of FW IO errors: %lld\n",
+ (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
+ (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
+ (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
+ (u64)atomic64_read(&stats->fw_stats.io_fw_errs));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tVlan Discovery Statistics\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Number of Vlan Discovery Requests Sent %lld\n"
+ "Vlan Response Received with no FCF VLAN ID: %lld\n"
+ "No solicitations recvd after vlan set, expiry count: %lld\n"
+ "Flogi rejects count: %lld\n",
+ (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
+ (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
+ (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
+ (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\n------------------------------------------\n"
+ "\t\tOther Important Statistics\n"
+ "------------------------------------------\n");
+
+ jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
+ jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Last ISR time: %llu (%8lu.%8lu)\n"
+ "Last ACK time: %llu (%8lu.%8lu)\n"
+ "Number of ISRs: %lld\n"
+ "Maximum CQ Entries: %lld\n"
+ "Number of ACK index out of range: %lld\n"
+ "Number of data count mismatch: %lld\n"
+ "Number of FCPIO Timeouts: %lld\n"
+ "Number of FCPIO Aborted: %lld\n"
+ "Number of SGL Invalid: %lld\n"
+ "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
+ "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
+ "Number of Copy WQ Alloc Failures for IOs: %lld\n"
+ "Number of no icmnd itmf Completions: %lld\n"
+ "Number of QUEUE Fulls: %lld\n"
+ "Number of rport not ready: %lld\n"
+ "Number of receive frame errors: %lld\n",
+ (u64)stats->misc_stats.last_isr_time,
+ val1.tv_sec, val1.tv_nsec,
+ (u64)stats->misc_stats.last_ack_time,
+ val2.tv_sec, val2.tv_nsec,
+ (u64)atomic64_read(&stats->misc_stats.isr_count),
+ (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
+ (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
+ (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
+ (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
+ (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
+ (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
+ (u64)atomic64_read(
+ &stats->misc_stats.abts_cpwq_alloc_failures),
+ (u64)atomic64_read(
+ &stats->misc_stats.devrst_cpwq_alloc_failures),
+ (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
+ (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
+ (u64)atomic64_read(&stats->misc_stats.queue_fulls),
+ (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
+ (u64)atomic64_read(&stats->misc_stats.frame_errors));
+
+ return len;
+
+}
+
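One caveat worth keeping in mind about the "len += snprintf(...)" idiom used throughout fnic_get_stats_data: snprintf returns the length that would have been written, so once the buffer fills, len can exceed buf_size and the next "buf_size - len" goes negative. A minimal sketch of a bounded variant using the kernel's scnprintf, which returns the bytes actually stored (helper name hypothetical):

```c
#include <linux/kernel.h>

/* Hypothetical sketch: scnprintf keeps len bounded by buf_size, unlike
 * an unclamped snprintf accumulation. */
static int stats_buf_append(char *buf, int buf_size, int len,
			    const char *tag, u64 val)
{
	if (len >= buf_size)
		return len;	/* buffer already full, nothing more fits */
	return len + scnprintf(buf + len, buf_size - len,
			       "%s: %llu\n", tag, val);
}
```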
+/*
* fnic_trace_buf_init - Initialize fnic trace buffer logging facility
*
* Description:
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index cef42b4c4d6c..d412f2ee3c4f 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -84,7 +84,8 @@ fnic_trace_data_t *fnic_trace_get_buf(void);
int fnic_get_trace_data(fnic_dbgfs_t *);
int fnic_trace_buf_init(void);
void fnic_trace_free(void);
+int fnic_debugfs_init(void);
+void fnic_debugfs_terminate(void);
int fnic_trace_debugfs_init(void);
void fnic_trace_debugfs_terminate(void);
-
#endif
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index fbb55364e272..e343e1d0f801 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -54,8 +54,8 @@
#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 1
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 2048
#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
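The widened [1,2048] window suggests these MIN/MAX pairs serve as validation bounds for the configured throttle count. A sketch of how such a bound might be applied (helper and fallback are hypothetical):

```c
/* Hypothetical sketch: validate a configured throttle count against the
 * [MIN, MAX] window, falling back to a default when out of range. */
static u32 sketch_validate_throttle(u32 cfg, u32 dflt)
{
	if (cfg < VNIC_FNIC_IO_THROTTLE_COUNT_MIN ||
	    cfg > VNIC_FNIC_IO_THROTTLE_COUNT_MAX)
		return dflt;
	return cfg;
}
```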
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index df0c3c71ea43..f334859024c0 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -316,6 +316,12 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost);
}
+static unsigned int shost_eh_deadline;
+
+module_param_named(eh_deadline, shost_eh_deadline, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(eh_deadline,
+ "SCSI EH timeout in seconds (should be between 1 and 2^32-1)");
+
static struct device_type scsi_host_type = {
.name = "scsi_host",
.release = scsi_host_dev_release,
@@ -388,6 +394,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->unchecked_isa_dma = sht->unchecked_isa_dma;
shost->use_clustering = sht->use_clustering;
shost->ordered_tag = sht->ordered_tag;
+ shost->eh_deadline = shost_eh_deadline * HZ;
if (sht->supported_mode == MODE_UNKNOWN)
/* means we didn't set it ... default to INITIATOR */
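With eh_deadline stored on the host in jiffies, an error-handler path can compare against it using the kernel's wrap-safe jiffies helpers. A minimal sketch, assuming the deadline semantics described in the parameter text (helper name hypothetical):

```c
#include <linux/jiffies.h>

/* Hypothetical sketch: given the jiffy at which error handling started
 * and the per-host deadline in jiffies, decide whether to give up.
 * time_after() handles jiffies wraparound correctly. */
static bool sketch_eh_deadline_expired(unsigned long eh_start,
				       unsigned int eh_deadline)
{
	if (!eh_deadline)	/* 0 taken to mean no deadline enforced */
		return false;
	return time_after(jiffies, eh_start + eh_deadline);
}
```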
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 7f4f790a3d71..fb5a89815150 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -54,7 +54,7 @@
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "2.0.2-1"
+#define HPSA_DRIVER_VERSION "3.4.0-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -89,17 +89,17 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
@@ -107,7 +107,19 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -125,24 +137,35 @@ static struct board_type products[] = {
{0x3245103C, "Smart Array P410i", &SA5_access},
{0x3247103C, "Smart Array P411", &SA5_access},
{0x3249103C, "Smart Array P812", &SA5_access},
- {0x324a103C, "Smart Array P712m", &SA5_access},
- {0x324b103C, "Smart Array P711m", &SA5_access},
+ {0x324A103C, "Smart Array P712m", &SA5_access},
+ {0x324B103C, "Smart Array P711m", &SA5_access},
{0x3350103C, "Smart Array P222", &SA5_access},
{0x3351103C, "Smart Array P420", &SA5_access},
{0x3352103C, "Smart Array P421", &SA5_access},
{0x3353103C, "Smart Array P822", &SA5_access},
+ {0x334D103C, "Smart Array P822se", &SA5_access},
{0x3354103C, "Smart Array P420i", &SA5_access},
{0x3355103C, "Smart Array P220i", &SA5_access},
{0x3356103C, "Smart Array P721m", &SA5_access},
- {0x1920103C, "Smart Array", &SA5_access},
- {0x1921103C, "Smart Array", &SA5_access},
- {0x1922103C, "Smart Array", &SA5_access},
- {0x1923103C, "Smart Array", &SA5_access},
- {0x1924103C, "Smart Array", &SA5_access},
- {0x1925103C, "Smart Array", &SA5_access},
- {0x1926103C, "Smart Array", &SA5_access},
- {0x1928103C, "Smart Array", &SA5_access},
- {0x334d103C, "Smart Array P822se", &SA5_access},
+ {0x1921103C, "Smart Array P830i", &SA5_access},
+ {0x1922103C, "Smart Array P430", &SA5_access},
+ {0x1923103C, "Smart Array P431", &SA5_access},
+ {0x1924103C, "Smart Array P830", &SA5_access},
+ {0x1926103C, "Smart Array P731m", &SA5_access},
+ {0x1928103C, "Smart Array P230i", &SA5_access},
+ {0x1929103C, "Smart Array P530", &SA5_access},
+ {0x21BD103C, "Smart Array", &SA5_access},
+ {0x21BE103C, "Smart Array", &SA5_access},
+ {0x21BF103C, "Smart Array", &SA5_access},
+ {0x21C0103C, "Smart Array", &SA5_access},
+ {0x21C1103C, "Smart Array", &SA5_access},
+ {0x21C2103C, "Smart Array", &SA5_access},
+ {0x21C3103C, "Smart Array", &SA5_access},
+ {0x21C4103C, "Smart Array", &SA5_access},
+ {0x21C5103C, "Smart Array", &SA5_access},
+ {0x21C7103C, "Smart Array", &SA5_access},
+ {0x21C8103C, "Smart Array", &SA5_access},
+ {0x21C9103C, "Smart Array", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -583,7 +606,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
if (likely(h->msix_vector))
c->Header.ReplyQueue =
- smp_processor_id() % h->nreply_queues;
+ raw_smp_processor_id() % h->nreply_queues;
}
}
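smp_processor_id() triggers a debug warning when called from a preemptible context, because the CPU number may be stale by the time it is used. When the value is only a load-spreading hint, as with the reply-queue choice above, raw_smp_processor_id() is the documented escape hatch. A condensed sketch of that use (helper name hypothetical):

```c
#include <linux/smp.h>

/* Hypothetical sketch: pick a reply queue from the current CPU number.
 * A stale CPU is harmless here; only the spreading is affected. */
static unsigned int sketch_pick_reply_queue(unsigned int nreply_queues)
{
	return raw_smp_processor_id() % nreply_queues;
}
```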
@@ -1054,7 +1077,7 @@ free_and_out:
}
/*
- * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t *
+ * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
* Assume's h->devlock is held.
*/
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
@@ -1205,8 +1228,8 @@ static void complete_scsi_command(struct CommandList *cp)
scsi_set_resid(cmd, ei->ResidualCnt);
if (ei->CommandStatus == 0) {
- cmd->scsi_done(cmd);
cmd_free(h, cp);
+ cmd->scsi_done(cmd);
return;
}
@@ -1379,8 +1402,8 @@ static void complete_scsi_command(struct CommandList *cp)
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
cp, ei->CommandStatus);
}
- cmd->scsi_done(cmd);
cmd_free(h, cp);
+ cmd->scsi_done(cmd);
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -2721,7 +2744,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
} while (test_and_set_bit
(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
- h->nr_allocs++;
spin_unlock_irqrestore(&h->lock, flags);
c = h->cmd_pool + i;
@@ -2793,7 +2815,6 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
spin_lock_irqsave(&h->lock, flags);
clear_bit(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG));
- h->nr_frees++;
spin_unlock_irqrestore(&h->lock, flags);
}
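The two completion hunks above swap cmd_free() ahead of cmd->scsi_done(). The plausible rationale, hedged since no commit text appears here: once ->scsi_done() runs, the midlayer may immediately issue a new command that allocates from the same pool, so the CommandList slot must already be free before the midlayer is signalled. In sketch form:

```c
/* Hypothetical sketch of the completion ordering the hpsa hunks adopt:
 * release driver-private resources before signalling the midlayer,
 * because ->scsi_done() may trigger an immediate new submission that
 * allocates from the same command pool. */
static void sketch_complete(struct ctlr_info *h, struct CommandList *cp,
			    struct scsi_cmnd *cmd)
{
	cmd_free(h, cp);	/* slot is reusable from here on */
	cmd->scsi_done(cmd);	/* only now wake the midlayer */
	/* cp must not be touched after cmd_free() */
}
```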
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 981647989bfd..bc85e7244f40 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -98,8 +98,6 @@ struct ctlr_info {
struct ErrorInfo *errinfo_pool;
dma_addr_t errinfo_pool_dhandle;
unsigned long *cmd_pool_bits;
- int nr_allocs;
- int nr_frees;
int scan_finished;
spinlock_t scan_lock;
wait_queue_head_t scan_wait_queue;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e31caa21ddf..23f5ba5e6472 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2208,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
- return -EIO;
+ /* If a failure is received, the host adapter is most likely going
+ * through reset; return success so the caller will wait for the
+ * command being cancelled to be returned. */
+ return 0;
}
sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
@@ -2221,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
if (status != IBMVFC_MAD_SUCCESS) {
sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
- return -EIO;
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ /* Host adapter most likely going through reset; return success
+ * so the caller will wait for the cancelled command to be returned. */
+ return 0;
+ default:
+ return -EIO;
+ }
}
sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d0fa4b6c551f..fa764406df68 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -241,7 +241,7 @@ static void gather_partition_info(void)
struct device_node *rootdn;
const char *ppartition_name;
- const unsigned int *p_number_ptr;
+ const __be32 *p_number_ptr;
/* Retrieve information about this partition */
rootdn = of_find_node_by_path("/");
@@ -255,7 +255,7 @@ static void gather_partition_info(void)
sizeof(partition_name));
p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
if (p_number_ptr)
- partition_number = *p_number_ptr;
+ partition_number = of_read_number(p_number_ptr, 1);
of_node_put(rootdn);
}
@@ -270,10 +270,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
strncpy(hostdata->madapter_info.partition_name, partition_name,
sizeof(hostdata->madapter_info.partition_name));
- hostdata->madapter_info.partition_number = partition_number;
+ hostdata->madapter_info.partition_number =
+ cpu_to_be32(partition_number);
- hostdata->madapter_info.mad_version = 1;
- hostdata->madapter_info.os_type = 2;
+ hostdata->madapter_info.mad_version = cpu_to_be32(1);
+ hostdata->madapter_info.os_type = cpu_to_be32(2);
}
/**
@@ -464,9 +465,9 @@ static int initialize_event_pool(struct event_pool *pool,
memset(&evt->crq, 0x00, sizeof(evt->crq));
atomic_set(&evt->free, 1);
evt->crq.valid = 0x80;
- evt->crq.IU_length = sizeof(*evt->xfer_iu);
- evt->crq.IU_data_ptr = pool->iu_token +
- sizeof(*evt->xfer_iu) * i;
+ evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
+ evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
+ sizeof(*evt->xfer_iu) * i);
evt->xfer_iu = pool->iu_storage + i;
evt->hostdata = hostdata;
evt->ext_list = NULL;
@@ -588,7 +589,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
evt_struct->cmnd_done = NULL;
evt_struct->sync_srp = NULL;
evt_struct->crq.format = format;
- evt_struct->crq.timeout = timeout;
+ evt_struct->crq.timeout = cpu_to_be16(timeout);
evt_struct->done = done;
}
@@ -659,8 +660,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
scsi_for_each_sg(cmd, sg, nseg, i) {
struct srp_direct_buf *descr = md + i;
- descr->va = sg_dma_address(sg);
- descr->len = sg_dma_len(sg);
+ descr->va = cpu_to_be64(sg_dma_address(sg));
+ descr->len = cpu_to_be32(sg_dma_len(sg));
descr->key = 0;
total_length += sg_dma_len(sg);
}
@@ -703,13 +704,14 @@ static int map_sg_data(struct scsi_cmnd *cmd,
}
indirect->table_desc.va = 0;
- indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+ indirect->table_desc.len = cpu_to_be32(sg_mapped *
+ sizeof(struct srp_direct_buf));
indirect->table_desc.key = 0;
if (sg_mapped <= MAX_INDIRECT_BUFS) {
total_length = map_sg_list(cmd, sg_mapped,
&indirect->desc_list[0]);
- indirect->len = total_length;
+ indirect->len = cpu_to_be32(total_length);
return 1;
}
@@ -731,9 +733,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
- indirect->len = total_length;
- indirect->table_desc.va = evt_struct->ext_list_token;
- indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+ indirect->len = cpu_to_be32(total_length);
+ indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
+ indirect->table_desc.len = cpu_to_be32(sg_mapped *
+ sizeof(indirect->desc_list[0]));
memcpy(indirect->desc_list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
return 1;
@@ -849,7 +852,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
struct ibmvscsi_host_data *hostdata,
unsigned long timeout)
{
- u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+ __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
int request_status = 0;
int rc;
int srp_req = 0;
@@ -920,8 +923,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
add_timer(&evt_struct->timer);
}
- if ((rc =
- ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+ rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]));
+ if (rc != 0) {
list_del(&evt_struct->list);
del_timer(&evt_struct->timer);
@@ -987,15 +991,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
memcpy(cmnd->sense_buffer,
rsp->data,
- rsp->sense_data_len);
+ be32_to_cpu(rsp->sense_data_len));
unmap_cmd_data(&evt_struct->iu.srp.cmd,
evt_struct,
evt_struct->hostdata->dev);
if (rsp->flags & SRP_RSP_FLAG_DOOVER)
- scsi_set_resid(cmnd, rsp->data_out_res_cnt);
+ scsi_set_resid(cmnd,
+ be32_to_cpu(rsp->data_out_res_cnt));
else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
- scsi_set_resid(cmnd, rsp->data_in_res_cnt);
+ scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
}
if (evt_struct->cmnd_done)
@@ -1037,7 +1042,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
srp_cmd->opcode = SRP_CMD;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
- srp_cmd->lun = ((u64) lun) << 48;
+ srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
@@ -1062,9 +1067,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
out_fmt == SRP_DATA_DESC_INDIRECT) &&
indirect->table_desc.va == 0) {
- indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+ indirect->table_desc.va =
+ cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
offsetof(struct srp_cmd, add_data) +
- offsetof(struct srp_indirect_buf, desc_list);
+ offsetof(struct srp_indirect_buf, desc_list));
}
return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
@@ -1158,7 +1164,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
* request_limit could have been set to -1 by this client.
*/
atomic_set(&hostdata->request_limit,
- evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
+ be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
/* If we had any pending I/Os, kick them */
scsi_unblock_requests(hostdata->host);
@@ -1184,8 +1190,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
login = &evt_struct->iu.srp.login_req;
memset(login, 0, sizeof(*login));
login->opcode = SRP_LOGIN_REQ;
- login->req_it_iu_len = sizeof(union srp_iu);
- login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+ login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
+ login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 0, since this is negotiated in
@@ -1214,12 +1221,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct)
dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
evt_struct->xfer_iu->mad.capabilities.common.status);
} else {
- if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+ if (hostdata->caps.migration.common.server_support !=
+ cpu_to_be16(SERVER_SUPPORTS_CAP))
dev_info(hostdata->dev, "Partition migration not supported\n");
if (client_reserve) {
if (hostdata->caps.reserve.common.server_support ==
- SERVER_SUPPORTS_CAP)
+ cpu_to_be16(SERVER_SUPPORTS_CAP))
dev_info(hostdata->dev, "Client reserve enabled\n");
else
dev_info(hostdata->dev, "Client reserve not supported\n");
@@ -1251,9 +1259,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
req = &evt_struct->iu.mad.capabilities;
memset(req, 0, sizeof(*req));
- hostdata->caps.flags = CAP_LIST_SUPPORTED;
+ hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
if (hostdata->client_migrated)
- hostdata->caps.flags |= CLIENT_MIGRATED;
+ hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
sizeof(hostdata->caps.name));
@@ -1264,22 +1272,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
- req->common.type = VIOSRP_CAPABILITIES_TYPE;
- req->buffer = hostdata->caps_addr;
+ req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
+ req->buffer = cpu_to_be64(hostdata->caps_addr);
- hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
- hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
- hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
- hostdata->caps.migration.ecl = 1;
+ hostdata->caps.migration.common.cap_type =
+ cpu_to_be32(MIGRATION_CAPABILITIES);
+ hostdata->caps.migration.common.length =
+ cpu_to_be16(sizeof(hostdata->caps.migration));
+ hostdata->caps.migration.common.server_support =
+ cpu_to_be16(SERVER_SUPPORTS_CAP);
+ hostdata->caps.migration.ecl = cpu_to_be32(1);
if (client_reserve) {
- hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
- hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
- hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
- hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
- req->common.length = sizeof(hostdata->caps);
+ hostdata->caps.reserve.common.cap_type =
+ cpu_to_be32(RESERVATION_CAPABILITIES);
+ hostdata->caps.reserve.common.length =
+ cpu_to_be16(sizeof(hostdata->caps.reserve));
+ hostdata->caps.reserve.common.server_support =
+ cpu_to_be16(SERVER_SUPPORTS_CAP);
+ hostdata->caps.reserve.type =
+ cpu_to_be32(CLIENT_RESERVE_SCSI_2);
+ req->common.length =
+ cpu_to_be16(sizeof(hostdata->caps));
} else
- req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+ req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
+ sizeof(hostdata->caps.reserve));
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1297,7 +1314,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
static void fast_fail_rsp(struct srp_event_struct *evt_struct)
{
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
- u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+ u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
if (status == VIOSRP_MAD_NOT_SUPPORTED)
dev_err(hostdata->dev, "fast_fail not supported in server\n");
@@ -1334,8 +1351,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
fast_fail_mad = &evt_struct->iu.mad.fast_fail;
memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
- fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
- fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+ fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
+ fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
@@ -1362,15 +1379,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
"host partition %s (%d), OS %d, max io %u\n",
hostdata->madapter_info.srp_version,
hostdata->madapter_info.partition_name,
- hostdata->madapter_info.partition_number,
- hostdata->madapter_info.os_type,
- hostdata->madapter_info.port_max_txu[0]);
+ be32_to_cpu(hostdata->madapter_info.partition_number),
+ be32_to_cpu(hostdata->madapter_info.os_type),
+ be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
- hostdata->madapter_info.port_max_txu[0] >> 9;
+ be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
- if (hostdata->madapter_info.os_type == 3 &&
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
hostdata->madapter_info.srp_version);
@@ -1379,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
- if (hostdata->madapter_info.os_type == 3) {
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
enable_fast_fail(hostdata);
return;
}
@@ -1414,9 +1431,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
req = &evt_struct->iu.mad.adapter_info;
memset(req, 0x00, sizeof(*req));
- req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
- req->common.length = sizeof(hostdata->madapter_info);
- req->buffer = hostdata->adapter_info_addr;
+ req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
+ req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
+ req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
spin_lock_irqsave(hostdata->host->host_lock, flags);
if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1501,7 +1518,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
/* Set up an abort SRP command */
memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = ((u64) lun) << 48;
+ tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
tsk_mgmt->task_tag = (u64) found_evt;
@@ -1624,7 +1641,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* Set up a lun reset SRP command */
memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
tsk_mgmt->opcode = SRP_TSK_MGMT;
- tsk_mgmt->lun = ((u64) lun) << 48;
+ tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
evt->sync_srp = &srp_rsp;
@@ -1735,8 +1752,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
{
long rc;
unsigned long flags;
+ /* The hypervisor copies our tag value here so no byteswapping */
struct srp_event_struct *evt_struct =
- (struct srp_event_struct *)crq->IU_data_ptr;
+ (__force struct srp_event_struct *)crq->IU_data_ptr;
switch (crq->valid) {
case 0xC0: /* initialization */
switch (crq->format) {
@@ -1792,18 +1810,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
*/
if (!valid_event_struct(&hostdata->pool, evt_struct)) {
dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
- (void *)crq->IU_data_ptr);
+ evt_struct);
return;
}
if (atomic_read(&evt_struct->free)) {
dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
- (void *)crq->IU_data_ptr);
+ evt_struct);
return;
}
if (crq->format == VIOSRP_SRP_FORMAT)
- atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
+ atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
&hostdata->request_limit);
del_timer(&evt_struct->timer);
@@ -1856,13 +1874,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
/* Set up a lun reset SRP command */
memset(host_config, 0x00, sizeof(*host_config));
- host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
- host_config->common.length = length;
- host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
- length,
- DMA_BIDIRECTIONAL);
+ host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
+ host_config->common.length = cpu_to_be16(length);
+ addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
+ if (dma_mapping_error(hostdata->dev, addr)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
dev_err(hostdata->dev,
"dma_mapping error getting host config\n");
@@ -1870,6 +1886,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
return -1;
}
+ host_config->buffer = cpu_to_be64(addr);
+
init_completion(&evt_struct->comp);
spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 2cd735d1d196..116243087622 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -75,9 +75,9 @@ struct viosrp_crq {
u8 format; /* SCSI vs out-of-band */
u8 reserved;
u8 status; /* non-scsi failure? (e.g. DMA failure) */
- u16 timeout; /* in seconds */
- u16 IU_length; /* in bytes */
- u64 IU_data_ptr; /* the TCE for transferring data */
+ __be16 timeout; /* in seconds */
+ __be16 IU_length; /* in bytes */
+ __be64 IU_data_ptr; /* the TCE for transferring data */
};
/* MADs are Management requests above and beyond the IUs defined in the SRP
@@ -124,10 +124,10 @@ enum viosrp_capability_flag {
* Common MAD header
*/
struct mad_common {
- u32 type;
- u16 status;
- u16 length;
- u64 tag;
+ __be32 type;
+ __be16 status;
+ __be16 length;
+ __be64 tag;
};
/*
@@ -139,23 +139,23 @@ struct mad_common {
*/
struct viosrp_empty_iu {
struct mad_common common;
- u64 buffer;
- u32 port;
+ __be64 buffer;
+ __be32 port;
};
struct viosrp_error_log {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_adapter_info {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_host_config {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct viosrp_fast_fail {
@@ -164,27 +164,27 @@ struct viosrp_fast_fail {
struct viosrp_capabilities {
struct mad_common common;
- u64 buffer;
+ __be64 buffer;
};
struct mad_capability_common {
- u32 cap_type;
- u16 length;
- u16 server_support;
+ __be32 cap_type;
+ __be16 length;
+ __be16 server_support;
};
struct mad_reserve_cap {
struct mad_capability_common common;
- u32 type;
+ __be32 type;
};
struct mad_migration_cap {
struct mad_capability_common common;
- u32 ecl;
+ __be32 ecl;
};
struct capabilities{
- u32 flags;
+ __be32 flags;
char name[SRP_MAX_LOC_LEN];
char loc[SRP_MAX_LOC_LEN];
struct mad_migration_cap migration;
@@ -208,10 +208,10 @@ union viosrp_iu {
struct mad_adapter_info_data {
char srp_version[8];
char partition_name[96];
- u32 partition_number;
- u32 mad_version;
- u32 os_type;
- u32 port_max_txu[8]; /* per-port maximum transfer */
+ __be32 partition_number;
+ __be32 mad_version;
+ __be32 os_type;
+ __be32 port_max_txu[8]; /* per-port maximum transfer */
};
#endif
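Marking the wire structures __be16/__be32/__be64 lets sparse (make C=1) flag any access that forgets a byte swap; the .c hunks above add the matching cpu_to_be*()/be*_to_cpu() conversions at the boundary. A self-contained sketch of the round trip (function names hypothetical):

```c
#include <asm/byteorder.h>
#include <linux/types.h>

/* Hypothetical sketch: fill a big-endian wire structure from CPU-native
 * values and read one back. With the __be32/__be16 annotations, sparse
 * warns if a conversion is forgotten. */
static void sketch_fill_mad_common(struct mad_common *mad, u32 type, u16 len)
{
	mad->type   = cpu_to_be32(type);	/* CPU -> wire */
	mad->length = cpu_to_be16(len);
}

static u16 sketch_read_mad_status(const struct mad_common *mad)
{
	return be16_to_cpu(mad->status);	/* wire -> CPU */
}
```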
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 6601e03520cc..36ac1c34ce97 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9990,6 +9990,20 @@ static struct pci_device_id ipr_pci_table[] = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
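Each added entry pins the generic Crocodile device ID to a specific subsystem ID. For reference, the matching the PCI core performs against such a table reduces to a per-field wildcard compare, sketched here (simplified; not the actual core implementation):

```c
#include <linux/pci.h>

/* Hypothetical sketch: PCI_ANY_ID acts as a wildcard per field. This is,
 * in simplified form, what lets one Crocodile device ID fan out into
 * many subsystem-specific entries like IPR_SUBS_DEV_ID_57D9. */
static bool sketch_id_match(const struct pci_device_id *id,
			    const struct pci_dev *dev)
{
	return (id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
	       (id->device == PCI_ANY_ID || id->device == dev->device) &&
	       (id->subvendor == PCI_ANY_ID ||
		id->subvendor == dev->subsystem_vendor) &&
	       (id->subdevice == PCI_ANY_ID ||
		id->subdevice == dev->subsystem_device);
}
```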
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 07a85ce41782..cad1483f05da 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -100,6 +100,13 @@
#define IPR_SUBS_DEV_ID_57D6 0x03FC
#define IPR_SUBS_DEV_ID_57D7 0x03FF
#define IPR_SUBS_DEV_ID_57D8 0x03FE
+#define IPR_SUBS_DEV_ID_57D9 0x046D
+#define IPR_SUBS_DEV_ID_57EB 0x0474
+#define IPR_SUBS_DEV_ID_57EC 0x0475
+#define IPR_SUBS_DEV_ID_57ED 0x0499
+#define IPR_SUBS_DEV_ID_57EE 0x049A
+#define IPR_SUBS_DEV_ID_57EF 0x049B
+#define IPR_SUBS_DEV_ID_57F0 0x049C
#define IPR_NAME "ipr"
/*
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index cd962da4a57a..85c77f6b802b 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -311,9 +311,9 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
&ihost->phys[phy_index]);
assigned_phy_mask |= (1 << phy_index);
+ phy_index++;
}
- phy_index++;
}
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
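The hunk moves phy_index++ inside the block that consumes the phy, so the index only advances together with the work done on it. A simplified, hypothetical illustration of why the increment's scope matters for this kind of walk:

```c
#include <linux/bitops.h>

/* Hypothetical, simplified sketch: the index advances in the same scope
 * as the element it just consumed. If the increment drifts outside the
 * block that does the work (the shape of the bug fixed above), later
 * iterations examine the wrong phy. */
static int sketch_count_assigned_phys(const unsigned long *mask, int nphys)
{
	int phy_index = 0;
	int assigned = 0;

	while (phy_index < nphys) {
		if (test_bit(phy_index, mask))
			assigned++;
		phy_index++;	/* one element consumed, one step forward */
	}
	return assigned;
}
```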
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 9e2588a6881c..add6d1566ec8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -116,6 +116,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
struct iscsi_conn *conn = sk->sk_user_data;
if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
+ (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
!atomic_read(&sk->sk_rmem_alloc)) {
ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ae69dfcc7834..e3995612ea76 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2812,6 +2812,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->boot_nic);
kfree(session->boot_target);
kfree(session->ifacename);
+ kfree(session->portal_type);
+ kfree(session->discovery_parent_type);
iscsi_destroy_session(cls_session);
iscsi_host_dec_session_cnt(shost);
@@ -3168,6 +3170,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ int val;
switch(param) {
case ISCSI_PARAM_FAST_ABORT:
@@ -3257,6 +3260,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
return iscsi_switch_str_param(&session->boot_nic, buf);
case ISCSI_PARAM_BOOT_TARGET:
return iscsi_switch_str_param(&session->boot_target, buf);
+ case ISCSI_PARAM_PORTAL_TYPE:
+ return iscsi_switch_str_param(&session->portal_type, buf);
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ return iscsi_switch_str_param(&session->discovery_parent_type,
+ buf);
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ sscanf(buf, "%d", &val);
+ session->discovery_sess = !!val;
+ break;
default:
return -ENOSYS;
}
@@ -3305,6 +3317,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_DATASEQ_INORDER_EN:
len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
break;
+ case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+ len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
+ break;
case ISCSI_PARAM_ERL:
len = sprintf(buf, "%d\n", session->erl);
break;
@@ -3344,6 +3359,52 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_BOOT_TARGET:
len = sprintf(buf, "%s\n", session->boot_target);
break;
+ case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+ len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
+ break;
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ len = sprintf(buf, "%u\n", session->discovery_sess);
+ break;
+ case ISCSI_PARAM_PORTAL_TYPE:
+ len = sprintf(buf, "%s\n", session->portal_type);
+ break;
+ case ISCSI_PARAM_CHAP_AUTH_EN:
+ len = sprintf(buf, "%u\n", session->chap_auth_en);
+ break;
+ case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+ len = sprintf(buf, "%u\n", session->discovery_logout_en);
+ break;
+ case ISCSI_PARAM_BIDI_CHAP_EN:
+ len = sprintf(buf, "%u\n", session->bidi_chap_en);
+ break;
+ case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ len = sprintf(buf, "%u\n", session->discovery_auth_optional);
+ break;
+ case ISCSI_PARAM_DEF_TIME2WAIT:
+ len = sprintf(buf, "%d\n", session->time2wait);
+ break;
+ case ISCSI_PARAM_DEF_TIME2RETAIN:
+ len = sprintf(buf, "%d\n", session->time2retain);
+ break;
+ case ISCSI_PARAM_TSID:
+ len = sprintf(buf, "%u\n", session->tsid);
+ break;
+ case ISCSI_PARAM_ISID:
+ len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+ session->isid[0], session->isid[1],
+ session->isid[2], session->isid[3],
+ session->isid[4], session->isid[5]);
+ break;
+ case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+ len = sprintf(buf, "%u\n", session->discovery_parent_idx);
+ break;
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ if (session->discovery_parent_type)
+ len = sprintf(buf, "%s\n",
+ session->discovery_parent_type);
+ else
+ len = sprintf(buf, "\n");
+ break;
default:
return -ENOSYS;
}
@@ -3433,6 +3494,54 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_PERSISTENT_ADDRESS:
len = sprintf(buf, "%s\n", conn->persistent_address);
break;
+ case ISCSI_PARAM_STATSN:
+ len = sprintf(buf, "%u\n", conn->statsn);
+ break;
+ case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+ len = sprintf(buf, "%u\n", conn->max_segment_size);
+ break;
+ case ISCSI_PARAM_KEEPALIVE_TMO:
+ len = sprintf(buf, "%u\n", conn->keepalive_tmo);
+ break;
+ case ISCSI_PARAM_LOCAL_PORT:
+ len = sprintf(buf, "%u\n", conn->local_port);
+ break;
+ case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+ len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
+ break;
+ case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+ len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
+ break;
+ case ISCSI_PARAM_TCP_WSF_DISABLE:
+ len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
+ break;
+ case ISCSI_PARAM_TCP_TIMER_SCALE:
+ len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
+ break;
+ case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+ len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
+ break;
+ case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+ len = sprintf(buf, "%u\n", conn->fragment_disable);
+ break;
+ case ISCSI_PARAM_IPV4_TOS:
+ len = sprintf(buf, "%u\n", conn->ipv4_tos);
+ break;
+ case ISCSI_PARAM_IPV6_TC:
+ len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
+ break;
+ case ISCSI_PARAM_IPV6_FLOW_LABEL:
+ len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
+ break;
+ case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+ len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
+ break;
+ case ISCSI_PARAM_TCP_XMIT_WSF:
+ len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
+ break;
+ case ISCSI_PARAM_TCP_RECV_WSF:
+ len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
+ break;
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 93f222d66716..4e1b75ca7451 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -421,6 +421,7 @@ struct lpfc_vport {
uint32_t cfg_enable_da_id;
uint32_t cfg_max_scsicmpl_time;
uint32_t cfg_tgt_queue_depth;
+ uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
@@ -707,11 +708,10 @@ struct lpfc_hba {
uint32_t cfg_multi_ring_type;
uint32_t cfg_poll;
uint32_t cfg_poll_tmo;
+ uint32_t cfg_task_mgmt_tmo;
uint32_t cfg_use_msi;
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
- uint32_t cfg_fcp_wq_count;
- uint32_t cfg_fcp_eq_count;
uint32_t cfg_fcp_io_channel;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5cb08ae3e8c2..00656fc92b93 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -674,9 +674,6 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
int i;
int rc;
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
- return 0;
-
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
@@ -744,14 +741,15 @@ lpfc_selective_reset(struct lpfc_hba *phba)
int status = 0;
int rc;
- if ((!phba->cfg_enable_hba_reset) ||
- (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ if (!phba->cfg_enable_hba_reset)
return -EACCES;
- status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- if (status != 0)
- return status;
+ if (status != 0)
+ return status;
+ }
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -818,7 +816,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
* the readiness after performing a firmware reset.
*
* Returns:
- * zero for success, -EPERM when port does not have privilage to perform the
+ * zero for success, -EPERM when port does not have privilege to perform the
* reset, -EIO when port timeout from recovering from the reset.
*
* Note:
@@ -835,7 +833,7 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&portstat_reg.word0);
- /* verify if privilaged for the request operation */
+ /* verify if privileged for the request operation */
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
!bf_get(lpfc_sliport_status_err, &portstat_reg))
return -EPERM;
@@ -927,9 +925,9 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
rc = lpfc_sli4_pdev_status_reg_wait(phba);
if (rc == -EPERM) {
- /* no privilage for reset */
+ /* no privilege for reset */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "3150 No privilage to perform the requested "
+ "3150 No privilege to perform the requested "
"access: x%x\n", reg_val);
} else if (rc == -EIO) {
/* reset failed, there is nothing more we can do */
@@ -1867,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
- "3053 lpfc_" #attr " changed from %d to %d\n", \
- vport->cfg_##attr, val); \
+ "3053 lpfc_" #attr \
+ " changed from %d (x%x) to %d (x%x)\n", \
+ vport->cfg_##attr, vport->cfg_##attr, \
+ val, val); \
vport->cfg_##attr = val;\
return 0;\
}\
@@ -2591,9 +2591,12 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
-# commands per FCP LUN. Value range is [1,128]. Default value is 30.
+# commands per FCP LUN. Value range is [1,512]. Default value is 30.
+# If this parameter value is greater than 1/8th the maximum number of exchanges
+# supported by the HBA port, then the lun queue depth will be reduced to
+# 1/8th the maximum number of exchanges.
*/
-LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
+LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
"Max number of FCP commands we can queue to a specific LUN");
/*
@@ -2601,7 +2604,7 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
# commands per target port. Value range is [10,65535]. Default value is 65535.
*/
LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
- "Max number of FCP commands we can queue to a specific target port");
+ "Max number of FCP commands we can queue to a specific target port");
/*
# hba_queue_depth: This parameter is used to limit the number of outstanding
@@ -3949,6 +3952,14 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
+# lpfc_first_burst_size: First burst size to use on the NPorts
+# that support first burst.
+# Value range is [0,65536]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
+ "First burst size for Targets that support first burst");
+
+/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
# depth. Default value is 0. When the value of this parameter is zero the
# SCSI command completion time is not used for controlling I/O queue depth. When
@@ -4002,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
# For [0], FCP commands are issued to Work Queues in a round-robin fashion.
# For [1], FCP commands are issued to a Work Queue associated with the
# current CPU.
+# The driver sets this to 1 if it is able to set up CPU affinity for FCP
+# I/Os through the Work Queue associated with the current CPU. Otherwise,
+# round-robin scheduling of FCP I/Os through the WQs will be used.
*/
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
@@ -4101,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
"Milliseconds driver will wait between polling FCP ring");
/*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+ "Maximum time to wait for task management commands to complete");
+/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
# 0 = MSI disabled
@@ -4112,25 +4132,6 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
-# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
-# This parameter is ignored and will eventually be depricated
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
- LPFC_FCP_IO_CHAN_MAX,
- "Set the number of fast-path FCP work queues, if possible");
-
-/*
-# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
- LPFC_FCP_IO_CHAN_MAX,
- "Set the number of fast-path FCP event queues, if possible");
-
-/*
# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
#
# Value range is [1,7]. Default value is 4.
@@ -4276,6 +4277,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_devloss_tmo,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
+ &dev_attr_lpfc_first_burst_size,
&dev_attr_lpfc_ack0,
&dev_attr_lpfc_topology,
&dev_attr_lpfc_scan_down,
@@ -4304,11 +4306,10 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_issue_reset,
&dev_attr_lpfc_poll,
&dev_attr_lpfc_poll_tmo,
+ &dev_attr_lpfc_task_mgmt_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
- &dev_attr_lpfc_fcp_wq_count,
- &dev_attr_lpfc_fcp_eq_count,
&dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
@@ -4352,6 +4353,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_restrict_login,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
+ &dev_attr_lpfc_first_burst_size,
&dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns,
&dev_attr_nport_evt_cnt,
@@ -5284,14 +5286,13 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_topology_init(phba, lpfc_topology);
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+ lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
- lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
- lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
@@ -5331,6 +5332,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
lpfc_restrict_login_init(vport, lpfc_restrict_login);
lpfc_fcp_class_init(vport, lpfc_fcp_class);
lpfc_use_adisc_init(vport, lpfc_use_adisc);
+ lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6630520d295c..82134d20e2d8 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
iocb = &dd_data->context_un.iocb;
ndlp = iocb->ndlp;
rmp = iocb->rmp;
@@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
int request_nseg;
int reply_nseg;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
int rc = 0;
int iocb_stat;
@@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
}
iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (iocb_stat == IOCB_SUCCESS)
+
+ if (iocb_stat == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed yet */
+ if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
- else if (iocb_stat == IOCB_BUSY)
+ } else if (iocb_stat == IOCB_BUSY) {
rc = -EAGAIN;
- else
+ } else {
rc = -EIO;
+ }
/* iocb failed so cleanup */
+ job->dd_data = NULL;
free_rmp:
lpfc_free_bsg_buffers(phba, rmp);
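This open/close choreography around LPFC_IO_CMD_OUTSTANDING repeats in the els and ct_rsp hunks below: the issuing path opens the abort window under phba->hbalock, the completion handler closes it, and the timeout handler bails out with -EAGAIN if the window is already shut. A condensed, hypothetical sketch of the pattern:

```c
#include <linux/spinlock.h>

/* Hypothetical sketch: the timeout handler may only abort while the
 * OUTSTANDING flag is set; issue, completion, and timeout paths all
 * agree by taking the same lock around the flag. */
static int sketch_timeout_abort(spinlock_t *lock, unsigned int *flags_word,
				unsigned int outstanding_bit)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(lock, flags);
	if (!(*flags_word & outstanding_bit))
		rc = -EAGAIN;	/* completion already closed the window */
	else
		*flags_word &= ~outstanding_bit;	/* claim the abort */
	spin_unlock_irqrestore(lock, flags);
	return rc;
}
```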
@@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
rsp = &rspiocbq->iocb;
pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
struct lpfc_iocbq *cmdiocbq;
uint16_t rpi = 0;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
int rc = 0;
@@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (rc == IOCB_SUCCESS)
+ if (rc == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed/released */
+ if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
- else if (rc == IOCB_BUSY)
+ } else if (rc == IOCB_BUSY) {
rc = -EAGAIN;
- else
+ } else {
rc = -EIO;
+ }
-linkdown_err:
+ /* iocb failed so cleanup */
+ job->dd_data = NULL;
+linkdown_err:
cmdiocbq->context1 = ndlp;
lpfc_els_free_iocb(phba, cmdiocbq);
@@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
struct lpfc_hba *phba = vport->phba;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
- struct lpfc_bsg_event *evt;
+ struct lpfc_bsg_event *evt, *evt_next;
struct event_data *evt_dat = NULL;
unsigned long flags;
uint32_t rc = 0;
@@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
event_reply = (struct get_ct_event_reply *)
job->reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
- list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+ list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
if (list_empty(&evt->events_to_get))
break;
@@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
ndlp = dd_data->context_un.iocb.ndlp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
@@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
int rc = 0;
struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
/* allocate our bsg tracking structure */
@@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
- if (rc == IOCB_SUCCESS)
+ if (rc == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O has not been completed/released */
+ if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
+ }
+
+ /* iocb failed so cleanup */
+ job->dd_data = NULL;
issue_ct_rsp_exit:
lpfc_sli_release_iocbq(phba, ctiocb);
@@ -2498,7 +2547,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
struct lpfc_sli_ct_request *ctreq = NULL;
int ret_val = 0;
int time_left;
- int iocb_stat = 0;
+ int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
*txxri = 0;
@@ -2574,12 +2623,13 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
+ cmdiocbq->iocb_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq,
(phba->fc_ratov * 2)
+ LPFC_DRVR_TIMEOUT);
- if (iocb_stat) {
+ if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
ret_val = -EIO;
goto err_get_xri_exit;
}
@@ -2627,7 +2677,7 @@ err_get_xri_exit:
* @phba: Pointer to HBA context object
*
* This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
- * retruns the pointer to the buffer.
+ * returns the pointer to the buffer.
**/
static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
@@ -2963,7 +3013,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint8_t *ptr = NULL, *rx_databuf = NULL;
int rc = 0;
int time_left;
- int iocb_stat;
+ int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
void *dataout = NULL;
uint32_t total_mem;
@@ -3149,12 +3199,14 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
}
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
+ cmdiocbq->iocb_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
- (rsp->ulpStatus != IOCB_SUCCESS))) {
+ if ((iocb_stat != IOCB_SUCCESS) ||
+ ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (rsp->ulpStatus != IOSTAT_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat);
@@ -3209,7 +3261,7 @@ err_loopback_test_exit:
lpfc_bsg_event_unref(evt); /* delete */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- if (cmdiocbq != NULL)
+ if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
lpfc_sli_release_iocbq(phba, cmdiocbq);
if (rspiocbq != NULL)
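The IOCB_TIMEDOUT guard added above reflects the ownership rule this series establishes for timed-out iocbs (see the lpfc_sli_issue_iocb_wait() changes further down): once the wait path reports a timeout, the deferred wake handler owns the iocbq and will free it, so the caller must not. In sketch form:

    iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
                                         rspiocbq, timeout);
    if (iocb_stat != IOCB_TIMEDOUT)
            lpfc_sli_release_iocbq(phba, cmdiocbq); /* caller still owns it */
    /* on IOCB_TIMEDOUT the wake handler releases it instead */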
@@ -5282,9 +5334,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
* remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag
*/
-
cmdiocb = dd_data->context_un.iocb.cmdiocbq;
- spin_lock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O abort window is still open */
+ if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return -EAGAIN;
+ }
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) {
if (check_iocb == cmdiocb) {
@@ -5294,8 +5352,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
}
if (list_empty(&completions))
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
- spin_unlock_irq(&phba->hbalock);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT,
@@ -5319,9 +5376,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
* remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag.
*/
-
cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_lock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) {
if (check_iocb == cmdiocb) {
@@ -5331,8 +5389,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
}
if (list_empty(&completions))
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
- spin_unlock_irq(&phba->hbalock);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT,
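Note the lock-ordering change in lpfc_bsg_timeout(): ct_ev_lock is now dropped before hbalock is taken, so the two locks are never nested. A sketch of the resulting discipline (details elided):

    spin_lock_irqsave(&phba->ct_ev_lock, flags);
    cmdiocb = dd_data->context_un.iocb.cmdiocbq;   /* snapshot job state */
    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

    spin_lock_irqsave(&phba->hbalock, flags);
    /* ... test abort window, walk pring->txq, issue abort ... */
    spin_unlock_irqrestore(&phba->hbalock, flags);

The switch from spin_lock_irq() to spin_lock_irqsave() also makes the handler correct regardless of the caller's interrupt state.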
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 68391177432b..da61d8dc0449 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -280,7 +280,7 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- ctiocb->context1 = NULL;
+ ctiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, ctiocb);
return 0;
@@ -895,7 +895,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0268 NS cmd %x Error (%d %d)\n",
+ "0268 NS cmd x%x Error (x%x x%x)\n",
cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index af49fb03dbb8..e409ba5f728c 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -154,6 +154,7 @@ struct lpfc_node_rrq {
#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
+#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
/* ndlp usage management macros */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6b8ee7449f16..110445f0c58d 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2122,6 +2122,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
npr->estabImagePair = 1;
npr->readXferRdyDis = 1;
+ if (vport->cfg_first_burst_size)
+ npr->writeXferRdyDis = 1;
/* For FCP support */
npr->prliType = PRLI_FCP_TYPE;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 60d6ca2f68c2..883ea2d9f237 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4171,8 +4171,6 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
- ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
}
struct lpfc_nodelist *
@@ -4217,6 +4215,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+
if (state != NLP_STE_UNUSED_NODE)
lpfc_nlp_set_state(vport, ndlp, state);
@@ -4437,6 +4438,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
+ mempool_free(pmb, phba->mbox_mem_pool);
}
/*
@@ -4456,7 +4458,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
int rc;
uint16_t rpi;
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+ ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+ if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "3366 RPI x%x needs to be "
+ "unregistered nlp_flag x%x "
+ "did x%x\n",
+ ndlp->nlp_rpi, ndlp->nlp_flag,
+ ndlp->nlp_DID);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
/* SLI4 ports require the physical rpi value. */
@@ -5608,6 +5618,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4ec3d7c044c2..5464b116d328 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -234,6 +234,9 @@ struct ulp_bde64 {
uint32_t addrHigh;
};
+/* Maximum size of immediate data that can fit into a 128-byte WQE */
+#define LPFC_MAX_BDE_IMM_SIZE 64
+
struct lpfc_sli4_flags {
uint32_t word0;
#define lpfc_idx_rsrc_rdy_SHIFT 0
@@ -2585,6 +2588,9 @@ struct lpfc_sli4_parameters {
#define cfg_mqv_WORD word6
uint32_t word7;
uint32_t word8;
+#define cfg_wqsize_SHIFT 8
+#define cfg_wqsize_MASK 0x0000000f
+#define cfg_wqsize_WORD word8
#define cfg_wqv_SHIFT 14
#define cfg_wqv_MASK 0x00000003
#define cfg_wqv_WORD word8
@@ -3433,7 +3439,8 @@ struct els_request64_wqe {
#define els_req64_hopcnt_SHIFT 24
#define els_req64_hopcnt_MASK 0x000000ff
#define els_req64_hopcnt_WORD word13
- uint32_t reserved[2];
+ uint32_t word14;
+ uint32_t max_response_payload_len;
};
struct xmit_els_rsp64_wqe {
@@ -3548,7 +3555,8 @@ struct gen_req64_wqe {
uint32_t relative_offset;
struct wqe_rctl_dfctl wge_ctl; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4];
+ uint32_t rsvd_12_14[3];
+ uint32_t max_response_payload_len;
};
struct create_xri_wqe {
@@ -3578,7 +3586,13 @@ struct abort_cmd_wqe {
struct fcp_iwrite64_wqe {
struct ulp_bde64 bde;
- uint32_t payload_offset_len;
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x0000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
uint32_t total_xfer_len;
uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */
@@ -3588,7 +3602,13 @@ struct fcp_iwrite64_wqe {
struct fcp_iread64_wqe {
struct ulp_bde64 bde;
- uint32_t payload_offset_len; /* word 3 */
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x0000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
@@ -3598,7 +3618,13 @@ struct fcp_iread64_wqe {
struct fcp_icmnd64_wqe {
struct ulp_bde64 bde; /* words 0-2 */
- uint32_t rsrvd3; /* word 3 */
+ uint32_t word3;
+#define cmd_buff_len_SHIFT 16
+#define cmd_buff_len_MASK 0x0000ffff
+#define cmd_buff_len_WORD word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
uint32_t rsrvd4; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
@@ -3622,6 +3648,13 @@ union lpfc_wqe {
struct gen_req64_wqe gen_req;
};
+union lpfc_wqe128 {
+ uint32_t words[32];
+ struct lpfc_wqe_generic generic;
+ struct xmit_seq64_wqe xmit_sequence;
+ struct gen_req64_wqe gen_req;
+};
+
#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
#define LPFC_FILE_TYPE_GROUP 0xf7
#define LPFC_FILE_ID_GROUP 0xa2
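For context, each _SHIFT/_MASK/_WORD triplet added above is consumed by lpfc's bf_set()/bf_get() accessors, defined elsewhere in lpfc_hw4.h; they read approximately as follows (reproduced from memory, so treat as a sketch):

    #define bf_set(name, ptr, value) \
            ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
                    ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
    #define bf_get(name, ptr) \
            (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

This is why the new cmd_buff_len and payload_offset_len fields can share word3: each macro masks and shifts its own sub-field without disturbing the other.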
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e0b20fad8502..ca6bf2af7ce8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -472,10 +472,22 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_sli_read_link_ste(phba);
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
- if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
- phba->cfg_hba_queue_depth =
- (mb->un.varRdConfig.max_xri + 1) -
- lpfc_sli4_get_els_iocb_cnt(phba);
+ i = (mb->un.varRdConfig.max_xri + 1);
+ if (phba->cfg_hba_queue_depth > i) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3359 HBA queue depth changed from %d to %d\n",
+ phba->cfg_hba_queue_depth, i);
+ phba->cfg_hba_queue_depth = i;
+ }
+
+ /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
+ i = (mb->un.varRdConfig.max_xri >> 3);
+ if (phba->pport->cfg_lun_queue_depth > i) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3360 LUN queue depth changed from %d to %d\n",
+ phba->pport->cfg_lun_queue_depth, i);
+ phba->pport->cfg_lun_queue_depth = i;
+ }
phba->lmt = mb->un.varRdConfig.lmt;
@@ -3019,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_max);
spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock_irq(&phba->scsi_buf_list_put_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
- spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@@ -3058,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
}
spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock_irq(&phba->scsi_buf_list_put_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
- spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
return 0;
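The locking change above (here and in the matching lpfc_scsi.c hunks below) replaces a nested spin_lock_irq() with a plain spin_lock(): the outer acquisition already disabled local interrupts, so the inner lock need not disable them again, and the inner unlock must not re-enable them early. The general pattern:

    spin_lock_irq(&get_lock);       /* disables local interrupts */
    spin_lock(&put_lock);           /* irqs already off; plain lock suffices */
    /* ... splice entries between the two lists ... */
    spin_unlock(&put_lock);
    spin_unlock_irq(&get_lock);     /* interrupts re-enabled here */

The get-before-put acquisition order has to stay consistent everywhere the two locks are held together.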
@@ -4533,7 +4545,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
pci_save_state(pdev);
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
- if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ if (pci_is_pcie(pdev))
pdev->needs_freset = 1;
return 0;
@@ -4847,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe;
int longs;
+ /* Get all the module params for configuring this host */
+ lpfc_get_cfgparam(phba);
+
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
if (rc)
@@ -4890,20 +4905,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct lpfc_mbox_ext_buf_ctx));
INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
- /*
- * We need to do a READ_CONFIG mailbox command here before
- * calling lpfc_get_cfgparam. For VFs this will report the
- * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
- * All of the resources allocated
- * for this Port are tied to these values.
- */
- /* Get all the module params for configuring this host */
- lpfc_get_cfgparam(phba);
phba->max_vpi = LPFC_MAX_VPI;
- /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
- phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
-
/* This will be set to correct value after the read_config mbox */
phba->max_vports = 0;
@@ -6664,12 +6667,14 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
goto read_cfg_out;
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
- if (phba->cfg_hba_queue_depth >
- (phba->sli4_hba.max_cfg_param.max_xri -
- lpfc_sli4_get_els_iocb_cnt(phba)))
- phba->cfg_hba_queue_depth =
- phba->sli4_hba.max_cfg_param.max_xri -
- lpfc_sli4_get_els_iocb_cnt(phba);
+ length = phba->sli4_hba.max_cfg_param.max_xri -
+ lpfc_sli4_get_els_iocb_cnt(phba);
+ if (phba->cfg_hba_queue_depth > length) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3361 HBA queue depth changed from %d to %d\n",
+ phba->cfg_hba_queue_depth, length);
+ phba->cfg_hba_queue_depth = length;
+ }
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2)
@@ -6859,11 +6864,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
}
- /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
-
/* The actual number of FCP event queues adopted */
- phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
- phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
/* Get EQ depth from module parameter, fake the default for now */
@@ -7134,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.fcp_wq = NULL;
}
- if (phba->pci_bar0_memmap_p) {
- iounmap(phba->pci_bar0_memmap_p);
- phba->pci_bar0_memmap_p = NULL;
- }
- if (phba->pci_bar2_memmap_p) {
- iounmap(phba->pci_bar2_memmap_p);
- phba->pci_bar2_memmap_p = NULL;
- }
- if (phba->pci_bar4_memmap_p) {
- iounmap(phba->pci_bar4_memmap_p);
- phba->pci_bar4_memmap_p = NULL;
- }
-
/* Release FCP CQ mapping array */
if (phba->sli4_hba.fcp_cq_map != NULL) {
kfree(phba->sli4_hba.fcp_cq_map);
@@ -7935,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
* particular PCI BARs regions is dependent on the type of
* SLI4 device.
*/
- if (pci_resource_start(pdev, 0)) {
- phba->pci_bar0_map = pci_resource_start(pdev, 0);
- bar0map_len = pci_resource_len(pdev, 0);
+ if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+ phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+ bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
/*
* Map SLI4 PCI Config Space Register base to a kernel virtual
@@ -7951,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"registers.\n");
goto out;
}
+ phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
/* Set up BAR0 PCI config space register memory map */
lpfc_sli4_bar0_register_memmap(phba, if_type);
} else {
@@ -7973,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
}
if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, 2))) {
+ (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
/*
* Map SLI4 if type 0 HBA Control Register base to a kernel
* virtual address and setup the registers.
*/
- phba->pci_bar1_map = pci_resource_start(pdev, 2);
- bar1map_len = pci_resource_len(pdev, 2);
+ phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
phba->sli4_hba.ctrl_regs_memmap_p =
ioremap(phba->pci_bar1_map, bar1map_len);
if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@@ -7987,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"ioremap failed for SLI4 HBA control registers.\n");
goto out_iounmap_conf;
}
+ phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
lpfc_sli4_bar1_register_memmap(phba);
}
if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, 4))) {
+ (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
/*
* Map SLI4 if type 0 HBA Doorbell Register base to a kernel
* virtual address and setup the registers.
*/
- phba->pci_bar2_map = pci_resource_start(pdev, 4);
- bar2map_len = pci_resource_len(pdev, 4);
+ phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
phba->sli4_hba.drbl_regs_memmap_p =
ioremap(phba->pci_bar2_map, bar2map_len);
if (!phba->sli4_hba.drbl_regs_memmap_p) {
@@ -8005,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
"ioremap failed for SLI4 HBA doorbell registers.\n");
goto out_iounmap_ctrl;
}
+ phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
if (error)
goto out_iounmap_all;
@@ -8398,7 +8389,8 @@ static int
lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
{
int i, idx, saved_chann, used_chann, cpu, phys_id;
- int max_phys_id, num_io_channel, first_cpu;
+ int max_phys_id, min_phys_id;
+ int num_io_channel, first_cpu, chan;
struct lpfc_vector_map_info *cpup;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
@@ -8416,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
phba->sli4_hba.num_present_cpu));
max_phys_id = 0;
+ min_phys_id = 0xff;
phys_id = 0;
num_io_channel = 0;
first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8439,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
if (cpup->phys_id > max_phys_id)
max_phys_id = cpup->phys_id;
+ if (cpup->phys_id < min_phys_id)
+ min_phys_id = cpup->phys_id;
cpup++;
}
+ phys_id = min_phys_id;
/* Now associate the HBA vectors with specific CPUs */
for (idx = 0; idx < vectors; idx++) {
cpup = phba->sli4_hba.cpu_map;
@@ -8452,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
for (i = 1; i < max_phys_id; i++) {
phys_id++;
if (phys_id > max_phys_id)
- phys_id = 0;
+ phys_id = min_phys_id;
cpu = lpfc_find_next_cpu(phba, phys_id);
if (cpu == LPFC_VECTOR_MAP_EMPTY)
continue;
goto found;
}
+ /* Use round robin for scheduling */
+ phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+ chan = 0;
+ cpup = phba->sli4_hba.cpu_map;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ cpup->channel_id = chan;
+ cpup++;
+ chan++;
+ if (chan >= phba->cfg_fcp_io_channel)
+ chan = 0;
+ }
+
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3329 Cannot set affinity:"
"Error mapping vector %d (%d)\n",
@@ -8496,7 +8504,7 @@ found:
/* Spread vector mapping across multiple physical CPU nodes */
phys_id++;
if (phys_id > max_phys_id)
- phys_id = 0;
+ phys_id = min_phys_id;
}
/*
@@ -8506,7 +8514,7 @@ found:
* Base the remaining IO channel assigned, to IO channels already
* assigned to other CPUs on the same phys_id.
*/
- for (i = 0; i <= max_phys_id; i++) {
+ for (i = min_phys_id; i <= max_phys_id; i++) {
/*
* If there are no io channels already mapped to
* this phys_id, just round robin thru the io_channels.
@@ -8588,10 +8596,11 @@ out:
if (num_io_channel != phba->sli4_hba.num_present_cpu)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3333 Set affinity mismatch:"
- "%d chann != %d cpus: %d vactors\n",
+ "%d chann != %d cpus: %d vectors\n",
num_io_channel, phba->sli4_hba.num_present_cpu,
vectors);
+ /* Enable using cpu affinity for scheduling */
phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
return 1;
}
@@ -8682,9 +8691,12 @@ enable_msix_vectors:
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--)
+ for (--index; index >= 0; index--) {
+ irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+ vector, NULL);
free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]);
+ }
msi_fail_out:
/* Unconfigure MSI-X capability structure */
@@ -8705,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
int index;
/* Free up MSI-X multi-message vectors */
- for (index = 0; index < phba->cfg_fcp_io_channel; index++)
+ for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+ irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+ vector, NULL);
free_irq(phba->sli4_hba.msix_entries[index].vector,
&phba->sli4_hba.fcp_eq_hdl[index]);
+ }
/* Disable MSI-X */
pci_disable_msix(phba->pcidev);
@@ -9154,6 +9169,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+ sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index b1c510f6b8f0..1f292e29d566 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -178,7 +178,8 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->mbxOwner = OWN_HOST;
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
- mb->un.varDmp.entry_index = 0;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ mb->un.varDmp.entry_index = 0;
mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
mb->un.varDmp.co = 0;
@@ -361,7 +362,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* NEW_FEATURE
* SLI-2, Coalescing Response Feature.
*/
- if (phba->cfg_cr_delay) {
+ if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
mb->un.varCfgLnk.cr = 1;
mb->un.varCfgLnk.ci = 1;
mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
@@ -377,7 +378,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varCfgLnk.crtov = phba->fc_crtov;
mb->un.varCfgLnk.citov = phba->fc_citov;
- if (phba->cfg_ack0)
+ if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
mb->un.varCfgLnk.ack0_enable = 1;
mb->mbxCommand = MBX_CONFIG_LINK;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6aaf39a1f1c5..abc361259d6d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -690,11 +690,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if (npr->prliType == PRLI_FCP_TYPE) {
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
- if (npr->targetFunc)
+ if (npr->targetFunc) {
ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
}
@@ -1676,12 +1680,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Check out PRLI rsp */
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
- if (npr->targetFunc)
+ if (npr->targetFunc) {
ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 243de1d324b7..b2ede05a5f0a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
/* get all SCSI buffers need to repost to a local list */
spin_lock_irq(&phba->scsi_buf_list_get_lock);
- spin_lock_irq(&phba->scsi_buf_list_put_lock);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
- spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
/* post the list of scsi buffer sgls to port if available */
@@ -1000,29 +1000,37 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
}
memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
- /* Page alignment is CRITICAL, double check to be sure */
- if (((unsigned long)(psb->data) &
- (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+ /*
+ * 4K Page alignment is CRITICAL to BlockGuard, double check
+ * to be sure.
+ */
+ if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
+ (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
- /* Allocate iotag for psb->cur_iocbq. */
- iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
- if (iotag == 0) {
+
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
- psb->data, psb->dma_handle);
+ psb->data, psb->dma_handle);
kfree(psb);
break;
}
- lxri = lpfc_sli4_next_xritag(phba);
- if (lxri == NO_XRI) {
+ /* Allocate iotag for psb->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+ if (iotag == 0) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
- psb->data, psb->dma_handle);
+ psb->data, psb->dma_handle);
kfree(psb);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "3368 Failed to allocated IOTAG for"
+ " XRI:0x%x\n", lxri);
+ lpfc_sli4_free_xri(phba, lxri);
break;
}
psb->cur_iocbq.sli4_lxritag = lxri;
@@ -1134,22 +1142,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
struct lpfc_scsi_buf * lpfc_cmd = NULL;
struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
- unsigned long gflag = 0;
- unsigned long pflag = 0;
+ unsigned long iflag = 0;
- spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+ spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
list);
if (!lpfc_cmd) {
- spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice(&phba->lpfc_scsi_buf_list_put,
&phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
list_remove_head(scsi_buf_list_get, lpfc_cmd,
struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
}
- spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
return lpfc_cmd;
}
/**
@@ -1167,11 +1174,10 @@ static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
- unsigned long gflag = 0;
- unsigned long pflag = 0;
+ unsigned long iflag = 0;
int found = 0;
- spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+ spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
&phba->lpfc_scsi_buf_list_get, list) {
if (lpfc_test_rrq_active(phba, ndlp,
@@ -1182,11 +1188,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
break;
}
if (!found) {
- spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+ spin_lock(&phba->scsi_buf_list_put_lock);
list_splice(&phba->lpfc_scsi_buf_list_put,
&phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
- spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+ spin_unlock(&phba->scsi_buf_list_put_lock);
list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
&phba->lpfc_scsi_buf_list_get, list) {
if (lpfc_test_rrq_active(
@@ -1197,7 +1203,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
break;
}
}
- spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+ spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (!found)
return NULL;
return lpfc_cmd;
@@ -3966,11 +3972,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
/*
* Check SLI validation that all the transfer was actually done
- * (fcpi_parm should be zero).
+ * (fcpi_parm should be zero). Apply check only to reads.
*/
- } else if (fcpi_parm) {
+ } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "9029 FCP Data Transfer Check Error: "
+ "9029 FCP Read Check Error Data: "
"x%x x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
@@ -4342,6 +4348,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
char tag[2];
uint8_t *ptr;
bool sli4;
+ uint32_t fcpdl;
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return;
@@ -4386,11 +4393,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (scsi_sg_count(scsi_cmnd)) {
if (datadir == DMA_TO_DEVICE) {
iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
- if (sli4)
- iocb_cmd->ulpPU = PARM_READ_CHECK;
- else {
- iocb_cmd->un.fcpi.fcpi_parm = 0;
- iocb_cmd->ulpPU = 0;
+ iocb_cmd->ulpPU = PARM_READ_CHECK;
+ if (vport->cfg_first_burst_size &&
+ (pnode->nlp_flag & NLP_FIRSTBURST)) {
+ fcpdl = scsi_bufflen(scsi_cmnd);
+ if (fcpdl < vport->cfg_first_burst_size)
+ piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
+ else
+ piocbq->iocb.un.fcpi.fcpi_XRdy =
+ vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4OutputRequests++;
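The first-burst seeding above simply clamps the initial XFER_RDY amount to the configured limit; the two-armed if reduces to a min(). An equivalent form (a sketch, not what the patch uses):

    piocbq->iocb.un.fcpi.fcpi_XRdy =
            min_t(uint32_t, scsi_bufflen(scsi_cmnd),
                  vport->cfg_first_burst_size);

The seed is only applied when the target advertised first-burst support via writeXferRdyDis in its PRLI, tracked with the new NLP_FIRSTBURST flag.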
@@ -4479,9 +4490,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
piocb->ulpContext =
vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
}
- if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
- piocb->ulpFCP2Rcvy = 1;
- }
+ piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
/* ulpTimeout is only one byte */
@@ -4878,6 +4887,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out_unlock;
}
+ /* Indicate the IO is being aborted by the driver. */
+ iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
/*
* The scsi command can not be in txq and it is in flight because the
* pCmd is still pointig at the SCSI command we have to abort. There
@@ -4972,6 +4984,73 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
}
}
+
+/**
+ * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ *
+ * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ uint32_t rsp_info;
+ uint32_t rsp_len;
+ uint8_t rsp_info_code;
+ int ret = FAILED;
+
+ if (fcprsp == NULL)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0703 fcp_rsp is missing\n");
+ else {
+ rsp_info = fcprsp->rspStatus2;
+ rsp_len = be32_to_cpu(fcprsp->rspRspLen);
+ rsp_info_code = fcprsp->rspInfo3;
+
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_FCP,
+ "0706 fcp_rsp valid 0x%x,"
+ " rsp len=%d code 0x%x\n",
+ rsp_info,
+ rsp_len, rsp_info_code);
+
+ if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && (rsp_len == 8)) {
+ switch (rsp_info_code) {
+ case RSP_NO_FAILURE:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0715 Task Mgmt No Failure\n");
+ ret = SUCCESS;
+ break;
+ case RSP_TM_NOT_SUPPORTED: /* TM rejected */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0716 Task Mgmt Target "
+ "reject\n");
+ break;
+ case RSP_TM_NOT_COMPLETED: /* TM failed */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0717 Task Mgmt Target "
+ "failed TM\n");
+ break;
+ case RSP_TM_INVALID_LU: /* TM to invalid LU! */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0718 Task Mgmt to invalid "
+ "LUN\n");
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
@@ -5006,7 +5085,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
if (lpfc_cmd == NULL)
return FAILED;
- lpfc_cmd->timeout = 60;
+ lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
@@ -5022,6 +5101,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
+ iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %d "
@@ -5032,13 +5112,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
- if (status != IOCB_SUCCESS) {
- if (status == IOCB_TIMEDOUT) {
- iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
- ret = TIMEOUT_ERROR;
- } else
- ret = FAILED;
- lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ if ((status != IOCB_SUCCESS) ||
+ (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
"iocb_flag x%x\n",
@@ -5046,9 +5121,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
iocbqrsp->iocb.un.ulpWord[4],
iocbq->iocb_flag);
- } else if (status == IOCB_BUSY)
- ret = FAILED;
- else
+ /* status == IOCB_SUCCESS here means the failure was in ulpStatus */
+ if (status == IOCB_SUCCESS) {
+ if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ /* Something in the FCP_RSP was invalid.
+ * Check conditions */
+ ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
+ else
+ ret = FAILED;
+ } else if (status == IOCB_TIMEDOUT) {
+ ret = TIMEOUT_ERROR;
+ } else {
+ ret = FAILED;
+ }
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ } else
ret = SUCCESS;
lpfc_sli_release_iocbq(phba, iocbqrsp);
@@ -5172,7 +5259,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
- int status, ret = SUCCESS;
+ int status;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5213,9 +5300,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
* So, continue on.
* We will report success if all the i/o aborts successfully.
*/
- ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ if (status == SUCCESS)
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_LUN);
- return ret;
+
+ return status;
}
/**
@@ -5239,7 +5328,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
- int status, ret = SUCCESS;
+ int status;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5280,9 +5369,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
* So, continue on.
* We will report success if all the i/o aborts successfully.
*/
- ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ if (status == SUCCESS)
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
- return ret;
+ return status;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index b1d9f7fcb911..852ff7def493 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -73,6 +73,7 @@ struct fcp_rsp {
#define RSP_RO_MISMATCH_ERR 0x03
#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */
#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */
+#define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */
uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43440ca16f46..8f580fda443f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
uint32_t);
+static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
+static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -6163,6 +6165,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
kfree(vpd);
goto out_free_mbox;
}
+
mqe = &mboxq->u.mqe;
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
@@ -6249,6 +6252,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+ /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
+ rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
+ if (phba->pport->cfg_lun_queue_depth > rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3362 LUN queue depth changed from %d to %d\n",
+ phba->pport->cfg_lun_queue_depth, rc);
+ phba->pport->cfg_lun_queue_depth = rc;
+ }
+
/*
* Discover the port's supported feature set and match it against the
* hosts requests.
@@ -6555,6 +6568,108 @@ lpfc_mbox_timeout(unsigned long ptr)
return;
}
+/**
+ * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
+ * are pending
+ * @phba: Pointer to HBA context object.
+ *
+ * This function checks if any mailbox completions are present on the mailbox
+ * completion queue.
+ **/
+bool
+lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
+{
+
+ uint32_t idx;
+ struct lpfc_queue *mcq;
+ struct lpfc_mcqe *mcqe;
+ bool pending_completions = false;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Check for completions on mailbox completion queue */
+
+ mcq = phba->sli4_hba.mbx_cq;
+ idx = mcq->hba_index;
+ while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
+ mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+ if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
+ (!bf_get_le32(lpfc_trailer_async, mcqe))) {
+ pending_completions = true;
+ break;
+ }
+ idx = (idx + 1) % mcq->entry_count;
+ if (mcq->hba_index == idx)
+ break;
+ }
+ return pending_completions;
+
+}
+
+/**
+ * lpfc_sli4_process_missed_mbox_completions - process mbox completions
+ * that were missed.
+ * @phba: Pointer to HBA context object.
+ *
+ * For SLI4, it is possible to miss an interrupt. As such, mbox completions
+ * may be missed, causing erroneous mailbox timeouts. This function checks
+ * to see if mbox completions are on the mailbox completion queue
+ * and will process all the completions associated with the eq for the
+ * mailbox completion queue.
+ **/
+bool
+lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+{
+
+ uint32_t eqidx;
+ struct lpfc_queue *fpeq = NULL;
+ struct lpfc_eqe *eqe;
+ bool mbox_pending;
+
+ if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+ return false;
+
+ /* Find the eq associated with the mcq */
+
+ if (phba->sli4_hba.hba_eq)
+ for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
+ if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
+ phba->sli4_hba.mbx_cq->assoc_qid) {
+ fpeq = phba->sli4_hba.hba_eq[eqidx];
+ break;
+ }
+ if (!fpeq)
+ return false;
+
+ /* Turn off interrupts from this EQ */
+
+ lpfc_sli4_eq_clr_intr(fpeq);
+
+ /* Check to see if a mbox completion is pending */
+
+ mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
+
+ /*
+ * If a mbox completion is pending, process all the events on the EQ
+ * associated with the mbox completion queue (this could include
+ * mailbox commands, async events, els commands, receive queue data
+ * and fcp commands)
+ */
+
+ if (mbox_pending)
+ while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+ lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+ fpeq->EQ_processed++;
+ }
+
+ /* Always clear and re-arm the EQ */
+
+ lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+ return mbox_pending;
+
+}
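Both consumers added below follow the same call pattern: ask this routine to drain any missed completions first, and only treat the mailbox as genuinely timed out when nothing was found. In sketch form:

    if (lpfc_sli4_process_missed_mbox_completions(phba))
            return;    /* the completion was delayed, not lost */
    /* fall through to real timeout handling */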
/**
* lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
@@ -6572,6 +6687,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
+ /* If the mailbox completed, process the completion and return */
+ if (lpfc_sli4_process_missed_mbox_completions(phba))
+ return;
+
/* Check the pmbox pointer first. There is a race condition
* between the mbox timeout handler getting executed in the
* worklist and the mailbox actually completing. When this
@@ -7066,6 +7185,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
1000) + jiffies;
spin_unlock_irq(&phba->hbalock);
+ /* Make sure the mailbox is really active */
+ if (timeout)
+ lpfc_sli4_process_missed_mbox_completions(phba);
+
/* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
@@ -8065,6 +8188,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ wqe->els_req.max_response_payload_len = total_len - xmit_len;
break;
case CMD_XMIT_SEQUENCE64_CX:
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
@@ -8109,8 +8233,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
command_type = FCP_COMMAND_DATA_OUT;
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- wqe->fcp_iwrite.payload_offset_len =
- xmit_len + sizeof(struct fcp_rsp);
+ bf_set(payload_offset_len, &wqe->fcp_iwrite,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iwrite,
+ 0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
@@ -8128,8 +8254,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- wqe->fcp_iread.payload_offset_len =
- xmit_len + sizeof(struct fcp_rsp);
+ bf_set(payload_offset_len, &wqe->fcp_iread,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_iread,
+ 0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
@@ -8145,8 +8273,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
break;
case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ xmit_len + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd,
+ 0);
/* word3 iocb=IO_TAG wqe=reserved */
- wqe->fcp_icmd.rsrvd3 = 0;
bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
@@ -8192,6 +8325,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ wqe->gen_req.max_response_payload_len = total_len - xmit_len;
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_ELS_RSP64_CX:
@@ -9820,6 +9954,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abort_cmd) != 0)
continue;
+ /*
+ * If the iocbq is already being aborted, don't take a second
+ * action, but do count it.
+ */
+ if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
/* issue ABTS for this IOCB based on iotag */
abtsiocb = lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
@@ -9827,6 +9968,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue;
}
+ /* indicate the IO is being aborted by the driver. */
+ iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
cmd = &iocbq->iocb;
abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@@ -9836,7 +9980,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass;
- abtsiocb->vport = phba->pport;
+ abtsiocb->vport = vport;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -9889,6 +10033,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd;
spin_lock_irqsave(&phba->hbalock, iflags);
+ if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+
+ /*
+ * A time out has occurred for the iocb. If a time out
+ * completion handler has been supplied, call it. Otherwise,
+ * just free the iocbq.
+ */
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
+ cmdiocbq->wait_iocb_cmpl = NULL;
+ if (cmdiocbq->iocb_cmpl)
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ else
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ return;
+ }
+
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
@@ -9944,10 +10106,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
- * iocb to complete. If the iocb command is not
- * completed within timeout seconds, it returns IOCB_TIMEDOUT.
- * Caller should not free the iocb resources if this function
- * returns IOCB_TIMEDOUT.
+ * iocb to complete. The iocb_cmpl field of the shall be used
+ * to handle iocbs which time out. If the field is NULL, the
+ * function shall free the iocbq structure. If more clean up is
+ * needed, the caller is expected to provide a completion function
+ * that will provide the needed clean up. If the iocb command is
+ * not completed within timeout seconds, the function will either
+ * free the iocbq structure (if iocb_cmpl == NULL) or execute the
+ * completion function set in the iocb_cmpl field and then return
+ * a status of IOCB_TIMEDOUT. The caller should not free the iocb
+ * resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using an
* non-interruptible wait.
* This function will sleep while waiting for iocb completion.
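A hypothetical caller sketch of the new contract (my_cleanup_cmpl and its body are illustrative, not from the patch): supply a completion handler before issuing, and it will run if the iocb completes only after the wait has timed out.

    static void my_cleanup_cmpl(struct lpfc_hba *phba,
                                struct lpfc_iocbq *cmdiocb,
                                struct lpfc_iocbq *rspiocb)
    {
            /* free any driver-private buffers hung off cmdiocb, then: */
            lpfc_sli_release_iocbq(phba, cmdiocb);
    }

    piocb->iocb_cmpl = my_cleanup_cmpl;  /* saved into wait_iocb_cmpl */
    rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
                                  rspiocb, timeout);
    if (rc == IOCB_TIMEDOUT)
            return -ETIMEDOUT;           /* do NOT free piocb here */

If no handler is supplied (iocb_cmpl == NULL at issue time), the wake handler simply releases the iocbq itself.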
@@ -9980,6 +10148,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
int txq_cnt = 0;
int txcmplq_cnt = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ unsigned long iflags;
+ bool iocb_completed = true;
+
/*
* If the caller has provided a response iocbq buffer, then context2
* is NULL or its an error.
@@ -9990,9 +10161,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
piocb->context2 = prspiocbq;
}
+ piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
- piocb->iocb_flag &= ~LPFC_IO_WAKE;
+ piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -10009,10 +10181,26 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
- if (piocb->iocb_flag & LPFC_IO_WAKE) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
+ /*
+ * IOCB timed out. Inform the wake iocb wait
+ * completion function and set local status
+ */
+
+ iocb_completed = false;
+ piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (iocb_completed) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0331 IOCB wake signaled\n");
+ /* Note: we are not indicating if the IOCB has a success
+ * status or not - that's for the caller to check.
+ * IOCB_SUCCESS means just that the command was sent and
+ * completed. Not that it completed successfully.
+ */
} else if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0338 IOCB wait timeout error - no "
@@ -10122,7 +10310,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
*/
if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
retval = MBX_SUCCESS;
- lpfc_sli4_swap_str(phba, pmboxq);
} else {
retval = MBX_TIMEOUT;
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -11015,8 +11202,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
+ int numBdes, i;
unsigned long iflags;
- uint32_t status;
+ uint32_t status, max_response;
+ struct lpfc_dmabuf *dmabuf;
+ struct ulp_bde64 *bpl, bde;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -11033,7 +11223,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+ switch (pIocbOut->iocb.ulpCommand) {
+ case CMD_ELS_REQUEST64_CR:
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ bde.tus.w = le32_to_cpu(bpl[1].tus.w);
+ max_response = bde.tus.f.bdeSize;
+ break;
+ case CMD_GEN_REQUEST64_CR:
+ max_response = 0;
+ if (!pIocbOut->context3)
+ break;
+ numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
+ sizeof(struct ulp_bde64);
+ dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ for (i = 0; i < numBdes; i++) {
+ bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ max_response += bde.tus.f.bdeSize;
+ }
+ break;
+ default:
+ max_response = wcqe->total_data_placed;
+ break;
+ }
+ if (max_response < wcqe->total_data_placed)
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
+ else
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize =
+ wcqe->total_data_placed;
}
/* Convert BG errors for completion status */
@@ -12184,7 +12403,6 @@ static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
struct pci_dev *pdev;
- unsigned long bar_map, bar_map_len;
if (!phba->pcidev)
return NULL;
@@ -12193,25 +12411,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
switch (pci_barset) {
case WQ_PCI_BAR_0_AND_1:
- if (!phba->pci_bar0_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
- phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar0_memmap_p;
case WQ_PCI_BAR_2_AND_3:
- if (!phba->pci_bar2_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar2_memmap_p;
case WQ_PCI_BAR_4_AND_5:
- if (!phba->pci_bar4_memmap_p) {
- bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
- bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
- }
return phba->pci_bar4_memmap_p;
default:
break;
@@ -12820,10 +13023,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->page_count);
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
+
+ /* wqv is the earliest version supported, NOT the latest */
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+ switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ case LPFC_Q_CREATE_VERSION_0:
+ switch (wq->entry_size) {
+ default:
+ case 64:
+ /* Nothing to do, version 0 ONLY supports 64 byte */
+ page = wq_create->u.request.page;
+ break;
+ case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
+ /* If we get here the HBA MUST also support V1 and
+ * we MUST use it
+ */
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ LPFC_Q_CREATE_VERSION_1);
+
+ bf_set(lpfc_mbx_wq_create_wqe_count,
+ &wq_create->u.request_1, wq->entry_count);
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_128);
+ bf_set(lpfc_mbx_wq_create_page_size,
+ &wq_create->u.request_1,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ page = wq_create->u.request_1.page;
+ break;
+ }
+ break;
+ case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
switch (wq->entry_size) {
@@ -12834,6 +13071,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
LPFC_WQ_WQE_SIZE_64);
break;
case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
bf_set(lpfc_mbx_wq_create_wqe_size,
&wq_create->u.request_1,
LPFC_WQ_WQE_SIZE_128);
@@ -12842,9 +13084,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
(PAGE_SIZE/SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
- } else {
- page = wq_create->u.request.page;
+ break;
+ default:
+ status = -ERANGE;
+ goto out;
}
+
list_for_each_entry(dmabuf, &wq->page_list, list) {
memset(dmabuf->virt, 0, hw_page_size);
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
@@ -14665,14 +14910,20 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.unsli3.rcvsli3.vpi =
vport->phba->vpi_ids[vport->vpi];
/* put the first buffer into the first IOCBq */
+ tot_len = bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
first_iocbq->iocb.ulpBdeCount = 1;
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ if (tot_len > LPFC_DATA_BUF_SIZE)
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ else
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
+
first_iocbq->iocb.un.rcvels.remoteID = sid;
- tot_len = bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
}
iocbq = first_iocbq;
@@ -14688,14 +14939,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
- pbde = (struct ulp_bde64 *)
- &iocbq->iocb.unsli3.sli3Words[4];
- pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
-
/* We need to get the size out of the right CQE */
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
+ if (len > LPFC_DATA_BUF_SIZE)
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+ else
+ pbde->tus.f.bdeSize = len;
+
iocbq->iocb.unsli3.rcvsli3.acc_len += len;
tot_len += len;
} else {
@@ -14710,16 +14964,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
iocbq->context2 = d_buf;
iocbq->context3 = NULL;
iocbq->iocb.ulpBdeCount = 1;
- iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ if (len > LPFC_DATA_BUF_SIZE)
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ else
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
- /* We need to get the size out of the right CQE */
- hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
- len = bf_get(lpfc_rcqe_length,
- &hbq_buf->cq_event.cqe.rcqe_cmpl);
tot_len += len;
iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
@@ -15001,6 +15258,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
uint16_t max_rpi, rpi_limit;
uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr;
+ unsigned long iflag;
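+ /* iflag saves the caller's interrupt state; using the
+ * irqsave/irqrestore variants below keeps this path safe when it is
+ * entered with interrupts already disabled.
+ */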
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
@@ -15009,7 +15267,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* Fetch the next logical rpi. Because this index is logical,
* the driver starts at 0 each time.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
@@ -15025,7 +15283,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi;
}
@@ -15034,7 +15292,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* extents.
*/
if (!phba->sli4_hba.rpi_hdrs_in_use) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi;
}
@@ -15045,7 +15303,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* how many are supported max by the device.
*/
rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
@@ -15705,7 +15963,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
- struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2762 FCF (x%x) reached driver's book "
@@ -15715,7 +15973,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
}
/* Clear the eligible FCF record index bmask */
spin_lock_irq(&phba->hbalock);
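+ /* The matched entry is deleted inside the loop, so the _safe
+ * iterator is required to keep the traversal valid.
+ */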
- list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+ list) {
if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
list_del_init(&fcf_pri->list);
break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 9d2e0c6fe334..6b0f2478706e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -58,9 +58,10 @@ struct lpfc_iocbq {
IOCB_t iocb; /* IOCB cmd */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
- uint16_t iocb_flag;
+ uint32_t iocb_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
-#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
+#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
+#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
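+/* Note: LPFC_IO_WAKE_TMO deliberately aliases LPFC_IO_WAKE; both end the
+ * synchronous wait, one marking completion and the other a timeout.
+ */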
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
@@ -72,11 +73,11 @@ struct lpfc_iocbq {
#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */
#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */
#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */
+#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
- uint8_t rsvd2;
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
struct lpfc_vport *vport;/* virtual port pointer */
@@ -93,6 +94,8 @@ struct lpfc_iocbq {
void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+ void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d710b87a4417..298c8cd1a89d 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -117,6 +117,7 @@ union sli4_qe {
struct lpfc_rcqe_complete *rcqe_complete;
struct lpfc_mqe *mqe;
union lpfc_wqe *wqe;
+ union lpfc_wqe128 *wqe128;
struct lpfc_rqe *rqe;
};
@@ -325,12 +326,14 @@ struct lpfc_bmbx {
#define LPFC_EQE_SIZE_16B 16
#define LPFC_CQE_SIZE 16
#define LPFC_WQE_SIZE 64
+#define LPFC_WQE128_SIZE 128
#define LPFC_MQE_SIZE 256
#define LPFC_RQE_SIZE 8
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 1024
#define LPFC_WQE_DEF_COUNT 256
+#define LPFC_WQE128_DEF_COUNT 128
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -416,6 +419,9 @@ struct lpfc_pc_sli4_params {
uint8_t mqv;
uint8_t wqv;
uint8_t rqv;
+ uint8_t wqsize;
+#define LPFC_WQ_SZ64_SUPPORT 1
+#define LPFC_WQ_SZ128_SUPPORT 2
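+/* wqsize is a bitmask of the WQE sizes the port supports, not a size */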
};
struct lpfc_iov {
@@ -517,7 +523,7 @@ struct lpfc_sli4_hba {
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
- uint8_t fw_func_mode; /* FW function protocol mode */
+ uint32_t fw_func_mode; /* FW function protocol mode */
uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */
@@ -667,6 +673,7 @@ void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c6c32eebf3dd..e3094c4e143b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.40"
+#define LPFC_DRIVER_VERSION "8.3.43"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e28e431564b0..a87ee33f4f2a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -387,6 +387,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* Create binary sysfs attribute for vport */
lpfc_alloc_sysfs_attr(vport);
+ /* Inherit the physical port's LUN queue depth */
+ vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;
+
*(struct lpfc_vport **)fc_vport->dd_data = vport;
vport->fc_vport = fc_vport;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index e6a1e0b38a19..515c9629e9fe 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -549,7 +549,7 @@ out_probe_one:
/**
* megaraid_detach_one - release framework resources and call LLD release routine
- * @pdev : handle for our PCI cofiguration space
+ * @pdev : handle for our PCI configuration space
*
* This routine is called during driver unload. We free all the allocated
* resources and call the corresponding LLD so that it can also release all
@@ -979,7 +979,7 @@ megaraid_fini_mbox(adapter_t *adapter)
* @adapter : soft state of the raid controller
*
 * Allocate and align the shared mailbox. This mailbox is used to issue
- * all the commands. For IO based controllers, the mailbox is also regsitered
+ * all the commands. For IO based controllers, the mailbox is also registered
* with the FW. Allocate memory for all commands as well.
* This is our big allocator.
*/
@@ -2027,7 +2027,7 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
* @scb : scsi control block
* @scp : scsi command from the mid-layer
*
- * Prepare a command for the scsi physical devices. This rountine prepares
+ * Prepare a command for the scsi physical devices. This routine prepares
* commands for devices which can take extended CDBs (>10 bytes).
*/
static void
@@ -2586,7 +2586,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
}
/**
- * megaraid_reset_handler - device reset hadler for mailbox based driver
+ * megaraid_reset_handler - device reset handler for mailbox based driver
* @scp : reference command
*
* Reset handler for the mailbox based controller. First try to find out if
@@ -3446,7 +3446,7 @@ megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
* megaraid_mbox_setup_device_map - manage device ids
* @adapter : Driver's soft state
*
- * Manange the device ids to have an appropriate mapping between the kernel
+ * Manage the device ids to have an appropriate mapping between the kernel
* scsi addresses and megaraid scsi and logical drive addresses. We export
* scsi devices on their actual addresses, whereas the logical drives are
* exported on a virtual scsi channel.
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 25506c777381..dfffd0f37916 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -896,7 +896,7 @@ hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
/**
* mraid_mm_register_adp - Registration routine for low level drivers
- * @lld_adp : Adapter objejct
+ * @lld_adp : Adapter object
*/
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 04a42a505852..e9e543c58485 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.600.18.00-rc1"
-#define MEGASAS_RELDATE "May. 15, 2013"
-#define MEGASAS_EXT_VERSION "Wed. May. 15 17:00:00 PDT 2013"
+#define MEGASAS_VERSION "06.700.06.00-rc1"
+#define MEGASAS_RELDATE "Aug. 31, 2013"
+#define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013"
/*
* Device IDs
@@ -170,6 +170,7 @@
#define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_LD_GET_LIST 0x03010000
+#define MR_DCMD_LD_LIST_QUERY 0x03010100
#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
#define MR_FLUSH_CTRL_CACHE 0x01
@@ -345,6 +346,15 @@ enum MR_PD_QUERY_TYPE {
MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
};
+enum MR_LD_QUERY_TYPE {
+ MR_LD_QUERY_TYPE_ALL = 0,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+ MR_LD_QUERY_TYPE_USED_TGT_IDS = 2,
+ MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3,
+ MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4,
+};
+
+
#define MR_EVT_CFG_CLEARED 0x0004
#define MR_EVT_LD_STATE_CHANGE 0x0051
#define MR_EVT_PD_INSERTED 0x005b
@@ -435,6 +445,14 @@ struct MR_LD_LIST {
} ldList[MAX_LOGICAL_DRIVES];
} __packed;
+struct MR_LD_TARGETID_LIST {
+ u32 size;
+ u32 count;
+ u8 pad[3];
+ u8 targetId[MAX_LOGICAL_DRIVES];
+};
+
+
/*
* SAS controller properties
*/
@@ -474,21 +492,39 @@ struct megasas_ctrl_prop {
* a bit in the following structure.
*/
struct {
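+ /* Under __BIG_ENDIAN_BITFIELD the fields are declared in reverse
+ * (MSB-first) order so the in-memory layout matches the
+ * little-endian layout the firmware defines.
+ */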
- u32 copyBackDisabled : 1;
- u32 SMARTerEnabled : 1;
- u32 prCorrectUnconfiguredAreas : 1;
- u32 useFdeOnly : 1;
- u32 disableNCQ : 1;
- u32 SSDSMARTerEnabled : 1;
- u32 SSDPatrolReadEnabled : 1;
- u32 enableSpinDownUnconfigured : 1;
- u32 autoEnhancedImport : 1;
- u32 enableSecretKeyControl : 1;
- u32 disableOnlineCtrlReset : 1;
- u32 allowBootWithPinnedCache : 1;
- u32 disableSpinDownHS : 1;
- u32 enableJBOD : 1;
- u32 reserved :18;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:18;
+ u32 enableJBOD:1;
+ u32 disableSpinDownHS:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 enableSecretKeyControl:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 disableNCQ:1;
+ u32 useFdeOnly:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 SMARTerEnabled:1;
+ u32 copyBackDisabled:1;
+#else
+ u32 copyBackDisabled:1;
+ u32 SMARTerEnabled:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 useFdeOnly:1;
+ u32 disableNCQ:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSecretKeyControl:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableSpinDownHS:1;
+ u32 enableJBOD:1;
+ u32 reserved:18;
+#endif
} OnOffProperties;
u8 autoSnapVDSpace;
u8 viewSpace;
@@ -802,6 +838,30 @@ struct megasas_ctrl_info {
u16 cacheMemorySize; /*7A2h */
struct { /*7A4h */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:11;
+ u32 supportUnevenSpans:1;
+ u32 dedicatedHotSparesLimited:1;
+ u32 headlessMode:1;
+ u32 supportEmulatedDrives:1;
+ u32 supportResetNow:1;
+ u32 realTimeScheduler:1;
+ u32 supportSSDPatrolRead:1;
+ u32 supportPerfTuning:1;
+ u32 disableOnlinePFKChange:1;
+ u32 supportJBOD:1;
+ u32 supportBootTimePFKChange:1;
+ u32 supportSetLinkSpeed:1;
+ u32 supportEmergencySpares:1;
+ u32 supportSuspendResumeBGops:1;
+ u32 blockSSDWriteCacheChange:1;
+ u32 supportShieldState:1;
+ u32 supportLdBBMInfo:1;
+ u32 supportLdPIType3:1;
+ u32 supportLdPIType2:1;
+ u32 supportLdPIType1:1;
+ u32 supportPIcontroller:1;
+#else
u32 supportPIcontroller:1;
u32 supportLdPIType1:1;
u32 supportLdPIType2:1;
@@ -827,6 +887,7 @@ struct megasas_ctrl_info {
u32 supportUnevenSpans:1;
u32 reserved:11;
+#endif
} adapterOperations2;
u8 driverVersion[32]; /*7A8h */
@@ -863,7 +924,7 @@ struct megasas_ctrl_info {
* ===============================
*/
#define MEGASAS_MAX_PD_CHANNELS 2
-#define MEGASAS_MAX_LD_CHANNELS 2
+#define MEGASAS_MAX_LD_CHANNELS 1
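+/* LD channels sit above the PD channels in the host's channel space, so
+ * an LD's host channel is MEGASAS_MAX_PD_CHANNELS + i.
+ */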
#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
MEGASAS_MAX_LD_CHANNELS)
#define MEGASAS_MAX_DEV_PER_CHANNEL 128
@@ -1051,9 +1112,15 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:30;
+ u32 support_additional_msix:1;
+ u32 support_fp_remote_lun:1;
+#else
u32 support_fp_remote_lun:1;
u32 support_additional_msix:1;
u32 reserved:30;
+#endif
} mfi_capabilities;
u32 reg;
} MFI_CAPABILITIES;
@@ -1464,6 +1531,7 @@ struct megasas_instance {
struct megasas_register_set __iomem *reg_set;
u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
+ struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
u8 ld_ids[MEGASAS_MAX_LD_IDS];
s8 init_id;
@@ -1656,4 +1724,16 @@ struct megasas_mgmt_info {
int max_index;
};
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 1f0ca68409d4..2cf9470dd11b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : 06.600.18.00-rc1
+ * Version : 06.700.06.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_ld_list_query(struct megasas_instance *instance,
+ u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
@@ -374,13 +376,11 @@ static int
megasas_check_reset_xscale(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- u32 consumer;
- consumer = *instance->consumer;
if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
- (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
+ (le32_to_cpu(*instance->consumer) ==
+ MEGASAS_ADPRESET_INPROG_SIGN))
return 1;
- }
return 0;
}
@@ -629,9 +629,10 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
{
unsigned long flags;
spin_lock_irqsave(&instance->hba_lock, flags);
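+ /* Presumably the skinny port pair carries the full 64-bit frame
+ * address: high dword first, then the low dword ORed with the frame
+ * count and the low bit the firmware expects set.
+ */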
- writel(0, &(regs)->inbound_high_queue_port);
- writel((frame_phys_addr | (frame_count<<1))|1,
- &(regs)->inbound_low_queue_port);
+ writel(upper_32_bits(frame_phys_addr),
+ &(regs)->inbound_high_queue_port);
+ writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+ &(regs)->inbound_low_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -879,8 +880,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+ frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
/*
* Issue the frame using inbound queue port
@@ -944,10 +945,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
*/
abort_fr->cmd = MFI_CMD_ABORT;
abort_fr->cmd_status = 0xFF;
- abort_fr->flags = 0;
- abort_fr->abort_context = cmd_to_abort->index;
- abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
- abort_fr->abort_mfi_phys_addr_hi = 0;
+ abort_fr->flags = cpu_to_le16(0);
+ abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+ abort_fr->abort_mfi_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+ abort_fr->abort_mfi_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
cmd->sync_cmd = 1;
cmd->cmd_status = 0xFF;
@@ -986,8 +989,8 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+ mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
}
}
return sge_count;
@@ -1015,8 +1018,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+ mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
}
}
return sge_count;
@@ -1043,10 +1046,11 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge_skinny[i].length =
+ cpu_to_le32(sg_dma_len(os_sgl));
mfi_sgl->sge_skinny[i].phys_addr =
- sg_dma_address(os_sgl);
- mfi_sgl->sge_skinny[i].flag = 0;
+ cpu_to_le64(sg_dma_address(os_sgl));
+ mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
}
}
return sge_count;
@@ -1155,8 +1159,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->cdb_len = scp->cmd_len;
pthru->timeout = 0;
pthru->pad_0 = 0;
- pthru->flags = flags;
- pthru->data_xfer_len = scsi_bufflen(scp);
+ pthru->flags = cpu_to_le16(flags);
+ pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
@@ -1168,18 +1172,18 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
if ((scp->request->timeout / HZ) > 0xFFFF)
pthru->timeout = 0xFFFF;
else
- pthru->timeout = scp->request->timeout / HZ;
+ pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
}
/*
* Construct SGL
*/
if (instance->flag_ieee == 1) {
- pthru->flags |= MFI_FRAME_SGL64;
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
&pthru->sgl);
} else if (IS_DMA64) {
- pthru->flags |= MFI_FRAME_SGL64;
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
pthru->sge_count = megasas_make_sgl64(instance, scp,
&pthru->sgl);
} else
@@ -1196,8 +1200,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
* Sense info specific
*/
pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
- pthru->sense_buf_phys_addr_hi = 0;
- pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+ pthru->sense_buf_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
+ pthru->sense_buf_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
/*
* Compute the total number of frames this command consumes. FW uses
@@ -1248,7 +1254,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
ldio->timeout = 0;
ldio->reserved_0 = 0;
ldio->pad_0 = 0;
- ldio->flags = flags;
+ ldio->flags = cpu_to_le16(flags);
ldio->start_lba_hi = 0;
ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
@@ -1256,52 +1262,59 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
* 6-byte READ(0x08) or WRITE(0x0A) cdb
*/
if (scp->cmd_len == 6) {
- ldio->lba_count = (u32) scp->cmnd[4];
- ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
- ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) |
+ (u32) scp->cmnd[3]);
- ldio->start_lba_lo &= 0x1FFFFF;
+ ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
}
/*
* 10-byte READ(0x28) or WRITE(0x2A) cdb
*/
else if (scp->cmd_len == 10) {
- ldio->lba_count = (u32) scp->cmnd[8] |
- ((u32) scp->cmnd[7] << 8);
- ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8));
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
}
/*
* 12-byte READ(0xA8) or WRITE(0xAA) cdb
*/
else if (scp->cmd_len == 12) {
- ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
- ((u32) scp->cmnd[7] << 16) |
- ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
- ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
}
/*
* 16-byte READ(0x88) or WRITE(0x8A) cdb
*/
else if (scp->cmd_len == 16) {
- ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
- ((u32) scp->cmnd[11] << 16) |
- ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) |
+ (u32) scp->cmnd[13]);
- ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
- ((u32) scp->cmnd[7] << 16) |
- ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
- ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
}
@@ -1309,11 +1322,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
* Construct SGL
*/
if (instance->flag_ieee) {
- ldio->flags |= MFI_FRAME_SGL64;
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
&ldio->sgl);
} else if (IS_DMA64) {
- ldio->flags |= MFI_FRAME_SGL64;
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
} else
ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
@@ -1329,7 +1342,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
*/
ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
ldio->sense_buf_phys_addr_hi = 0;
- ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+ ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
/*
* Compute the total number of frames this command consumes. FW uses
@@ -1400,20 +1413,32 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
ldio = (struct megasas_io_frame *)cmd->frame;
mfi_sgl = &ldio->sgl;
sgcount = ldio->sge_count;
- printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+ " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
+ le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
+ le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
}
else {
pthru = (struct megasas_pthru_frame *) cmd->frame;
mfi_sgl = &pthru->sgl;
sgcount = pthru->sge_count;
- printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+ "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
+ pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
+ le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
}
if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
for (n = 0; n < sgcount; n++){
if (IS_DMA64)
- printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
+ le32_to_cpu(mfi_sgl->sge64[n].length),
+ le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
else
- printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+ le32_to_cpu(mfi_sgl->sge32[n].length),
+ le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
}
}
printk(KERN_ERR "\n");
@@ -1674,11 +1699,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
spin_lock_irqsave(&instance->completion_lock, flags);
- producer = *instance->producer;
- consumer = *instance->consumer;
+ producer = le32_to_cpu(*instance->producer);
+ consumer = le32_to_cpu(*instance->consumer);
while (consumer != producer) {
- context = instance->reply_queue[consumer];
+ context = le32_to_cpu(instance->reply_queue[consumer]);
if (context >= instance->max_fw_cmds) {
printk(KERN_ERR "Unexpected context value %x\n",
context);
@@ -1695,7 +1720,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
}
}
- *instance->consumer = producer;
+ *instance->consumer = cpu_to_le32(producer);
spin_unlock_irqrestore(&instance->completion_lock, flags);
@@ -1716,7 +1741,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
- *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN;
+ *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
}
instance->instancet->disable_intr(instance);
instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
@@ -2186,6 +2211,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
struct megasas_header *hdr = &cmd->frame->hdr;
unsigned long flags;
struct fusion_context *fusion = instance->ctrl_context;
+ u32 opcode;
/* flag for the retry reset */
cmd->retry_for_fw_reset = 0;
@@ -2287,9 +2313,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */
- if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
- (cmd->frame->dcmd.mbox.b[1] == 1)) {
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
if (cmd->frame->hdr.cmd_status != 0) {
@@ -2323,8 +2350,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
flags);
break;
}
- if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
- cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+ if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+ opcode == MR_DCMD_CTRL_EVENT_GET) {
spin_lock_irqsave(&poll_aen_lock, flags);
megasas_poll_wait_aen = 0;
spin_unlock_irqrestore(&poll_aen_lock, flags);
@@ -2333,7 +2360,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
/*
* See if got an event notification
*/
- if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
megasas_service_aen(instance, cmd);
else
megasas_complete_int_cmd(instance, cmd);
@@ -2606,7 +2633,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
*instance->consumer =
- MEGASAS_ADPRESET_INPROG_SIGN;
+ cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
}
@@ -2983,7 +3010,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
}
memset(cmd->frame, 0, total_sz);
- cmd->frame->io.context = cmd->index;
+ cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
@@ -3143,13 +3170,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
- dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
+ dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
if (!megasas_issue_polled(instance, cmd)) {
ret = 0;
@@ -3164,22 +3191,24 @@ megasas_get_pd_list(struct megasas_instance *instance)
pd_addr = ci->addr;
if ( ret == 0 &&
- (ci->count <
+ (le32_to_cpu(ci->count) <
(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
- memset(instance->pd_list, 0,
+ memset(instance->local_pd_list, 0,
MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
- for (pd_index = 0; pd_index < ci->count; pd_index++) {
+ for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
- instance->pd_list[pd_addr->deviceId].tid =
- pd_addr->deviceId;
- instance->pd_list[pd_addr->deviceId].driveType =
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
+ le16_to_cpu(pd_addr->deviceId);
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
pd_addr->scsiDevType;
- instance->pd_list[pd_addr->deviceId].driveState =
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
MR_PD_STATE_SYSTEM;
pd_addr++;
}
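+ /* The table is built in local_pd_list and published with a single
+ * memcpy, so pd_list is never left half-updated by a failed refresh.
+ */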
+ memcpy(instance->pd_list, instance->local_pd_list,
+ sizeof(instance->pd_list));
}
pci_free_consistent(instance->pdev,
@@ -3207,6 +3236,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
struct megasas_dcmd_frame *dcmd;
struct MR_LD_LIST *ci;
dma_addr_t ci_h = 0;
+ u32 ld_count;
cmd = megasas_get_cmd(instance);
@@ -3233,12 +3263,12 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
- dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
- dcmd->opcode = MR_DCMD_LD_GET_LIST;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0;
if (!megasas_issue_polled(instance, cmd)) {
@@ -3247,12 +3277,14 @@ megasas_get_ld_list(struct megasas_instance *instance)
ret = -1;
}
+ ld_count = le32_to_cpu(ci->ldCount);
+
/* the following function will get the instance PD LIST */
- if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
+ if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
- for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
+ for (ld_index = 0; ld_index < ld_count; ld_index++) {
if (ci->ldList[ld_index].state != 0) {
ids = ci->ldList[ld_index].ref.targetId;
instance->ld_ids[ids] =
@@ -3271,6 +3303,87 @@ megasas_get_ld_list(struct megasas_instance *instance)
}
/**
+ * megasas_ld_list_query - Returns FW's ld_list structure
+ * @instance: Adapter soft state
+ * @query_type: Query type (MR_LD_QUERY_TYPE_*)
+ *
+ * Issues an internal command (DCMD) to get the FW's logical drive
+ * (target ID) list. This information is mainly used to find out
+ * which LDs the FW exposes to the host.
+ */
+static int
+megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_TARGETID_LIST *ci;
+ dma_addr_t ci_h = 0;
+ u32 tgtid_count;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_WARNING
+ "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
+
+ if (!ci) {
+ printk(KERN_WARNING
+ "megasas: Failed to alloc mem for ld_list_query\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = query_type;
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->pad_0 = 0;
+
+ if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
+ ret = 0;
+ } else {
+ /* On failure, call older LD list DCMD */
+ ret = 1;
+ }
+
+ tgtid_count = le32_to_cpu(ci->count);
+
+ if ((ret == 0) && (tgtid_count <= MAX_LOGICAL_DRIVES)) {
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
+ ids = ci->targetId[ld_index];
+ instance->ld_ids[ids] = ci->targetId[ld_index];
+ }
+
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ ci, ci_h);
+
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+/**
* megasas_get_controller_info - Returns FW's controller structure
* @instance: Adapter soft state
* @ctrl_info: Controller information structure
@@ -3313,13 +3426,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
- dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
if (!megasas_issue_polled(instance, cmd)) {
ret = 0;
@@ -3375,17 +3488,20 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
init_frame->context = context;
- initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
- initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
+ initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
+ initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
- initq_info->producer_index_phys_addr_lo = instance->producer_h;
- initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
+ initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
+ initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF;
- init_frame->queue_info_new_phys_addr_lo = initq_info_h;
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(initq_info_h));
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(initq_info_h));
- init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
/*
* disable the intr before firing the init frame to FW
@@ -3648,7 +3764,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
megasas_get_pd_list(instance);
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
- megasas_get_ld_list(instance);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
@@ -3665,8 +3783,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
- ctrl_info->max_strips_per_io;
- max_sectors_2 = ctrl_info->max_request_size;
+ le16_to_cpu(ctrl_info->max_strips_per_io);
+ max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
@@ -3675,14 +3793,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->is_imr = 0;
dev_info(&instance->pdev->dev, "Controller type: MR,"
"Memory size is: %dMB\n",
- ctrl_info->memory_size);
+ le16_to_cpu(ctrl_info->memory_size));
} else {
instance->is_imr = 1;
dev_info(&instance->pdev->dev,
"Controller type: iMR\n");
}
+ /* OnOffProperties are byte-swapped in place to CPU order */
+ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
instance->disableOnlineCtrlReset =
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ /* adapterOperations2 is byte-swapped in place to CPU order */
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
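+ /* le32_to_cpus() swaps in place, so the bitfield reads that follow
+ * see host byte order on both little- and big-endian CPUs.
+ */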
instance->UnevenSpanSupport =
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
@@ -3696,7 +3818,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
}
-
instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512;
if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
@@ -3802,20 +3923,24 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
- dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = el_info_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
megasas_issue_blocked_cmd(instance, cmd);
/*
* Copy the data back into callers buffer
*/
- memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
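+ /* Copy field by field so each sequence number is byte-swapped to
+ * host order; a raw memcpy would hand little-endian values back to
+ * the caller.
+ */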
+ eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+ eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+ eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+ eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+ eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
@@ -3862,6 +3987,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
if (instance->aen_cmd) {
prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+ prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
/*
* A class whose enum value is smaller is inclusive of all
@@ -3917,16 +4043,16 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
+ dcmd->mbox.w[0] = cpu_to_le32(seq_num);
instance->last_seq_num = seq_num;
- dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
- dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
- dcmd->mbox.w[0] = seq_num;
- dcmd->mbox.w[1] = curr_aen.word;
- dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
+ dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
if (instance->aen_cmd != NULL) {
megasas_return_cmd(instance, cmd);
@@ -3972,8 +4098,9 @@ static int megasas_start_aen(struct megasas_instance *instance)
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
- return megasas_register_aen(instance, eli.newest_seq_num + 1,
- class_locale.word);
+ return megasas_register_aen(instance,
+ eli.newest_seq_num + 1,
+ class_locale.word);
}
/**
@@ -4068,6 +4195,7 @@ megasas_set_dma_mask(struct pci_dev *pdev)
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
goto fail_set_dma_mask;
}
+
return 0;
fail_set_dma_mask:
@@ -4386,11 +4514,11 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 0;
- dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
- dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
megasas_issue_blocked_cmd(instance, cmd);
@@ -4431,11 +4559,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 0;
- dcmd->flags = MFI_FRAME_DIR_NONE;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
- dcmd->opcode = opcode;
+ dcmd->opcode = cpu_to_le32(opcode);
megasas_issue_blocked_cmd(instance, cmd);
@@ -4850,10 +4978,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* alone separately
*/
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
- cmd->frame->hdr.context = cmd->index;
+ cmd->frame->hdr.context = cpu_to_le32(cmd->index);
cmd->frame->hdr.pad_0 = 0;
- cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
- MFI_FRAME_SENSE64);
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
+ MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
/*
* The management interface between applications and the fw uses
@@ -4887,8 +5016,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* We don't change the dma_coherent_mask, so
* pci_alloc_consistent only returns 32bit addresses
*/
- kern_sge32[i].phys_addr = (u32) buf_handle;
- kern_sge32[i].length = ioc->sgl[i].iov_len;
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
/*
* We created a kernel buffer corresponding to the
@@ -4911,7 +5040,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
sense_ptr =
(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- *sense_ptr = sense_handle;
+ *sense_ptr = cpu_to_le32(sense_handle);
}
/*
@@ -4971,9 +5100,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
for (i = 0; i < ioc->sge_count; i++) {
if (kbuff_arr[i])
dma_free_coherent(&instance->pdev->dev,
- kern_sge32[i].length,
+ le32_to_cpu(kern_sge32[i].length),
kbuff_arr[i],
- kern_sge32[i].phys_addr);
+ le32_to_cpu(kern_sge32[i].phys_addr));
}
megasas_return_cmd(instance, cmd);
@@ -5327,7 +5456,7 @@ megasas_aen_polling(struct work_struct *work)
host = instance->host;
if (instance->evt_detail) {
- switch (instance->evt_detail->code) {
+ switch (le32_to_cpu(instance->evt_detail->code)) {
case MR_EVT_PD_INSERTED:
if (megasas_get_pd_list(instance) == 0) {
for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@@ -5389,7 +5518,9 @@ megasas_aen_polling(struct work_struct *work)
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
- megasas_get_ld_list(instance);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5399,7 +5530,7 @@ megasas_aen_polling(struct work_struct *work)
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
- i + MEGASAS_MAX_LD_CHANNELS,
+ MEGASAS_MAX_PD_CHANNELS + i,
j,
0);
@@ -5418,7 +5549,9 @@ megasas_aen_polling(struct work_struct *work)
doscan = 0;
break;
case MR_EVT_LD_CREATED:
- megasas_get_ld_list(instance);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5427,14 +5560,14 @@ megasas_aen_polling(struct work_struct *work)
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
- i+MEGASAS_MAX_LD_CHANNELS,
+ MEGASAS_MAX_PD_CHANNELS + i,
j, 0);
if (instance->ld_ids[ld_index] !=
0xff) {
if (!sdev1) {
scsi_add_device(host,
- i + 2,
+ MEGASAS_MAX_PD_CHANNELS + i,
j, 0);
}
}
@@ -5483,18 +5616,20 @@ megasas_aen_polling(struct work_struct *work)
}
}
- megasas_get_ld_list(instance);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
ld_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
- i+MEGASAS_MAX_LD_CHANNELS, j, 0);
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
if (instance->ld_ids[ld_index] != 0xff) {
if (!sdev1) {
scsi_add_device(host,
- i+2,
+ MEGASAS_MAX_PD_CHANNELS + i,
j, 0);
} else {
scsi_device_put(sdev1);
@@ -5514,7 +5649,7 @@ megasas_aen_polling(struct work_struct *work)
return ;
}
- seq_num = instance->evt_detail->seq_num + 1;
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
/* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 4f401f753f8e..e24b6eb645b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -126,17 +126,17 @@ static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}
-static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
{
- return map->raidMap.arMapInfo[ar].pd[arm];
+ return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}
-static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
{
- return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}
-static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
{
return map->raidMap.devHndlInfo[pd].curDevHdl;
}
@@ -148,7 +148,7 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
{
- return map->raidMap.ldTgtIdToLd[ldTgtId];
+ return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
}
static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -167,18 +167,22 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+ struct MR_LD_RAID *raid;
+ int ldCount, num_lds;
+ u16 ld;
+
- if (pFwRaidMap->totalSize !=
+ if (le32_to_cpu(pFwRaidMap->totalSize) !=
(sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) {
+ (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
(unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
sizeof(struct MR_LD_SPAN_MAP)) +
(sizeof(struct MR_LD_SPAN_MAP) *
- pFwRaidMap->ldCount)));
+ le32_to_cpu(pFwRaidMap->ldCount))));
printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
- pFwRaidMap->totalSize);
+ le32_to_cpu(pFwRaidMap->totalSize));
return 0;
}
@@ -187,6 +191,15 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
mr_update_load_balance_params(map, lbInfo);
+ num_lds = le32_to_cpu(map->raidMap.ldCount);
+
+ /* Convert RAID capability values to CPU byte order */
+ for (ldCount = 0; ldCount < num_lds; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ raid = MR_LdRaidGet(ld, map);
+ le32_to_cpus((u32 *)&raid->capability);
+ }
+
return 1;
}
@@ -200,23 +213,20 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
- for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
+ for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
quad = &pSpanBlock->block_span_info.quad[j];
- if (quad->diff == 0)
+ if (le32_to_cpu(quad->diff) == 0)
return SPAN_INVALID;
- if (quad->logStart <= row && row <= quad->logEnd &&
- (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
+ if (le64_to_cpu(quad->logStart) <= row && row <=
+ le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) {
u64 blk, debugBlk;
- blk =
- mega_div64_32(
- (row-quad->logStart),
- quad->diff);
+ blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
debugBlk = blk;
- blk = (blk + quad->offsetInSpan) <<
- raid->stripeShift;
+ blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
*span_blk = blk;
}
return span;
@@ -257,8 +267,8 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
for (span = 0; span < raid->spanDepth; span++)
dev_dbg(&instance->pdev->dev, "Span=%x,"
" number of quads=%x\n", span,
- map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements);
+ le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements));
for (element = 0; element < MAX_QUAD_DEPTH; element++) {
span_set = &(ldSpanInfo[ld].span_set[element]);
if (span_set->span_row_data_width == 0)
@@ -286,22 +296,22 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
(long unsigned int)span_set->data_strip_end);
for (span = 0; span < raid->spanDepth; span++) {
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >=
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >=
element + 1) {
quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span].block_span_info.
quad[element];
dev_dbg(&instance->pdev->dev, "Span=%x,"
"Quad=%x, diff=%x\n", span,
- element, quad->diff);
+ element, le32_to_cpu(quad->diff));
dev_dbg(&instance->pdev->dev,
"offset_in_span=0x%08lx\n",
- (long unsigned int)quad->offsetInSpan);
+ (long unsigned int)le64_to_cpu(quad->offsetInSpan));
dev_dbg(&instance->pdev->dev,
"logical start=0x%08lx, end=0x%08lx\n",
- (long unsigned int)quad->logStart,
- (long unsigned int)quad->logEnd);
+ (long unsigned int)le64_to_cpu(quad->logStart),
+ (long unsigned int)le64_to_cpu(quad->logEnd));
}
}
}
@@ -348,23 +358,23 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
continue;
for (span = 0; span < raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span].
block_span_info.quad[info];
- if (quad->diff == 0)
+ if (le32_to_cpu(quad->diff) == 0)
return SPAN_INVALID;
- if (quad->logStart <= row &&
- row <= quad->logEnd &&
- (mega_mod64(row - quad->logStart,
- quad->diff)) == 0) {
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) {
u64 blk;
blk = mega_div64_32
- ((row - quad->logStart),
- quad->diff);
- blk = (blk + quad->offsetInSpan)
+ ((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
+ blk = (blk + le64_to_cpu(quad->offsetInSpan))
<< raid->stripeShift;
*span_blk = blk;
}
@@ -415,8 +425,8 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
span_set_Row = mega_div64_32(span_set_Strip,
span_set->span_row_data_width) * span_set->diff;
for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
if (strip_offset >=
span_set->strip_offset[span])
span_offset++;
@@ -480,18 +490,18 @@ static u64 get_strip_from_row(struct megasas_instance *instance,
continue;
for (span = 0; span < raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
quad = &map->raidMap.ldSpanMap[ld].
spanBlock[span].block_span_info.quad[info];
- if (quad->logStart <= row &&
- row <= quad->logEnd &&
- mega_mod64((row - quad->logStart),
- quad->diff) == 0) {
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ mega_mod64((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff)) == 0) {
strip = mega_div64_32
(((row - span_set->data_row_start)
- - quad->logStart),
- quad->diff);
+ - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
strip *= span_set->span_row_data_width;
strip += span_set->data_strip_start;
strip += span_set->strip_offset[span];
@@ -543,8 +553,8 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
span_set->span_row_data_width);
for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements >= info+1) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
if (strip_offset >=
span_set->strip_offset[span])
span_offset =
@@ -669,7 +679,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
}
- *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm;
return retval;
@@ -765,7 +775,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
}
- *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm;
return retval;
@@ -784,7 +794,7 @@ u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
{
struct MR_LD_RAID *raid;
u32 ld, stripSize, stripe_mask;
@@ -965,7 +975,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
regSize += stripSize;
}
- pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
+ pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
pRAID_Context->regLockFlags = (isRead) ?
@@ -974,9 +984,12 @@ MR_BuildRaidContext(struct megasas_instance *instance,
pRAID_Context->regLockFlags = (isRead) ?
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
pRAID_Context->VirtualDiskTgtId = raid->targetId;
- pRAID_Context->regLockRowLBA = regStart;
- pRAID_Context->regLockLength = regSize;
+ pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
+ pRAID_Context->regLockLength = cpu_to_le32(regSize);
pRAID_Context->configSeqNum = raid->seqNum;
+ /* save pointer to raid->LUN array */
+ *raidLUN = raid->LUN;
+
/*Get Phy Params only if FP capable, or else leave it to MR firmware
to do the calculation.*/
@@ -1047,8 +1060,8 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
raid = MR_LdRaidGet(ld, map);
for (element = 0; element < MAX_QUAD_DEPTH; element++) {
for (span = 0; span < raid->spanDepth; span++) {
- if (map->raidMap.ldSpanMap[ld].spanBlock[span].
- block_span_info.noElements <
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) <
element + 1)
continue;
span_set = &(ldSpanInfo[ld].span_set[element]);
@@ -1056,14 +1069,14 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
spanBlock[span].block_span_info.
quad[element];
- span_set->diff = quad->diff;
+ span_set->diff = le32_to_cpu(quad->diff);
for (count = 0, span_row_width = 0;
count < raid->spanDepth; count++) {
- if (map->raidMap.ldSpanMap[ld].
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
spanBlock[count].
block_span_info.
- noElements >= element + 1) {
+ noElements) >= element + 1) {
span_set->strip_offset[count] =
span_row_width;
span_row_width +=
@@ -1077,9 +1090,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
}
span_set->span_row_data_width = span_row_width;
- span_row = mega_div64_32(((quad->logEnd -
- quad->logStart) + quad->diff),
- quad->diff);
+ span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
+ le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
+ le32_to_cpu(quad->diff));
if (element == 0) {
span_set->log_start_lba = 0;
@@ -1096,7 +1109,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
span_set->data_row_start = 0;
span_set->data_row_end =
- (span_row * quad->diff) - 1;
+ (span_row * le32_to_cpu(quad->diff)) - 1;
} else {
span_set_prev = &(ldSpanInfo[ld].
span_set[element - 1]);
@@ -1122,7 +1135,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
span_set_prev->data_row_end + 1;
span_set->data_row_end =
span_set->data_row_start +
- (span_row * quad->diff) - 1;
+ (span_row * le32_to_cpu(quad->diff)) - 1;
}
break;
}
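The megaraid_sas_fp.c hunks above apply one mechanical rule: the RAID map is produced by the firmware in little-endian, so every multi-byte field is wrapped in le{16,32,64}_to_cpu() before the host does arithmetic on it (row ranges, modulo, shifts), and in cpu_to_le*() before anything is written back. The same rule drives the fusion.c hunks below, where cpu_to_le32s() swaps the 32-bit capabilities bitfield in place. A minimal sketch of the pattern, using a hypothetical quad layout rather than the driver's real structures:

#include <linux/types.h>

struct fw_quad {			/* illustrative firmware-shared layout */
	__le64 logStart;
	__le64 logEnd;
	__le32 diff;
};

/* Mirrors the MR_GetSpanBlock test: convert first, compute second. The
 * driver uses mega_mod64() for the 64-bit modulo; plain '%' is shown
 * here for brevity. */
static bool row_in_quad(const struct fw_quad *q, u64 row)
{
	u64 start = le64_to_cpu(q->logStart);
	u64 end = le64_to_cpu(q->logEnd);
	u32 diff = le32_to_cpu(q->diff);

	return diff && start <= row && row <= end &&
		(row - start) % diff == 0;
}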
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 417d5f167aa2..f6555921fd7a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -72,17 +72,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
int
megasas_issue_polled(struct megasas_instance *instance,
struct megasas_cmd *cmd);
-
-u8
-MR_BuildRaidContext(struct megasas_instance *instance,
- struct IO_REQUEST_INFO *io_info,
- struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
@@ -626,23 +615,20 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
- IOCInitMessage->MsgVersion = MPI2_VERSION;
- IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION;
- IOCInitMessage->SystemRequestFrameSize =
- MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
-
- IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth;
- IOCInitMessage->ReplyDescriptorPostQueueAddress =
- fusion->reply_frames_desc_phys;
- IOCInitMessage->SystemRequestFrameBaseAddress =
- fusion->io_request_frames_phys;
+ IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+ IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+ IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
+
+ IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+ IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+ IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF;
@@ -652,17 +638,24 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
init_frame->driver_operations.
mfi_capabilities.support_additional_msix = 1;
+ /* driver supports HA / Remote LUN over Fast Path interface */
+ init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
+ = 1;
+ /* Convert capability to LE32 */
+ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
- init_frame->queue_info_new_phys_addr_lo = ioc_init_handle;
- init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
+ init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
req_desc =
(union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
- req_desc->Words = cmd->frame_phys_addr;
+ req_desc->Words = 0;
req_desc->MFAIo.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cpu_to_le32s((u32 *)&req_desc->MFAIo);
+ req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
/*
* disable the intr before firing the init frame
@@ -753,13 +746,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = size_map_info;
- dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = size_map_info;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
if (!megasas_issue_polled(instance, cmd))
ret = 0;
@@ -828,7 +821,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
map = fusion->ld_map[instance->map_id & 1];
- num_lds = map->raidMap.ldCount;
+ num_lds = le32_to_cpu(map->raidMap.ldCount);
dcmd = &cmd->frame->dcmd;
@@ -856,15 +849,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_WRITE;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- dcmd->data_xfer_len = size_map_info;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
- dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = size_map_info;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
instance->map_update_cmd = cmd;
@@ -1067,9 +1060,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
spin_lock_irqsave(&instance->hba_lock, flags);
- writel(req_desc_lo,
- &(regs)->inbound_low_queue_port);
- writel(req_desc_hi, &(regs)->inbound_high_queue_port);
+ writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -1157,8 +1149,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
return sge_count;
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- sgl_ptr->Length = sg_dma_len(os_sgl);
- sgl_ptr->Address = sg_dma_address(os_sgl);
+ sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+ sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
sgl_ptr->Flags = 0;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
@@ -1177,9 +1169,9 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
PCI_DEVICE_ID_LSI_INVADER) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_FURY)) {
- if ((cmd->io_request->IoFlags &
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ if ((le16_to_cpu(cmd->io_request->IoFlags) &
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
cmd->io_request->ChainOffset =
fusion->
chain_offset_io_request;
@@ -1201,9 +1193,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sg_chain->Flags =
(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
- sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION)
- *(sge_count - sg_processed));
- sg_chain->Address = cmd->sg_frame_phys_addr;
+ sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+ sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
sgl_ptr =
(struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
@@ -1261,7 +1252,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
io_request->CDB.EEDP32.PrimaryReferenceTag =
cpu_to_be32(ref_tag);
io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
- io_request->IoFlags = 32; /* Specify 32-byte cdb */
+ io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
/* Transfer length */
cdb[28] = (u8)((num_blocks >> 24) & 0xff);
@@ -1271,19 +1262,19 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
/* set SCSI IO EEDPFlags */
if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
- io_request->EEDPFlags =
+ io_request->EEDPFlags = cpu_to_le16(
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
} else {
- io_request->EEDPFlags =
+ io_request->EEDPFlags = cpu_to_le16(
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
}
- io_request->Control |= (0x4 << 26);
- io_request->EEDPBlockSize = scp->device->sector_size;
+ io_request->Control |= cpu_to_le32((0x4 << 26));
+ io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
} else {
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1311,7 +1302,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[8] = (u8)(num_blocks & 0xff);
cdb[7] = (u8)((num_blocks >> 8) & 0xff);
- io_request->IoFlags = 10; /* Specify 10-byte cdb */
+ io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
cdb_len = 10;
} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
/* Convert to 16 byte CDB for large LBA's */
@@ -1349,7 +1340,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[11] = (u8)((num_blocks >> 16) & 0xff);
cdb[10] = (u8)((num_blocks >> 24) & 0xff);
- io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
cdb_len = 16;
}
@@ -1410,13 +1401,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
struct IO_REQUEST_INFO io_info;
struct fusion_context *fusion;
struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+ u8 *raidLUN;
device_id = MEGASAS_DEV_INDEX(instance, scp);
fusion = instance->ctrl_context;
io_request = cmd->io_request;
- io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
io_request->RaidContext.status = 0;
io_request->RaidContext.exStatus = 0;
@@ -1480,7 +1472,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
io_info.numBlocks = datalength;
io_info.ldTgtId = device_id;
- io_request->DataLength = scsi_bufflen(scp);
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
io_info.isRead = 1;
@@ -1494,7 +1486,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
} else {
if (MR_BuildRaidContext(instance, &io_info,
&io_request->RaidContext,
- local_map_ptr))
+ local_map_ptr, &raidLUN))
fp_possible = io_info.fpOkForIo;
}
@@ -1520,8 +1512,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
io_request->RaidContext.Type = MPI2_TYPE_CUDA;
io_request->RaidContext.nseg = 0x1;
- io_request->IoFlags |=
- MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
io_request->RaidContext.regLockFlags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
@@ -1537,9 +1528,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
io_request->DevHandle = io_info.devHandle;
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raidLUN, 8);
} else {
io_request->RaidContext.timeoutValue =
- local_map_ptr->raidMap.fpPdIoTimeoutSec;
+ cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1557,7 +1550,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_request->RaidContext.nseg = 0x1;
}
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
+ io_request->DevHandle = cpu_to_le16(device_id);
} /* Not FP */
}
@@ -1579,6 +1572,11 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
u16 pd_index = 0;
struct MR_FW_RAID_MAP_ALL *local_map_ptr;
struct fusion_context *fusion = instance->ctrl_context;
+ u8 span, physArm;
+ u16 devHandle;
+ u32 ld, arRef, pd;
+ struct MR_LD_RAID *raid;
+ struct RAID_CONTEXT *pRAID_Context;
io_request = cmd->io_request;
device_id = MEGASAS_DEV_INDEX(instance, scmd);
@@ -1586,6 +1584,9 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
+scmd->device->id;
local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+
+
/* Check if this is a system PD I/O */
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
@@ -1623,15 +1624,62 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
scmd->request->timeout / HZ;
}
} else {
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+ goto NonFastPath;
+
+ ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+ if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+ goto NonFastPath;
+
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+
+ /* check if this LD is FP capable */
+ if (!(raid->capability.fpNonRWCapable))
+ /* not FP capable, send as non-FP */
+ goto NonFastPath;
+
+ /* get RAID_Context pointer */
+ pRAID_Context = &io_request->RaidContext;
+
+ /* set RAID context values */
+ pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
+ pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
+ pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+ pRAID_Context->regLockRowLBA = 0;
+ pRAID_Context->regLockLength = 0;
+ pRAID_Context->configSeqNum = raid->seqNum;
+
+ /* get the DevHandle for the PD (since this is
+ fpNonRWCapable, this is a single disk RAID0) */
+ span = physArm = 0;
+ arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+ pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+ devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+ /* build request descriptor */
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raid->LUN, 8);
+
+ /* build the raidScsiIO structure */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->DevHandle = devHandle;
+
+ return;
+
+NonFastPath:
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
+ io_request->DevHandle = cpu_to_le16(device_id);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
- io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
io_request->LUN[1] = scmd->device->lun;
- io_request->DataLength = scsi_bufflen(scmd);
}
/**
@@ -1670,7 +1718,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
* Just the CDB length,rest of the Flags are zero
* This will be modified for FP in build_ldio_fusion
*/
- io_request->IoFlags = scp->cmd_len;
+ io_request->IoFlags = cpu_to_le16(scp->cmd_len);
if (megasas_is_ldio(scp))
megasas_build_ldio_fusion(instance, scp, cmd);
@@ -1695,17 +1743,17 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->RaidContext.numSGE = sge_count;
- io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+ io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
if (scp->sc_data_direction == PCI_DMA_TODEVICE)
- io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
- io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
io_request->SGLOffset0 =
offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
- io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+ io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
@@ -1770,7 +1818,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
}
req_desc = cmd->request_desc;
- req_desc->SCSIIO.SMID = index;
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
if (cmd->io_request->ChainOffset != 0 &&
cmd->io_request->ChainOffset != 0xF)
@@ -1832,7 +1880,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
num_completed = 0;
while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
- smid = reply_desc->SMID;
+ smid = le16_to_cpu(reply_desc->SMID);
cmd_fusion = fusion->cmd_list[smid - 1];
@@ -2050,12 +2098,12 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
SGL) / 4;
io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
- mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+ mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
- mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME;
+ mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
return 0;
}
@@ -2088,7 +2136,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- req_desc->SCSIIO.SMID = index;
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
return req_desc;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 12ff01cf6799..35a51397b364 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -88,13 +88,18 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FUSION_IN_RESET 0
/*
- * Raid Context structure which describes MegaRAID specific IO Paramenters
+ * Raid Context structure which describes MegaRAID specific IO Parameters
* This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
*/
struct RAID_CONTEXT {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 nseg:4;
+ u8 Type:4;
+#else
u8 Type:4;
u8 nseg:4;
+#endif
u8 resvd0;
u16 timeoutValue;
u8 regLockFlags;
@@ -298,8 +303,13 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
* MPT RAID MFA IO Descriptor.
*/
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 MessageAddress1:24; /* bits 31:8*/
+ u32 RequestFlags:8;
+#else
u32 RequestFlags:8;
u32 MessageAddress1:24; /* bits 31:8*/
+#endif
u32 MessageAddress2; /* bits 61:32 */
};
@@ -518,6 +528,19 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_LD_RAID {
struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved4:7;
+ u32 fpNonRWCapable:1;
+ u32 fpReadAcrossStripe:1;
+ u32 fpWriteAcrossStripe:1;
+ u32 fpReadCapable:1;
+ u32 fpWriteCapable:1;
+ u32 encryptionType:8;
+ u32 pdPiMode:4;
+ u32 ldPiMode:4;
+ u32 reserved5:3;
+ u32 fpCapable:1;
+#else
u32 fpCapable:1;
u32 reserved5:3;
u32 ldPiMode:4;
@@ -527,7 +550,9 @@ struct MR_LD_RAID {
u32 fpReadCapable:1;
u32 fpWriteAcrossStripe:1;
u32 fpReadAcrossStripe:1;
- u32 reserved4:8;
+ u32 fpNonRWCapable:1;
+ u32 reserved4:7;
+#endif
} capability;
u32 reserved6;
u64 size;
@@ -551,7 +576,9 @@ struct MR_LD_RAID {
u32 reserved:31;
} flags;
- u8 reserved3[0x5C];
+ u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
+ u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
+ u8 reserved3[0x80-0x2D]; /* 0x2D */
};
struct MR_LD_SPAN_MAP {
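The #if defined(__BIG_ENDIAN_BITFIELD) guards added to RAID_CONTEXT, MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR and MR_LD_RAID above are needed because Linux allocates C bitfields following byte order: the first declared field takes the least significant bits on little-endian builds and the most significant bits on big-endian ones. Declaring the fields in reverse order under the guard keeps the byte the firmware sees identical. A self-contained sketch with illustrative names:

#include <linux/types.h>

struct example_ctx {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 nseg:4;	/* first field lands in the high nibble */
	u8 Type:4;
#else
	u8 Type:4;	/* first field lands in the low nibble */
	u8 nseg:4;
#endif
};

/* With Type = 0x2 and nseg = 0x1, both layouts produce the same byte,
 * 0x12, which is what the little-endian firmware expects. */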
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 31b5b15a4726..7b14a015c903 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.27
+ * mpi2.h Version: 02.00.28
*
* Version History
* ---------------
@@ -77,6 +77,7 @@
* Added Hard Reset delay timings.
* 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
* 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -102,7 +103,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x1B)
+#define MPI2_HEADER_VERSION_UNIT (0x1C)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 737fa8cfb54a..88cb7f828bbd 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2011 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.22
+ * mpi2_cnfg.h Version: 02.00.23
*
* Version History
* ---------------
@@ -149,6 +149,8 @@
* 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
* Added UEFIVersion field to BIOS Page 1 and defined new
* BiosOptions bits.
+ * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
* --------------------------------------------------------------------------
*/
@@ -698,6 +700,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
/* defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
@@ -1224,6 +1227,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
/* values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
+#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
+
#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 963761fb8462..9d284dae6553 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_init.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index e93f8f53adf9..d159c5f24aab 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_ioc.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 255b0ca219a4..0d202a2c6db7 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_raid.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index fdffde1ebc0f..50b39ccd526a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2010 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_sas.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 67c387f10e59..11b2ac4e7c6e 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_tool.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
index cfde017bf16e..0b128b68a5ea 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2007 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_type.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index ccd6d5a97ec3..3901edc35812 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -768,10 +768,9 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Returns void.
*/
-static u8
+static void
_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
Mpi2EventNotificationReply_t *mpi_reply;
@@ -780,9 +779,9 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
if (!mpi_reply)
- return 1;
+ return;
if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
- return 1;
+ return;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
_base_display_event_data(ioc, mpi_reply);
#endif
@@ -812,7 +811,7 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
/* ctl callback handler */
mpt2sas_ctl_event_callback(ioc, msix_index, reply);
- return 1;
+ return;
}
/**
@@ -1409,8 +1408,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
int i;
u8 try_msix = 0;
- INIT_LIST_HEAD(&ioc->reply_queue_list);
-
if (msix_disable == -1 || msix_disable == 0)
try_msix = 1;
@@ -1489,6 +1486,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
if (pci_enable_device_mem(pdev)) {
printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
"failed\n", ioc->name);
+ ioc->bars = 0;
return -ENODEV;
}
@@ -1497,6 +1495,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
MPT2SAS_DRIVER_NAME)) {
printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
"failed\n", ioc->name);
+ ioc->bars = 0;
r = -ENODEV;
goto out_fail;
}
@@ -4229,18 +4228,25 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
- _base_mask_interrupts(ioc);
- ioc->shost_recovery = 1;
- _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
- ioc->shost_recovery = 0;
+ if (ioc->chip_phys && ioc->chip) {
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ }
+
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->chip_phys)
+
+ if (ioc->chip_phys && ioc->chip)
iounmap(ioc->chip);
ioc->chip_phys = 0;
- pci_release_selected_regions(ioc->pdev, ioc->bars);
- pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+
+ if (pci_is_enabled(pdev)) {
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
return;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 6fbd08417773..1f2ac3a28621 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "15.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 15
+#define MPT2SAS_DRIVER_VERSION "16.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 16
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -1061,7 +1061,7 @@ void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
/* scsih shared API */
-u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
uint channel, uint id, uint lun, u8 type, u16 smid_task,
@@ -1144,7 +1144,7 @@ void mpt2sas_ctl_exit(void);
u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
-u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventNotificationReply_t *mpi_reply);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 863778071a9d..0c47425c73f2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
* This module provides common API for accessing firmware configuration pages
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index eec052c2670a..b7f887c9b0bf 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -397,18 +397,22 @@ mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Returns void.
*/
-u8
+void
mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply)
{
Mpi2EventNotificationReply_t *mpi_reply;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
- return 1;
+ return;
}
/**
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index b5eb0d1b8ea6..8b2ac1869dcc 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 69cc7d0c112c..a9021cbd6628 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
* Logging Support for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 51004768d0f5..7f0af4fcc001 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -628,11 +628,12 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
* devices while scanning is turned on due to an oops in
* scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
*/
- if (!ioc->is_driver_loading)
+ if (!ioc->is_driver_loading) {
mpt2sas_transport_port_remove(ioc,
sas_device->sas_address,
sas_device->sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
}
}
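This hunk, and the matching ones later in _scsih_probe_boot_devices() and _scsih_probe_sas(), all fix the same misleading-indentation bug: a two-statement body under an unbraced if, so the second statement ran unconditionally. Schematically (generic C, not the driver's code):

static void port_remove(void) { }
static void device_remove(void) { }

static void example(int driver_loading)
{
	if (!driver_loading)		/* before the fix: no braces */
		port_remove();
		device_remove();	/* always ran, despite the indentation */
}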
@@ -1402,6 +1403,7 @@ _scsih_slave_alloc(struct scsi_device *sdev)
struct MPT2SAS_DEVICE *sas_device_priv_data;
struct scsi_target *starget;
struct _raid_device *raid_device;
+ struct _sas_device *sas_device;
unsigned long flags;
sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@@ -1430,6 +1432,19 @@ _scsih_slave_alloc(struct scsi_device *sdev)
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && (sas_device->starget == NULL)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s : sas_device->starget set to starget @ %d\n",
+ __func__, __LINE__);
+ sas_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
return 0;
}
@@ -6753,7 +6768,7 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
handle = le16_to_cpu(sas_device_pg0.DevHandle);
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -6862,7 +6877,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
&volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -6887,7 +6902,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
phys_disk_num))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
phys_disk_num = pd_pg0.PhysDiskNum;
handle = le16_to_cpu(pd_pg0.DevHandle);
@@ -6967,7 +6982,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -7109,8 +7124,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
- break;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7153,8 +7166,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
phys_disk_num))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
- break;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:"
"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7219,8 +7230,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
&volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
- break;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7278,8 +7287,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
- break;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
" ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7471,10 +7478,9 @@ _firmware_event_work(struct work_struct *work)
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Returns void.
*/
-u8
+void
mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply)
{
@@ -7485,14 +7491,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
/* events turned off due to host reset or driver unloading */
if (ioc->remove_host || ioc->pci_error_recovery)
- return 1;
+ return;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- return 1;
+ return;
}
event = le16_to_cpu(mpi_reply->Event);
@@ -7507,11 +7513,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
if (baen_data->Primitive !=
MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
- return 1;
+ return;
if (ioc->broadcast_aen_busy) {
ioc->broadcast_aen_pending++;
- return 1;
+ return;
} else
ioc->broadcast_aen_busy = 1;
break;
@@ -7587,14 +7593,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
break;
default: /* ignore the rest */
- return 1;
+ return;
}
fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
if (!fw_event) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- return 1;
+ return;
}
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
@@ -7602,7 +7608,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
kfree(fw_event);
- return 1;
+ return;
}
memcpy(fw_event->event_data, mpi_reply->EventData,
@@ -7612,7 +7618,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
fw_event->VP_ID = mpi_reply->VP_ID;
fw_event->event = event;
_scsih_fw_event_add(ioc, fw_event);
- return 1;
+ return;
}
/* shost template */
@@ -7711,10 +7717,6 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
if (!ioc->ir_firmware)
return;
- /* are there any volumes ? */
- if (list_empty(&ioc->raid_device_list))
- return;
-
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) {
@@ -7929,10 +7931,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
- if (!ioc->is_driver_loading)
- mpt2sas_transport_port_remove(ioc, sas_address,
+ if (!ioc->is_driver_loading) {
+ mpt2sas_transport_port_remove(ioc,
+ sas_address,
sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
}
}
}
@@ -7985,14 +7989,14 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
kfree(sas_device);
continue;
} else if (!sas_device->starget) {
- if (!ioc->is_driver_loading)
+ if (!ioc->is_driver_loading) {
mpt2sas_transport_port_remove(ioc,
sas_device->sas_address,
sas_device->sas_address_parent);
- list_del(&sas_device->list);
- kfree(sas_device);
- continue;
-
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ }
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_move_tail(&sas_device->list, &ioc->sas_device_list);
@@ -8175,6 +8179,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
INIT_LIST_HEAD(&ioc->delayed_tr_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
/* init shost parameters */
shost->max_cmd_len = 32;
@@ -8280,6 +8285,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
mpt2sas_base_stop_watchdog(ioc);
scsi_block_requests(shost);
+ _scsih_ir_shutdown(ioc);
device_state = pci_choose_state(pdev, state);
printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
"operating state [D%d]\n", ioc->name, pdev,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 193e7ae90c3b..9d26637308be 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -1006,9 +1006,12 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
&mpt2sas_phy->remote_identify);
_transport_add_phy_to_an_existing_port(ioc, sas_node,
mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
- } else
+ } else {
memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
sas_identify));
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ mpt2sas_phy);
+ }
if (mpt2sas_phy->phy)
mpt2sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 4c1d2e7a1176..efb0c4c2e310 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -1,5 +1,5 @@
# mpt3sas makefile
-obj-m += mpt3sas.o
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
mpt3sas-y += mpt3sas_base.o \
mpt3sas_config.o \
mpt3sas_scsih.o \
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5dc280c75325..fa785062e97b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -82,6 +82,10 @@ static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+static int max_msix_vectors = 8;
+module_param(max_msix_vectors, int, 0);
+MODULE_PARM_DESC(max_msix_vectors,
+ " max msix vectors - (default=8)");
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
@@ -1709,8 +1713,6 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
int i;
u8 try_msix = 0;
- INIT_LIST_HEAD(&ioc->reply_queue_list);
-
if (msix_disable == -1 || msix_disable == 0)
try_msix = 1;
@@ -1723,6 +1725,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
+ printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
+ ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
+ ioc->cpu_count, max_msix_vectors);
+
+ if (max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ ioc->reply_queue_count);
+ ioc->msix_vector_count = ioc->reply_queue_count;
+ }
+
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
GFP_KERNEL);
if (!entries) {
@@ -1790,6 +1802,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (pci_enable_device_mem(pdev)) {
pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
ioc->name);
+ ioc->bars = 0;
return -ENODEV;
}
@@ -1798,6 +1811,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
MPT3SAS_DRIVER_NAME)) {
pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
ioc->name);
+ ioc->bars = 0;
r = -ENODEV;
goto out_fail;
}
@@ -4393,18 +4407,25 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
- _base_mask_interrupts(ioc);
- ioc->shost_recovery = 1;
- _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
- ioc->shost_recovery = 0;
+ if (ioc->chip_phys && ioc->chip) {
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ }
+
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->chip_phys)
+
+ if (ioc->chip_phys && ioc->chip)
iounmap(ioc->chip);
ioc->chip_phys = 0;
- pci_release_selected_regions(ioc->pdev, ioc->bars);
- pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+
+ if (pci_is_enabled(pdev)) {
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
return;
}
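The mpt2sas and mpt3sas free_resources hunks above share one defensive-teardown idea: quiesce and unmap the controller only if its registers were actually ioremap()ed, and release PCI resources only if the device is still enabled, so the function is safe to call from a probe path that failed partway through. A hedged sketch of the shape (the wrapper and its arguments are hypothetical):

#include <linux/io.h>
#include <linux/pci.h>

static void example_free_resources(struct pci_dev *pdev,
				   void __iomem *regs, int bars)
{
	if (regs)
		iounmap(regs);		/* undo a successful ioremap() */

	if (pci_is_enabled(pdev)) {
		pci_release_selected_regions(pdev, bars);
		pci_disable_device(pdev); /* balances pci_enable_device_mem() */
	}
}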
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8cbe8fd21fc4..a961fe11b527 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7779,6 +7779,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
INIT_LIST_HEAD(&ioc->delayed_tr_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
/* init shost parameters */
shost->max_cmd_len = 32;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index dcadd56860ff..e771a88c6a74 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1003,9 +1003,12 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
&mpt3sas_phy->remote_identify);
_transport_add_phy_to_an_existing_port(ioc, sas_node,
mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
- } else
+ } else {
memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
sas_identify));
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ mpt3sas_phy);
+ }
if (mpt3sas_phy->phy)
mpt3sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 9d86947d67fe..e1d9a4c4c4b3 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -107,6 +107,7 @@ static ssize_t osdname_show(struct device *dev, struct device_attribute *attr,
class_dev);
return sprintf(buf, "%s\n", ould->odi.osdname);
}
+static DEVICE_ATTR_RO(osdname);
static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -117,17 +118,19 @@ static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
memcpy(buf, ould->odi.systemid, ould->odi.systemid_len);
return ould->odi.systemid_len;
}
+static DEVICE_ATTR_RO(systemid);
-static struct device_attribute osd_uld_attrs[] = {
- __ATTR(osdname, S_IRUGO, osdname_show, NULL),
- __ATTR(systemid, S_IRUGO, systemid_show, NULL),
- __ATTR_NULL,
+static struct attribute *osd_uld_attrs[] = {
+ &dev_attr_osdname.attr,
+ &dev_attr_systemid.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(osd_uld);
static struct class osd_uld_class = {
.owner = THIS_MODULE,
.name = "scsi_osd",
- .dev_attrs = osd_uld_attrs,
+ .dev_groups = osd_uld_groups,
};
/*
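The osd_uld.c hunk above is the stock migration from the removed class->dev_attrs array to attribute groups: DEVICE_ATTR_RO(name) binds dev_attr_name to name_show(), and ATTRIBUTE_GROUPS(prefix) generates the prefix_groups table that .dev_groups consumes. A minimal sketch with a made-up attribute:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t model_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example\n");	/* hypothetical payload */
}
static DEVICE_ATTR_RO(model);

static struct attribute *example_attrs[] = {
	&dev_attr_model.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);

static struct class example_class = {
	.name		= "example",
	.dev_groups	= example_groups,
};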
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index d99f41c2ca13..a04b4ff8c7f6 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -309,6 +309,117 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
}
static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
/**
+ * pm8001_ctl_ib_queue_log_show - Inbound queue log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int offset;
+ char *str = buf;
+ int start = 0;
+#define IB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[IB].virt_ptr + \
+ pm8001_ha->evtlog_ib_offset + (c)))
+
+ for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+ str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+ start = start + 4;
+ }
+ pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
+ if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ && (pm8001_ha->chip_id != chip_8001))
+ pm8001_ha->evtlog_ib_offset = 0;
+ if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
+ && (pm8001_ha->chip_id == chip_8001))
+ pm8001_ha->evtlog_ib_offset = 0;
+
+ return str - buf;
+}
+
+static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
+/**
+ * pm8001_ctl_ob_queue_log_show - Outbound queue log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int offset;
+ char *str = buf;
+ int start = 0;
+#define OB_MEMMAP(c) \
+ (*(u32 *)((u8 *)pm8001_ha-> \
+ memoryMap.region[OB].virt_ptr + \
+ pm8001_ha->evtlog_ob_offset + (c)))
+
+ for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+ str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+ start = start + 4;
+ }
+ pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
+ if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ && (pm8001_ha->chip_id != chip_8001))
+ pm8001_ha->evtlog_ob_offset = 0;
+ if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
+ && (pm8001_ha->chip_id == chip_8001))
+ pm8001_ha->evtlog_ob_offset = 0;
+
+ return str - buf;
+}
+static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
+/**
+ * pm8001_ctl_bios_version_show - Bios version Display
+ * @cdev:pointer to embedded class device
+ * @buf:the buffer returned
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ char *str = buf;
+ void *virt_addr;
+ int bios_index;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
+
+ pm8001_ha->nvmd_completion = &completion;
+ payload.minor_function = 7;
+ payload.offset = 0;
+ payload.length = 4096;
+ payload.func_specific = kzalloc(4096, GFP_KERNEL);
+ if (!payload.func_specific)
+ return -ENOMEM;
+ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ wait_for_completion(&completion);
+ virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
+ for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
+ bios_index++)
+ str += sprintf(str, "%c",
+ *((u8 *)((u8 *)virt_addr+bios_index)));
+ kfree(payload.func_specific);
+ return str - buf;
+}
+static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
+/**
* pm8001_ctl_iop_log_show - IOP event log
* @cdev: pointer to embedded class device
* @buf: the buffer returned
@@ -344,6 +455,43 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
}
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
+/**
+ * pm8001_ctl_fatal_log_show - fatal error logging
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 count;
+
+ count = pm80xx_get_fatal_dump(cdev, attr, buf);
+ return count;
+}
+
+static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
+
+
+/**
+ * pm8001_ctl_gsm_log_show - gsm dump collection
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 count;
+
+ count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
+ return count;
+}
+
+static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
+
#define FLASH_CMD_NONE 0x00
#define FLASH_CMD_UPDATE 0x01
#define FLASH_CMD_SET_NVMD 0x02
@@ -603,12 +751,17 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_update_fw,
&dev_attr_aap_log,
&dev_attr_iop_log,
+ &dev_attr_fatal_log,
+ &dev_attr_gsm_log,
&dev_attr_max_out_io,
&dev_attr_max_devices,
&dev_attr_max_sg_list,
&dev_attr_sas_spec_support,
&dev_attr_logging_level,
&dev_attr_host_sas_address,
+ &dev_attr_bios_version,
+ &dev_attr_ib_log,
+ &dev_attr_ob_log,
NULL,
};
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index 63ad4aa0c422..d0d43a250b9e 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -45,6 +45,8 @@
#define HEADER_LEN 28
#define SIZE_OFFSET 16
+#define BIOSOFFSET 56
+#define BIOS_OFFSET_LIMIT 61
#define FLASH_OK 0x000000
#define FAIL_OPEN_BIOS_FILE 0x000100
@@ -53,5 +55,9 @@
#define FAIL_OUT_MEMORY 0x000c00
#define FLASH_IN_PROGRESS 0x001000
+#define IB_OB_READ_TIMES 256
+#define SYSFS_OFFSET 1024
+#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024)
+#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024)
#endif /* PM8001_CTL_H_INCLUDED */
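A minimal sketch of the wrap arithmetic these constants support (constant names come from the hunk above; the helper function is hypothetical, not part of the patch):

	/* Advance a log read offset by SYSFS_OFFSET and wrap to the start
	 * once the per-chip queue size (32K SPCv, 16K SPC) is consumed. */
	static u32 next_evtlog_offset(u32 off, u32 queue_size)
	{
		off += SYSFS_OFFSET;
		return (off % queue_size) ? off : 0;
	}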
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 479c5a7a863a..74a4bb9af07b 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -46,7 +46,10 @@ enum chip_flavors {
chip_8008,
chip_8009,
chip_8018,
- chip_8019
+ chip_8019,
+ chip_8074,
+ chip_8076,
+ chip_8077
};
enum phy_speed {
@@ -99,7 +102,8 @@ enum memory_region_num {
NVMD, /* NVM device */
DEV_MEM, /* memory for devices */
CCB_MEM, /* memory for command control block */
- FW_FLASH /* memory for fw flash update */
+ FW_FLASH, /* memory for fw flash update */
+ FORENSIC_MEM /* memory for fw forensic data */
};
#define PM8001_EVENT_LOG_SIZE (128 * 1024)
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4a2195752198..f16ece91b94a 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1868,6 +1868,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
+ /* Print sas address of IO failed device */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW))
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2276,6 +2283,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 param;
u32 status;
u32 tag;
+ int i, j;
+ u8 sata_addr_low[4];
+ u32 temp_sata_addr_low;
+ u8 sata_addr_hi[4];
+ u32 temp_sata_addr_hi;
struct sata_completion_resp *psataPayload;
struct task_status_struct *ts;
struct ata_task_resp *resp ;
@@ -2325,7 +2337,46 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
-
+ /* Print sas address of IO failed device */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW)) {
+ if (!((t->dev->parent) &&
+ (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+ for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
+ sata_addr_low[i] = pm8001_ha->sas_addr[j];
+ for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
+ sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+ memcpy(&temp_sata_addr_low, sata_addr_low,
+ sizeof(sata_addr_low));
+ memcpy(&temp_sata_addr_hi, sata_addr_hi,
+ sizeof(sata_addr_hi));
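+ /* Byte-swap each 32-bit half (an open-coded swab32()) so the
+ * address prints in wire order; the low half also folds in the
+ * attached PHY id. */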
+ temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) |
+ ((temp_sata_addr_hi << 8) & 0xff0000) |
+ ((temp_sata_addr_hi >> 8) & 0xff00) |
+ ((temp_sata_addr_hi << 24) & 0xff000000));
+ temp_sata_addr_low = ((((temp_sata_addr_low >> 24) & 0xff) |
+ ((temp_sata_addr_low << 8) & 0xff0000) |
+ ((temp_sata_addr_low >> 8) & 0xff00) |
+ ((temp_sata_addr_low << 24) & 0xff000000)) +
+ pm8001_dev->attached_phy + 0x10);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%08x%08x", temp_sata_addr_hi,
+ temp_sata_addr_low));
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ }
+ }
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
@@ -3087,8 +3138,8 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev = ccb->device;
u32 status = le32_to_cpu(pPayload->status);
u32 device_id = le32_to_cpu(pPayload->device_id);
- u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS;
- u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS;
+ u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
+ u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
"from 0x%x to 0x%x status = 0x%x!\n",
device_id, pds, nds, status));
@@ -4700,6 +4751,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
+ if (pm8001_ha->chip_id != chip_8001)
+ sspTMCmd.ds_ads_m = 0x08;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
return ret;
@@ -4778,6 +4831,16 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
break;
}
+ case IOP_RDUMP: {
+ nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
+ nvmd_req.resp_addr_hi =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+ nvmd_req.resp_addr_lo =
+ cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+ break;
+ }
default:
break;
}
@@ -4938,6 +5001,89 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
return rc;
}
+ssize_t
+pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
+{
+ u32 value, rem, offset = 0, bar = 0;
+ u32 index, work_offset, dw_length;
+ u32 shift_value, gsm_base, gsm_dump_offset;
+ char *direct_data;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ direct_data = buf;
+ gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset;
+
+ /* check max is 1 Mbytes */
+ if ((length > 0x100000) || (gsm_dump_offset & 3) ||
+ ((gsm_dump_offset + length) > 0x1000000))
+ return 1;
+
+ if (pm8001_ha->chip_id == chip_8001)
+ bar = 2;
+ else
+ bar = 1;
+
+ work_offset = gsm_dump_offset & 0xFFFF0000;
+ offset = gsm_dump_offset & 0x0000FFFF;
+ gsm_dump_offset = work_offset;
+ /* adjust length to dword boundary */
+ rem = length & 3;
+ dw_length = length >> 2;
+
+ for (index = 0; index < dw_length; index++) {
+ if ((work_offset + offset) & 0xFFFF0000) {
+ if (pm8001_ha->chip_id == chip_8001)
+ shift_value = ((gsm_dump_offset + offset) &
+ SHIFT_REG_64K_MASK);
+ else
+ shift_value = (((gsm_dump_offset + offset) &
+ SHIFT_REG_64K_MASK) >>
+ SHIFT_REG_BIT_SHIFT);
+
+ if (pm8001_ha->chip_id == chip_8001) {
+ gsm_base = GSM_BASE;
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ (gsm_base + shift_value)))
+ return 1;
+ } else {
+ gsm_base = 0;
+ if (-1 == pm80xx_bar4_shift(pm8001_ha,
+ (gsm_base + shift_value)))
+ return 1;
+ }
+ gsm_dump_offset = (gsm_dump_offset + offset) &
+ 0xFFFF0000;
+ work_offset = 0;
+ offset = offset & 0x0000FFFF;
+ }
+ value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+ 0x0000FFFF);
+ direct_data += sprintf(direct_data, "%08x ", value);
+ offset += 4;
+ }
+ if (rem != 0) {
+ value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+ 0x0000FFFF);
+ /* xfr for non_dw */
+ direct_data += sprintf(direct_data, "%08x ", value);
+ }
+ /* Shift back to BAR4 original address */
+ if (pm8001_ha->chip_id == chip_8001) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+ return 1;
+ } else {
+ if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
+ return 1;
+ }
+ pm8001_ha->fatal_forensic_shift_offset += 1024;
+
+ if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ return direct_data - buf;
+}
+
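+/*
+ * Note on the routine above: the GSM dump reads through a 64K window;
+ * each time the running offset crosses a 64K boundary the BAR4 shift
+ * register is reprogrammed and reads resume from the new window base.
+ */
+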
int
pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev, u32 state)
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index d7c1e2034226..6d91e2446542 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -1027,5 +1027,8 @@ struct set_dev_state_resp {
#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
+#define GSM_BASE 0x4F0000
+#define SHIFT_REG_64K_MASK 0xffff0000
+#define SHIFT_REG_BIT_SHIFT 8
#endif
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3861aa1f4520..662bf13c42f0 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -54,6 +54,9 @@ static const struct pm8001_chip_info pm8001_chips[] = {
[chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
[chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
[chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
+ [chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
+ [chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
+ [chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
};
static int pm8001_id;
@@ -344,6 +347,10 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
/* Memory region for fw flash */
pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_mem_alloc(pm8001_ha->pdev,
&pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -424,7 +431,8 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
"base addr %llx virt_addr=%llx len=%d\n",
(u64)pm8001_ha->io_mem[logicalBar].membase,
- (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
+ (u64)(unsigned long)
+ pm8001_ha->io_mem[logicalBar].memvirtaddr,
pm8001_ha->io_mem[logicalBar].memsize));
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
@@ -663,6 +671,31 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
#endif
}
+/**
+ * pm8001_get_phy_settings_info - Read phy setting values.
+ * @pm8001_ha: our hba.
+ */
+void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+{
+
+#ifdef PM8001_READ_VPD
+ /*OPTION ROM FLASH read for the SPC cards */
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
+
+ pm8001_ha->nvmd_completion = &completion;
+ /* SAS ADDRESS read from flash / EEPROM */
+ payload.minor_function = 6;
+ payload.offset = 0;
+ payload.length = 4096;
+ payload.func_specific = kzalloc(4096, GFP_KERNEL);
+ if (!payload.func_specific)
+ return;
+ /* Read phy setting values from flash */
+ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ wait_for_completion(&completion);
+ pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+ kfree(payload.func_specific);
+#endif
+}
+
#ifdef PM8001_USE_MSIX
/**
* pm8001_setup_msix - enable MSI-X interrupt
@@ -734,7 +767,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
pdev = pm8001_ha->pdev;
#ifdef PM8001_USE_MSIX
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ if (pdev->msix_cap)
return pm8001_setup_msix(pm8001_ha);
else {
PM8001_INIT_DBG(pm8001_ha,
@@ -843,6 +876,10 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
}
pm8001_init_sas_add(pm8001_ha);
+ /* phy setting support for motherboard controller */
+ if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
+ pdev->subsystem_vendor != 0)
+ pm8001_get_phy_settings_info(pm8001_ha);
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
if (rc)
@@ -1036,6 +1073,12 @@ static struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
{ PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
{ PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 },
+ { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 },
+ { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
@@ -1056,6 +1099,24 @@ static struct pci_device_id pm8001_pci_table[] = {
PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 },
+ { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },
{} /* terminate list */
};
@@ -1107,8 +1168,11 @@ module_init(pm8001_init);
module_exit(pm8001_exit);
MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
+MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
+MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
MODULE_DESCRIPTION(
- "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
+ "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
+ "SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index a85d73de7c80..f4eb18e51631 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -447,7 +447,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
rc = pm8001_task_prep_ata(pm8001_ha, ccb);
break;
default:
@@ -704,6 +703,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
int res, retry;
struct sas_task *task = NULL;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
for (retry = 0; retry < 3; retry++) {
task = sas_alloc_slow_task(GFP_KERNEL);
@@ -729,6 +730,12 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
goto ex_err;
}
wait_for_completion(&task->slow_task->completion);
+ if (pm8001_ha->chip_id != chip_8001) {
+ pm8001_dev->setds_completion = &completion_setstate;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
+ wait_for_completion(&completion_setstate);
+ }
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 570819464d90..6037d477a183 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -104,6 +104,9 @@ do { \
#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
+#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \
+ || (dev->device == 0X8076) \
+ || (dev->device == 0X8077))
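+/* 12G SPCv parts, keyed on PCI device id; used below to widen the
+ * doorbell timeouts and to advertise the 12G link rate at phy start. */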
#define PM8001_NAME_LENGTH 32/* generic length of strings */
extern struct list_head hba_list;
@@ -129,6 +132,61 @@ struct pm8001_ioctl_payload {
u8 *func_specific;
};
+#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF
+#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24)
+#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */
+#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */
+#define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */
+#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */
+#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */
+#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1
+#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3
+#define TYPE_GSM_SPACE 1
+#define TYPE_QUEUE 2
+#define TYPE_FATAL 3
+#define TYPE_NON_FATAL 4
+#define TYPE_INBOUND 1
+#define TYPE_OUTBOUND 2
+struct forensic_data {
+ u32 data_type;
+ union {
+ struct {
+ u32 direct_len;
+ u32 direct_offset;
+ void *direct_data;
+ } gsm_buf;
+ struct {
+ u16 queue_type;
+ u16 queue_index;
+ u32 direct_len;
+ void *direct_data;
+ } queue_buf;
+ struct {
+ u32 direct_len;
+ u32 direct_offset;
+ u32 read_len;
+ void *direct_data;
+ } data_buf;
+ };
+};
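+/*
+ * Note: forensic_info.data_buf carries state across consecutive sysfs
+ * reads of fatal_log; direct_offset accumulates what has been returned
+ * so far and read_len reports the size of the last chunk.
+ */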
+
+/* bit31-26 - mask bar */
+#define SCRATCH_PAD0_BAR_MASK 0xFC000000
+/* bit25-0 - offset mask */
+#define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF
+/* if AAP error state */
+#define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF
+/* Inbound doorbell bit7 */
+#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80
+/* Inbound doorbell bit7 SPCV */
+#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80
+#define MAIN_MERRDCTO_MERRDCES 0xA0 /* DWORD 0x28 */
+
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -343,6 +401,7 @@ union main_cfg_table {
u32 phy_attr_table_offset;
u32 port_recovery_timer;
u32 interrupt_reassertion_delay;
+ u32 fatal_n_non_fatal_dump; /* 0x28 */
} pm80xx_tbl;
};
@@ -417,6 +476,13 @@ struct pm8001_hba_info {
struct pm8001_hba_memspace io_mem[6];
struct mpi_mem_req memoryMap;
struct encrypt encrypt_info; /* support encryption */
+ struct forensic_data forensic_info;
+ u32 fatal_bar_loc;
+ u32 forensic_last_offset;
+ u32 fatal_forensic_shift_offset;
+ u32 forensic_fatal_step;
+ u32 evtlog_ib_offset;
+ u32 evtlog_ob_offset;
void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/
void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/
@@ -425,6 +491,7 @@ struct pm8001_hba_info {
void __iomem *pspa_q_tbl_addr;
/*MPI SAS PHY attributes Queue Config Table Addr*/
void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */
+ void __iomem *fatal_tbl_addr; /*MPI Fatal Error Table Addr*/
union main_cfg_table main_cfg_tbl;
union general_status_table gs_tbl;
struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
@@ -629,7 +696,12 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
-
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+ u32 length, u8 *buf);
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf);
+ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 9f91030211e8..8987b1706216 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -45,6 +45,228 @@
#define SMP_DIRECT 1
#define SMP_INDIRECT 2
+
+
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
+{
+ u32 reg_val;
+ unsigned long start;
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value);
+ /* confirm the setting is written */
+ start = jiffies + HZ; /* 1 sec */
+ do {
+ reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
+ } while ((reg_val != shift_value) && time_before(jiffies, start));
+ if (reg_val != shift_value) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+ " = 0x%x\n", reg_val));
+ return -1;
+ }
+ return 0;
+}
+
+void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
+ void *destination,
+ u32 dw_count, u32 bus_base_number)
+{
+ u32 index, value, offset;
+ u32 *destination1 = destination;
+
+ for (index = 0; index < dw_count; index += 4, destination1++) {
+ offset = (soffset + index / 4);
+ if (offset < (64 * 1024)) {
+ value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
+ *destination1 = cpu_to_le32(value);
+ }
+ }
+ return;
+}
+
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
+ u32 status = 1;
+ u32 accum_len, reg_val, index, *temp;
+ unsigned long start;
+ u8 *direct_data;
+ char *fatal_error_data = buf;
+
+ pm8001_ha->forensic_info.data_buf.direct_data = buf;
+ if (pm8001_ha->chip_id == chip_8001) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "Not supported for SPC controller");
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
+ direct_data = (u8 *)fatal_error_data;
+ pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
+ pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+
+ pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ }
+
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+ /* start to get data */
+ /* Program the MEMBASE II Shifting Register with 0x00.*/
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_ha->forensic_last_offset = 0;
+ pm8001_ha->forensic_fatal_step = 0;
+ pm8001_ha->fatal_bar_loc = 0;
+ }
+ /* Read until accum_len is retrieved */
+ accum_len = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
+ accum_len));
+ if (accum_len == 0xFFFFFFFF) {
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("Possible PCI issue 0x%x not expected\n",
+ accum_len));
+ return status;
+ }
+ if (accum_len == 0 || accum_len >= 0x100000) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+ if (pm8001_ha->forensic_fatal_step == 0) {
+moreData:
+ if (pm8001_ha->forensic_info.data_buf.direct_data) {
+ /* Data is in bar, copy to host memory */
+ pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+ pm8001_ha->forensic_info.data_buf.direct_len,
+ 1);
+ }
+ pm8001_ha->fatal_bar_loc +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_info.data_buf.direct_offset +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_last_offset +=
+ pm8001_ha->forensic_info.data_buf.direct_len;
+ pm8001_ha->forensic_info.data_buf.read_len =
+ pm8001_ha->forensic_info.data_buf.direct_len;
+
+ if (pm8001_ha->forensic_last_offset >= accum_len) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 3);
+ for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+
+ pm8001_ha->fatal_bar_loc = 0;
+ pm8001_ha->forensic_fatal_step = 1;
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ pm8001_ha->forensic_last_offset = 0;
+ status = 0;
+ return (char *)pm8001_ha->
+ forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->fatal_bar_loc < (64 * 1024)) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", 2);
+ for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+ status = 0;
+ return (char *)pm8001_ha->
+ forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+
+ /* Increment the MEMBASE II Shifting Register value by 0x100.*/
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 2);
+ for (index = 0; index < 256; index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
+ }
+ pm8001_ha->fatal_forensic_shift_offset += 0x100;
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_ha->fatal_bar_loc = 0;
+ status = 0;
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+ }
+ if (pm8001_ha->forensic_fatal_step == 1) {
+ pm8001_ha->fatal_forensic_shift_offset = 0;
+ /* Read 64K of the debug data. */
+ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+ MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+ /* Poll FDDHSHK until clear */
+ start = jiffies + (2 * HZ); /* 2 sec */
+
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
+ } while ((reg_val) && time_before(jiffies, start));
+
+ if (reg_val != 0) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+ " = 0x%x\n", reg_val));
+ return -1;
+ }
+
+ /* Read the next 64K of the debug data. */
+ pm8001_ha->forensic_fatal_step = 0;
+ if (pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS) !=
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->
+ forensic_info.data_buf.direct_data,
+ "%08x ", 4);
+ pm8001_ha->forensic_info.data_buf.direct_len = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+ status = 0;
+ }
+ }
+
+ return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf;
+}
+
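+/*
+ * Illustrative usage (not part of the patch): each read of fatal_log
+ * returns one chunk prefixed with a status word (2 = more data pending,
+ * 3 or 4 = dump complete), so a hypothetical consumer keeps reading
+ * until it sees a completion marker:
+ *
+ *	int done = 0;
+ *	while (!done)
+ *		done = read_chunk("/sys/class/scsi_host/host0/fatal_log");
+ *
+ * read_chunk() and the sysfs path are assumptions for illustration.
+ */
+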
/**
* read_main_config_table - read the configure table and save it.
* @pm8001_ha: our hba card information
@@ -430,7 +652,11 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
table is updated */
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
/* wait until Inbound DoorBell Clear Register toggled */
- max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+ if (IS_SPCV_12G(pm8001_ha->pdev)) {
+ max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ } else {
+ max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ }
do {
udelay(1);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@@ -579,6 +805,9 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->pspa_q_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
0xFFFFFF);
+ pm8001_ha->fatal_tbl_addr =
+ base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
+ 0xFFFFFF);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("GST OFFSET 0x%x\n",
@@ -913,7 +1142,11 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
/* wait until Inbound DoorBell Clear Register toggled */
- max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
+ if (IS_SPCV_12G(pm8001_ha->pdev)) {
+ max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ } else {
+ max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ }
do {
udelay(1);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@@ -959,6 +1192,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
{
u32 regval;
u32 bootloader_state;
+ u32 ibutton0, ibutton1;
/* Check if MPI is in ready state to reset */
if (mpi_uninit_check(pm8001_ha) != 0) {
@@ -1017,7 +1251,27 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (-1 == check_fw_ready(pm8001_ha)) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Firmware is not ready!\n"));
- return -EBUSY;
+ /* check iButton feature support for motherboard controller */
+ if (pm8001_ha->pdev->subsystem_vendor !=
+ PCI_VENDOR_ID_ADAPTEC2 &&
+ pm8001_ha->pdev->subsystem_vendor != 0) {
+ ibutton0 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_HOST_SCRATCH_PAD_6);
+ ibutton1 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_HOST_SCRATCH_PAD_7);
+ if (!ibutton0 && !ibutton1) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("iButton Feature is"
+ " not Available!!!\n"));
+ return -EBUSY;
+ }
+ if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("CRC Check for iButton"
+ " Feature Failed!!!\n"));
+ return -EBUSY;
+ }
+ }
}
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("SPCv soft reset Complete\n"));
@@ -1268,6 +1522,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
+ /* Print sas address of IO failed device */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW))
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive"
+ ":%016llx", SAS_ADDR(t->dev->sas_addr)));
+
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha,
@@ -1691,6 +1952,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 param;
u32 status;
u32 tag;
+ int i, j;
+ u8 sata_addr_low[4];
+ u32 temp_sata_addr_low, temp_sata_addr_hi;
+ u8 sata_addr_hi[4];
struct sata_completion_resp *psataPayload;
struct task_status_struct *ts;
struct ata_task_resp *resp ;
@@ -1740,7 +2005,47 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+ /* Print sas address of IO failed device */
+ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+ (status != IO_UNDERFLOW)) {
+ if (!((t->dev->parent) &&
+ (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+ for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++)
+ sata_addr_low[i] = pm8001_ha->sas_addr[j];
+ for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++)
+ sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+ memcpy(&temp_sata_addr_low, sata_addr_low,
+ sizeof(sata_addr_low));
+ memcpy(&temp_sata_addr_hi, sata_addr_hi,
+ sizeof(sata_addr_hi));
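+ /* Byte-swap each 32-bit half (an open-coded swab32()) so the
+ * address prints in wire order; the low half also folds in the
+ * attached PHY id. */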
+ temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) |
+ ((temp_sata_addr_hi << 8) & 0xff0000) |
+ ((temp_sata_addr_hi >> 8) & 0xff00) |
+ ((temp_sata_addr_hi << 24) & 0xff000000));
+ temp_sata_addr_low = ((((temp_sata_addr_low >> 24) & 0xff) |
+ ((temp_sata_addr_low << 8) & 0xff0000) |
+ ((temp_sata_addr_low >> 8) & 0xff00) |
+ ((temp_sata_addr_low << 24) & 0xff000000)) +
+ pm8001_dev->attached_phy + 0x10);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%08x%08x", temp_sata_addr_hi,
+ temp_sata_addr_low));
+ } else {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("SAS Address of IO Failure Drive:"
+ "%016llx", SAS_ADDR(t->dev->sas_addr)));
+ }
+ }
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
@@ -3103,9 +3408,27 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" pm80xx_addition_functionality\n"));
+ u8 page_code;
+ struct set_phy_profile_resp *pPayload =
+ (struct set_phy_profile_resp *)(piomb + 4);
+ u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
+ u32 status = le32_to_cpu(pPayload->status);
+ page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
+ if (status) {
+ /* status is FAILED */
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("PhyProfile command failed with status "
+ "0x%08X \n", status));
+ return -1;
+ } else {
+ if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Invalid page code 0x%X\n",
+ page_code));
+ return -1;
+ }
+ }
return 0;
}
@@ -3484,8 +3807,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
else
pm8001_ha->smp_exp_mode = SMP_INDIRECT;
- /* DIRECT MODE support only in spcv/ve */
- pm8001_ha->smp_exp_mode = SMP_DIRECT;
tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
preq_dma_addr = (char *)phys_to_virt(tmp_addr);
@@ -3501,7 +3822,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
/* exclude top 4 bytes for SMP req header */
smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address
- (&task->smp_task.smp_req) - 4);
+ (&task->smp_task.smp_req) + 4);
/* exclude 4 bytes for SMP req header and CRC */
smp_cmd.long_smp_req.long_req_size =
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
@@ -3604,10 +3925,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
int ret;
- u64 phys_addr;
+ u64 phys_addr, start_addr, end_addr;
+ u32 end_addr_high, end_addr_low;
struct inbound_queue_table *circularQ;
- static u32 inb;
- static u32 outb;
+ u32 q_index;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
@@ -3626,7 +3947,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
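+ /* Spread I/Os across inbound queues by hashing the device id
+ * instead of always posting to queue 0. */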
+ q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+ circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
@@ -3658,6 +3980,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + ssp_cmd.enc_len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != ssp_cmd.enc_addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, ssp_cmd.enc_len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ ssp_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+ }
} else if (task->num_scatter == 0) {
ssp_cmd.enc_addr_low = 0;
ssp_cmd.enc_addr_high = 0;
@@ -3674,7 +4020,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
} else {
PM8001_IO_DBG(pm8001_ha, pm8001_printk(
"Sending Normal SAS command 0x%x inb q %x\n",
- task->ssp_task.cmd->cmnd[0], inb));
+ task->ssp_task.cmd->cmnd[0], q_index));
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem,
@@ -3693,6 +4039,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + ssp_cmd.len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != ssp_cmd.addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, ssp_cmd.len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ ssp_cmd.addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
+ ssp_cmd.esgl = cpu_to_le32(1<<31);
+ }
} else if (task->num_scatter == 0) {
ssp_cmd.addr_low = 0;
ssp_cmd.addr_high = 0;
@@ -3700,11 +4070,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = 0;
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
-
- /* rotate the outb queue */
- outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
-
+ q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ &ssp_cmd, q_index);
return ret;
}
@@ -3716,18 +4084,19 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
u32 tag = ccb->ccb_tag;
int ret;
- static u32 inb;
- static u32 outb;
+ u32 q_index;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
- u64 phys_addr;
+ u64 phys_addr, start_addr, end_addr;
+ u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
u32 dir;
struct inbound_queue_table *circularQ;
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+ circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
if (task->data_dir == PCI_DMA_NONE) {
ATAP = 0x04; /* no data*/
@@ -3788,6 +4157,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
sata_cmd.enc_esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + sata_cmd.enc_len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != sata_cmd.enc_addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x "
+ "end_addr_high=0x%08x end_addr_low"
+ "=0x%08x has crossed 4G boundary\n",
+ start_addr, sata_cmd.enc_len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ sata_cmd.enc_addr_low =
+ lower_32_bits(phys_addr);
+ sata_cmd.enc_addr_high =
+ upper_32_bits(phys_addr);
+ sata_cmd.enc_esgl =
+ cpu_to_le32(1 << 31);
+ }
} else if (task->num_scatter == 0) {
sata_cmd.enc_addr_low = 0;
sata_cmd.enc_addr_high = 0;
@@ -3808,7 +4202,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
} else {
PM8001_IO_DBG(pm8001_ha, pm8001_printk(
"Sending Normal SATA command 0x%x inb %x\n",
- sata_cmd.sata_fis.command, inb));
+ sata_cmd.sata_fis.command, q_index));
/* dad (bit 0-1) is 0 */
sata_cmd.ncqtag_atap_dir_m_dad =
cpu_to_le32(((ncg_tag & 0xff)<<16) |
@@ -3829,6 +4223,30 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
+ /* Check 4G Boundary */
+ start_addr = cpu_to_le64(dma_addr);
+ end_addr = (start_addr + sata_cmd.len) - 1;
+ end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+ end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ if (end_addr_high != sata_cmd.addr_high) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("The sg list address "
+ "start_addr=0x%016llx data_len=0x%x"
+ "end_addr_high=0x%08x end_addr_low="
+ "0x%08x has crossed 4G boundary\n",
+ start_addr, sata_cmd.len,
+ end_addr_high, end_addr_low));
+ pm8001_chip_make_sg(task->scatter, 1,
+ ccb->buf_prd);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info,
+ buf_prd[0]);
+ sata_cmd.addr_low =
+ lower_32_bits(phys_addr);
+ sata_cmd.addr_high =
+ upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1 << 31);
+ }
} else if (task->num_scatter == 0) {
sata_cmd.addr_low = 0;
sata_cmd.addr_high = 0;
@@ -3905,12 +4323,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
}
-
+ q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, outb++);
-
- /* rotate the outb queue */
- outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+ &sata_cmd, q_index);
return ret;
}
@@ -3941,9 +4356,16 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
** [14] 0b disable spin up hold; 1b enable spin up hold
** [15] 0b no change in current PHY analog setup; 1b enable using SPAST
*/
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | phy_id);
+ if (!IS_SPCV_12G(pm8001_ha->pdev))
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | phy_id);
+ else
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
+ phy_id);
+
/* SSC Disable and SAS Analog ST configuration */
/**
payload.ase_sh_lm_slr_phyid =
@@ -4102,6 +4524,45 @@ pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
return IRQ_HANDLED;
}
+void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
+ u32 operation, u32 phyid, u32 length, u32 *buf)
+{
+ u32 tag, i, j = 0;
+ int rc;
+ struct set_phy_profile_req payload;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SET_PHY_PROFILE;
+
+ memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n"));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+ payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk(" phy profile command for phy %x ,length is %d\n",
+ payload.ppc_phyid, length));
+ for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
+ payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
+ j++;
+ }
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+ u32 length, u8 *buf)
+{
+ u32 page_code, i;
+
+ page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ mpi_set_phy_profile_req(pm8001_ha,
+ SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
+ length = length + PHY_DWORD_LENGTH;
+ }
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n"));
+}
const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx",
.chip_init = pm80xx_chip_init,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 2b760ba75d7b..c86816bea424 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -168,6 +168,11 @@
#define LINKRATE_15 (0x01 << 8)
#define LINKRATE_30 (0x02 << 8)
#define LINKRATE_60 (0x06 << 8)
+#define LINKRATE_120 (0x08 << 8)
+
+/* phy_profile */
+#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04
+#define PHY_DWORD_LENGTH 0xC
/* Thermal related */
#define THERMAL_ENABLE 0x1
@@ -1223,10 +1228,10 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
/* MSGU CONFIGURATION TABLE*/
-#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01
-#define SPCv_MSGU_CFG_TABLE_RESET 0x02
-#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04
-#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08
+#define SPCv_MSGU_CFG_TABLE_UPDATE 0x001
+#define SPCv_MSGU_CFG_TABLE_RESET 0x002
+#define SPCv_MSGU_CFG_TABLE_FREEZE 0x004
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008
#define MSGU_IBDB_SET 0x00
#define MSGU_HOST_INT_STATUS 0x08
#define MSGU_HOST_INT_MASK 0x0C
@@ -1520,4 +1525,6 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
+
+#define MEMBASE_II_SHIFT_REGISTER 0x1010
#endif
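The 4G-boundary checks added in pm80xx_hwi.c above reduce to comparing the upper 32 bits of a transfer's first and last byte; when they differ, the driver falls back to an external SG list. A standalone sketch of that test (hypothetical helper, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* True when a DMA buffer of len bytes starting at dma_addr spans
	 * two different 4 GiB regions. */
	static bool crosses_4g_boundary(uint64_t dma_addr, uint32_t len)
	{
		uint64_t end_addr = dma_addr + len - 1;

		return (dma_addr >> 32) != (end_addr >> 32);
	}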
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index c37b244cf8ae..ff0fc7c7812f 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_target.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d7a99ae7f39d..5f174b83f56f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -29,7 +29,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
return 0;
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (off < ha->md_template_size) {
rval = memory_read_from_buffer(buf, count,
&off, ha->md_tmplt_hdr, ha->md_template_size);
@@ -71,7 +71,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x705d,
"Firmware dump cleared on (%ld).\n", vha->host_no);
- if (IS_QLA82XX(vha->hw)) {
+ if (IS_P3P_TYPE(ha)) {
qla82xx_md_free(vha);
qla82xx_md_prep(vha);
}
@@ -95,11 +95,15 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
} else
qla2x00_system_error(vha);
break;
case 4:
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (ha->md_tmplt_hdr)
ql_dbg(ql_dbg_user, vha, 0x705b,
"MiniDump supported with this firmware.\n");
@@ -109,7 +113,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
}
break;
case 5:
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case 6:
@@ -586,7 +590,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int type;
uint32_t idc_control;
-
+ uint8_t *tmp_data = NULL;
if (off != 0)
return -EINVAL;
@@ -597,14 +601,23 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
"Issuing ISP reset.\n");
scsi_block_requests(vha->host);
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_no_md_cap = 1;
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ idc_control = qla8044_rd_reg(ha,
+ QLA8044_IDC_DRV_CTRL);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_control | GRACEFUL_RESET_BIT1));
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
+ } else {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
}
- qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
scsi_unblock_requests(vha->host);
break;
@@ -640,7 +653,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
break;
}
case 0x2025e:
- if (!IS_QLA82XX(ha) || vha != base_vha) {
+ if (!IS_P3P_TYPE(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
"FCoE ctx reset no supported.\n");
return -EPERM;
@@ -674,7 +687,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
__qla83xx_set_idc_control(vha, idc_control);
qla83xx_idc_unlock(vha, 0);
break;
-
+ case 0x20261:
+ ql_dbg(ql_dbg_user, vha, 0x70e0,
+ "Updating cache versions without reset ");
+
+ tmp_data = vmalloc(256);
+ if (!tmp_data) {
+ ql_log(ql_log_warn, vha, 0x70e1,
+ "Unable to allocate memory for VPD information update.\n");
+ return -ENOMEM;
+ }
+ ha->isp_ops->get_flash_version(vha, tmp_data);
+ vfree(tmp_data);
+ break;
}
return count;
}
@@ -1212,7 +1237,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1265,10 +1290,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
if (!IS_CNA_CAPABLE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
- vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
- vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
- vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
+ return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
}
static ssize_t
@@ -1287,12 +1309,6 @@ qla2x00_thermal_temp_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
uint16_t temp = 0;
- if (!vha->hw->thermal_support) {
- ql_log(ql_log_warn, vha, 0x70db,
- "Thermal not supported by this card.\n");
- goto done;
- }
-
if (qla2x00_reset_active(vha)) {
ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
goto done;
@@ -1725,11 +1741,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->lip_count = stats->lip_cnt;
pfc_host_stat->tx_frames = stats->tx_frames;
pfc_host_stat->rx_frames = stats->rx_frames;
- pfc_host_stat->dumped_frames = stats->dumped_frames;
+ pfc_host_stat->dumped_frames = stats->discarded_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
+ pfc_host_stat->error_frames =
+ stats->dropped_frames + stats->discarded_frames;
+ pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
+ pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
}
+ pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
+ pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
+ pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
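+ /* Report seconds since the stats were last cleared; do_div() keeps
+ * the 64-bit jiffies arithmetic portable on 32-bit hosts. */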
+ pfc_host_stat->seconds_since_last_reset =
+ get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
+ do_div(pfc_host_stat->seconds_since_last_reset, HZ);
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1738,6 +1764,16 @@ done:
}
static void
+qla2x00_reset_host_stats(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+
+ vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+}
+
+static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
@@ -2043,6 +2079,7 @@ struct fc_function_template qla2xxx_transport_functions = {
.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
.terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
+ .reset_fc_host_stats = qla2x00_reset_host_stats,
.vport_create = qla24xx_vport_create,
.vport_disable = qla24xx_vport_disable,
@@ -2089,6 +2126,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
.terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
+ .reset_fc_host_stats = qla2x00_reset_host_stats,
+
.bsg_request = qla24xx_bsg_request,
.bsg_timeout = qla24xx_bsg_timeout,
};
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 417eaad50ae2..aa57bf0af574 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -125,7 +125,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
uint32_t len;
uint32_t oper;
- if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
+ if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
@@ -559,7 +559,7 @@ qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@@ -627,9 +627,10 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
{
int ret = 0;
int rval = 0;
+ unsigned long rem_tmo = 0, current_tmo = 0;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
goto done_set_internal;
if (mode == INTERNAL_LOOPBACK)
@@ -652,8 +653,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
}
/* Wait for DCBX complete event */
- if (!wait_for_completion_timeout(&ha->dcbx_comp,
- (DCBX_COMP_TIMEOUT * HZ))) {
+ current_tmo = DCBX_COMP_TIMEOUT * HZ;
+ while (1) {
+ rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
+ current_tmo);
+ if (!ha->idc_extend_tmo || rem_tmo) {
+ ha->idc_extend_tmo = 0;
+ break;
+ }
+ current_tmo = ha->idc_extend_tmo * HZ;
+ ha->idc_extend_tmo = 0;
+ }
+
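+ /* rem_tmo is zero here only on a true timeout; a firmware-requested
+ * extension re-arms the wait via ha->idc_extend_tmo instead. */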
+ if (!rem_tmo) {
ql_dbg(ql_dbg_user, vha, 0x7022,
"DCBX completion not received.\n");
ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
@@ -678,6 +690,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
}
ha->notify_dcbx_comp = 0;
+ ha->idc_extend_tmo = 0;
done_set_internal:
return rval;
@@ -773,7 +786,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
- ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@@ -783,7 +796,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
} else {
- if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
@@ -806,7 +819,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"elreq.options=%04x\n", elreq.options);
if (elreq.options == EXTERNAL_LOOPBACK)
- if (IS_QLA8031(ha))
+ if (IS_QLA8031(ha) || IS_QLA8044(ha))
rval = qla81xx_set_loopback_mode(vha,
config, new_config, elreq.options);
else
@@ -1266,6 +1279,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
int rval = 0;
struct qla_port_param *port_param = NULL;
fc_port_t *fcport = NULL;
+ int found = 0;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint8_t *rsp_ptr = NULL;
@@ -1288,10 +1302,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
fcport->port_name, sizeof(fcport->port_name)))
continue;
+
+ found = 1;
break;
}
- if (!fcport) {
+ if (!found) {
ql_log(ql_log_warn, vha, 0x7049,
"Failed to find port.\n");
return -EINVAL;
@@ -1318,12 +1334,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x704c,
- "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
- "%04x %x %04x %04x.\n", fcport->port_name[0],
- fcport->port_name[1], fcport->port_name[2],
- fcport->port_name[3], fcport->port_name[4],
- fcport->port_name[5], fcport->port_name[6],
- fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
+ "iIDMA cmd failed for %8phN -- "
+ "%04x %x %04x %04x.\n", fcport->port_name,
+ rval, fcport->fp_speed, mb[0], mb[1]);
rval = (DID_ERROR << 16);
} else {
if (!port_param->mode) {
@@ -1882,7 +1895,7 @@ done:
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_OK) << 16;
bsg_job->job_done(bsg_job);
- /* Always retrun success, vendor rsp carries correct status */
+ /* Always return success, vendor rsp carries correct status */
return 0;
}
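
Editor's note: the loopback change above is the consumer half of a two-part IDC timeout-extension mechanism. The interrupt path (see the qla_isr.c hunk further down) stores the firmware-requested extension from an MBA_IDC_TIME_EXT AEN in ha->idc_extend_tmo, and the rewritten wait loop re-arms the wait with that value instead of failing. The exit test works because wait_for_completion_timeout() returns the remaining jiffies on completion and 0 on timeout. The pattern in isolation, with comp/extend_tmo standing in for ha->dcbx_comp and ha->idc_extend_tmo:

    unsigned long tmo = DCBX_COMP_TIMEOUT * HZ, rem;

    for (;;) {
            rem = wait_for_completion_timeout(&comp, tmo);
            if (rem)                /* completed inside the current window */
                    break;
            if (!extend_tmo)        /* plain timeout, no extension requested */
                    break;
            tmo = extend_tmo * HZ;  /* firmware asked for more time: re-wait */
            extend_tmo = 0;
    }
    /* rem == 0 here means the (possibly extended) wait timed out. */
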
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index df132fec6d86..ee5c1833eb73 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,24 +11,28 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x014f | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x117a | 0x111a-0x111b |
+ * | Module Init and Probe | 0x0159 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x1181 | 0x111a-0x111b |
* | | | 0x1155-0x1158 |
+ * | | | 0x1018-0x1019 |
+ * | | | 0x1115-0x1116 |
+ * | | | 0x10ca |
* | Device Discovery | 0x2095 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b |
+ * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4022 | 0x4002,0x4013 |
- * | Async Events | 0x5081 | 0x502b-0x502f |
+ * | Async Events | 0x5087 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
- * | | | 0x5040,0x5075 |
- * | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x70dd | 0x7018,0x702e, |
+ * | | | 0x5084,0x5075 |
+ * | | | 0x503d,0x5044 |
+ * | Timer Routines | 0x6012 | |
+ * | User Space Interactions | 0x70e1 | 0x7018,0x702e, |
* | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
@@ -36,17 +40,28 @@
* | | | 0x70a5,0x70a6, |
* | | | 0x70a8,0x70ab, |
* | | | 0x70ad-0x70ae, |
- * | | | 0x70d1-0x70da, |
+ * | | | 0x70d1-0x70db, |
* | | | 0x7047,0x703b |
- * | Task Management | 0x803c | 0x8025-0x8026 |
+ * | | | 0x70de-0x70df, |
+ * | Task Management | 0x803d | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 |
+ * | ISP82XX Specific | 0xb14c | 0xb002,0xb024 |
+ * | | | 0xb09e,0xb0ae |
+ * | | | 0xb0e0-0xb0ef |
+ * | | | 0xb085,0xb0dc |
+ * | | | 0xb107,0xb108 |
+ * | | | 0xb111,0xb11e |
+ * | | | 0xb12c,0xb12d |
+ * | | | 0xb13a,0xb142 |
+ * | | | 0xb13c-0xb140 |
+ * | | | 0xb149 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
- * | Target Mode | 0xe070 | |
- * | Target Mode Management | 0xf072 | |
+ * | Target Mode | 0xe070 | 0xe021 |
+ * | Target Mode Management | 0xf072 | 0xf002-0xf003 |
+ * | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
@@ -519,7 +534,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
uint32_t cnt, que_idx;
uint8_t que_cnt;
struct qla2xxx_mq_chain *mq = ptr;
- struct device_reg_25xxmq __iomem *reg;
+ device_reg_t __iomem *reg;
if (!ha->mqenable || IS_QLA83XX(ha))
return ptr;
@@ -533,13 +548,16 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ha->max_req_queues : ha->max_rsp_queues;
mq->count = htonl(que_cnt);
for (cnt = 0; cnt < que_cnt; cnt++) {
- reg = (struct device_reg_25xxmq __iomem *)
- (ha->mqiobase + cnt * QLA_QUE_PAGE);
+ reg = ISP_QUE_REG(ha, cnt);
que_idx = cnt * 4;
- mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
- mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
- mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
- mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
+ mq->qregs[que_idx] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+ mq->qregs[que_idx+1] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+ mq->qregs[que_idx+2] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+ mq->qregs[que_idx+3] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
}
return ptr + sizeof(struct qla2xxx_mq_chain);
@@ -941,7 +959,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return;
risc_address = ext_mem_cnt = 0;
@@ -2530,7 +2548,7 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
if (!ql_mask_match(level))
return;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
mbx_reg = &reg82->mailbox_in[0];
else if (IS_FWI2_CAPABLE(ha))
mbx_reg = &reg24->mailbox0;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 95ca32a71e75..93db74ef3461 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,7 @@
#include "qla_bsg.h"
#include "qla_nx.h"
+#include "qla_nx2.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
#define QLA2XXX_MANUFACTURER "QLogic Corporation"
@@ -642,6 +643,7 @@ struct device_reg_fx00 {
uint32_t initval6; /* C8 */
uint32_t initval7; /* CC */
uint32_t fwheartbeat; /* D0 */
+ uint32_t pseudoaen; /* D4 */
};
@@ -805,6 +807,7 @@ struct mbx_cmd_32 {
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
+#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */
/* 83XX FCoE specific */
#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
@@ -997,6 +1000,7 @@ struct mbx_cmd_32 {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
/*
@@ -1233,8 +1237,9 @@ struct link_statistics {
uint32_t unused1[0x1a];
uint32_t tx_frames;
uint32_t rx_frames;
- uint32_t dumped_frames;
- uint32_t unused2[2];
+ uint32_t discarded_frames;
+ uint32_t dropped_frames;
+ uint32_t unused2[1];
uint32_t nos_rcvd;
};
@@ -2656,6 +2661,11 @@ struct qla_statistics {
uint32_t total_isp_aborts;
uint64_t input_bytes;
uint64_t output_bytes;
+ uint64_t input_requests;
+ uint64_t output_requests;
+ uint32_t control_requests;
+
+ uint64_t jiffies_at_last_reset;
};
struct bidi_statistics {
@@ -2670,9 +2680,8 @@ struct bidi_statistics {
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
((ha->mqenable || IS_QLA83XX(ha)) ? \
- ((device_reg_t __iomem *)(ha->mqiobase) +\
- (QLA_QUE_PAGE * id)) :\
- ((device_reg_t __iomem *)(ha->iobase)))
+ ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
+ ((void __iomem *)ha->iobase))
#define QLA_REQ_QUE_ID(tag) \
((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
#define QLA_DEFAULT_QUE_QOS 5
@@ -2935,7 +2944,8 @@ struct qla_hw_data {
#define DT_ISP2031 BIT_15
#define DT_ISP8031 BIT_16
#define DT_ISPFX00 BIT_17
-#define DT_ISP_LAST (DT_ISPFX00 << 1)
+#define DT_ISP8044 BIT_18
+#define DT_ISP_LAST (DT_ISP8044 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -2961,6 +2971,7 @@ struct qla_hw_data {
#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
#define IS_QLA81XX(ha) (IS_QLA8001(ha))
#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
+#define IS_QLA8044(ha) (DT_MASK(ha) & DT_ISP8044)
#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
@@ -2975,10 +2986,12 @@ struct qla_hw_data {
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
- IS_QLA8031(ha))
+ IS_QLA8031(ha) || IS_QLA8044(ha))
+#define IS_P3P_TYPE(ha) (IS_QLA82XX(ha) || IS_QLA8044(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA82XX(ha) || IS_QLA83XX(ha))
+ IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA8044(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
@@ -3187,10 +3200,12 @@ struct qla_hw_data {
uint32_t nvram_data_off;
uint32_t fdt_wrt_disable;
+ uint32_t fdt_wrt_enable;
uint32_t fdt_erase_cmd;
uint32_t fdt_block_size;
uint32_t fdt_unprotect_sec_cmd;
uint32_t fdt_protect_sec_cmd;
+ uint32_t fdt_wrt_sts_reg_cmd;
uint32_t flt_region_flt;
uint32_t flt_region_fdt;
@@ -3277,6 +3292,7 @@ struct qla_hw_data {
/* QLA83XX IDC specific fields */
uint32_t idc_audit_ts;
+ uint32_t idc_extend_tmo;
/* DPC low-priority workqueue */
struct workqueue_struct *dpc_lp_wq;
@@ -3296,9 +3312,6 @@ struct qla_hw_data {
struct mr_data_fx00 mr;
struct qlt_hw_data tgt;
- uint16_t thermal_support;
-#define THERMAL_SUPPORT_I2C BIT_0
-#define THERMAL_SUPPORT_ISP BIT_1
};
/*
@@ -3364,6 +3377,7 @@ typedef struct scsi_qla_host {
#define PORT_UPDATE_NEEDED 24
#define FX00_RESET_RECOVERY 25
#define FX00_TARGET_SCAN 26
+#define FX00_CRITEMP_RECOVERY 27
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -3402,7 +3416,7 @@ typedef struct scsi_qla_host {
uint16_t fcoe_fcf_idx;
uint8_t fcoe_vn_port_mac[6];
- uint32_t vp_abort_cnt;
+ uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
uint16_t vp_idx; /* vport ID */
@@ -3435,6 +3449,7 @@ typedef struct scsi_qla_host {
struct bidi_statistics bidi_stats;
atomic_t vref_count;
+ struct qla8044_reset_template reset_tmplt;
} scsi_qla_host_t;
#define SET_VP_IDX 1
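
Editor's note: most of the mechanical churn in this patch follows from the IS_P3P_TYPE() predicate introduced above. ISP8021 (82xx) and the new ISP8044 share the P3P register model, so every former IS_QLA82XX() special case that is really about that model now covers both parts with a single test:

    /* From the qla_def.h hunk above: */
    #define IS_P3P_TYPE(ha) (IS_QLA82XX(ha) || IS_QLA8044(ha))

    /* Typical call-site conversion repeated throughout the patch: */
    if (IS_P3P_TYPE(ha))            /* was: IS_QLA82XX(ha) */
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
    else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
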
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 1ac2b0e3a0e1..610d3aa905a0 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1387,6 +1387,8 @@ struct qla_flt_header {
#define FLT_REG_GOLD_FW 0x2f
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
+#define FLT_REG_CNA_FW 0x97
+#define FLT_REG_BOOT_CODE_8044 0xA2
#define FLT_REG_FCOE_FW 0xA4
#define FLT_REG_FCOE_NVRAM_0 0xAA
#define FLT_REG_FCOE_NVRAM_1 0xAC
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2d98232a08eb..4446bf5fe292 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -357,6 +357,12 @@ extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
extern int
+qla82xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
+qla25xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -435,19 +441,19 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
*/
extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
@@ -463,21 +469,25 @@ extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
- uint32_t, uint16_t *);
+ uint32_t, uint16_t *);
extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
+extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
+ uint8_t *, uint32_t, uint32_t);
+extern void qla8044_watchdog(struct scsi_qla_host *vha);
extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
+extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *);
extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
@@ -498,7 +508,7 @@ extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
- uint8_t *, uint32_t);
+ uint8_t *, uint32_t);
extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
/*
@@ -584,6 +594,7 @@ extern int qlafx00_start_scsi(srb_t *);
extern int qlafx00_abort_isp(scsi_qla_host_t *);
extern int qlafx00_iospace_config(struct qla_hw_data *);
extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
+extern int qlafx00_driver_shutdown(scsi_qla_host_t *, int);
extern int qlafx00_fw_ready(scsi_qla_host_t *);
extern int qlafx00_configure_devices(scsi_qla_host_t *);
extern int qlafx00_reset_initialize(scsi_qla_host_t *);
@@ -601,6 +612,7 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
extern void qlafx00_timer_routine(scsi_qla_host_t *);
extern int qlafx00_rescan_isp(scsi_qla_host_t *);
+extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
/* qla82xx related functions */
@@ -619,9 +631,9 @@ extern int qla82xx_start_firmware(scsi_qla_host_t *);
/* Firmware and flash related functions */
extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
/* Mailbox related functions */
extern int qla82xx_abort_isp(scsi_qla_host_t *);
@@ -662,7 +674,7 @@ extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
- size_t, char *);
+ size_t, char *);
extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
extern void qla82xx_start_iocbs(scsi_qla_host_t *);
@@ -674,6 +686,8 @@ extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
+extern int qla82xx_read_temperature(scsi_qla_host_t *);
+extern int qla8044_read_temperature(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -695,5 +709,31 @@ extern void qla82xx_md_free(scsi_qla_host_t *);
extern int qla82xx_md_collect(scsi_qla_host_t *);
extern void qla82xx_md_prep(scsi_qla_host_t *);
extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
+extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha);
+
+/* Function declarations for ISP8044 */
+extern int qla8044_idc_lock(struct qla_hw_data *ha);
+extern void qla8044_idc_unlock(struct qla_hw_data *ha);
+extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr);
+extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val);
+extern void qla8044_read_reset_template(struct scsi_qla_host *ha);
+extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
+extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
+extern void qla8044_wr_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg, const uint32_t value);
+extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
+extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
+extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
+extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
+extern void qla8044_clear_drv_active(struct scsi_qla_host *vha);
+void qla8044_get_minidump(struct scsi_qla_host *vha);
+int qla8044_collect_md_data(struct scsi_qla_host *vha);
+extern int qla8044_md_get_template(scsi_qla_host_t *);
+extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern irqreturn_t qla8044_intr_handler(int, void *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern int qla8044_abort_isp(scsi_qla_host_t *);
+extern int qla8044_check_fw_alive(struct scsi_qla_host *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 0926451980ed..cd47f1b32d9a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -49,6 +49,8 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+ vha->qla_stats.control_requests++;
+
return (ms_pkt);
}
@@ -87,6 +89,8 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
ct_pkt->vp_index = vha->vp_idx;
+ vha->qla_stats.control_requests++;
+
return (ct_pkt);
}
@@ -226,17 +230,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->d_id.b.domain = 0xf0;
ql_dbg(ql_dbg_disc, vha, 0x2063,
- "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
- "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+ "GA_NXT entry - nn %8phN pn %8phN "
"port_id=%02x%02x%02x.\n",
- fcport->node_name[0], fcport->node_name[1],
- fcport->node_name[2], fcport->node_name[3],
- fcport->node_name[4], fcport->node_name[5],
- fcport->node_name[6], fcport->node_name[7],
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7],
+ fcport->node_name, fcport->port_name,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
}
@@ -447,17 +443,9 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
ql_dbg(ql_dbg_disc, vha, 0x2058,
- "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
- "pn %02x%02x%02x%02x%02x%02x%02X%02x "
+ "GID_PT entry - nn %8phN pn %8phN "
"portid=%02x%02x%02x.\n",
- list[i].node_name[0], list[i].node_name[1],
- list[i].node_name[2], list[i].node_name[3],
- list[i].node_name[4], list[i].node_name[5],
- list[i].node_name[6], list[i].node_name[7],
- list[i].port_name[0], list[i].port_name[1],
- list[i].port_name[2], list[i].port_name[3],
- list[i].port_name[4], list[i].port_name[5],
- list[i].port_name[6], list[i].port_name[7],
+ list[i].node_name, list[i].port_name,
list[i].d_id.b.domain, list[i].d_id.b.area,
list[i].d_id.b.al_pa);
}
@@ -739,6 +727,8 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
wc = (data_size - 16) / 4; /* Size in 32bit words. */
sns_cmd->p.cmd.size = cpu_to_le16(wc);
+ vha->qla_stats.control_requests++;
+
return (sns_cmd);
}
@@ -796,17 +786,9 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->d_id.b.domain = 0xf0;
ql_dbg(ql_dbg_disc, vha, 0x2061,
- "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
- "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+ "GA_NXT entry - nn %8phN pn %8phN "
"port_id=%02x%02x%02x.\n",
- fcport->node_name[0], fcport->node_name[1],
- fcport->node_name[2], fcport->node_name[3],
- fcport->node_name[4], fcport->node_name[5],
- fcport->node_name[6], fcport->node_name[7],
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7],
+ fcport->node_name, fcport->port_name,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
}
@@ -991,17 +973,9 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
WWN_SIZE);
ql_dbg(ql_dbg_disc, vha, 0x206e,
- "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
- "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+ "GID_PT entry - nn %8phN pn %8phN "
"port_id=%02x%02x%02x.\n",
- list[i].node_name[0], list[i].node_name[1],
- list[i].node_name[2], list[i].node_name[3],
- list[i].node_name[4], list[i].node_name[5],
- list[i].node_name[6], list[i].node_name[7],
- list[i].port_name[0], list[i].port_name[1],
- list[i].port_name[2], list[i].port_name[3],
- list[i].port_name[4], list[i].port_name[5],
- list[i].port_name[6], list[i].port_name[7],
+ list[i].node_name, list[i].port_name,
list[i].d_id.b.domain, list[i].d_id.b.area,
list[i].d_id.b.al_pa);
}
@@ -1321,11 +1295,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
size += 4 + WWN_SIZE;
ql_dbg(ql_dbg_disc, vha, 0x2025,
- "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
- eiter->a.node_name[0], eiter->a.node_name[1],
- eiter->a.node_name[2], eiter->a.node_name[3],
- eiter->a.node_name[4], eiter->a.node_name[5],
- eiter->a.node_name[6], eiter->a.node_name[7]);
+ "NodeName = %8phN.\n", eiter->a.node_name);
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1428,16 +1398,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
ql_dbg(ql_dbg_disc, vha, 0x202e,
- "RHBA identifier = "
- "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
- ct_req->req.rhba.hba_identifier[0],
- ct_req->req.rhba.hba_identifier[1],
- ct_req->req.rhba.hba_identifier[2],
- ct_req->req.rhba.hba_identifier[3],
- ct_req->req.rhba.hba_identifier[4],
- ct_req->req.rhba.hba_identifier[5],
- ct_req->req.rhba.hba_identifier[6],
- ct_req->req.rhba.hba_identifier[7], size);
+ "RHBA identifier = %8phN size=%d.\n",
+ ct_req->req.rhba.hba_identifier, size);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
entries, size);
@@ -1494,11 +1456,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
ql_dbg(ql_dbg_disc, vha, 0x2036,
- "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
- ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
- ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
- ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
- ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
+ "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -1678,12 +1636,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
ql_dbg(ql_dbg_disc, vha, 0x203e,
- "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
- ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
- ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
- ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
- ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
- size);
+ "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
entries, size);
@@ -1940,16 +1893,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
ql_dbg(ql_dbg_disc, vha, 0x205b,
"GPSC ext entry - fpn "
- "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
- "speed=%04x.\n",
- list[i].fabric_port_name[0],
- list[i].fabric_port_name[1],
- list[i].fabric_port_name[2],
- list[i].fabric_port_name[3],
- list[i].fabric_port_name[4],
- list[i].fabric_port_name[5],
- list[i].fabric_port_name[6],
- list[i].fabric_port_name[7],
+ "%8phN speeds=%04x speed=%04x.\n",
+ list[i].fabric_port_name,
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
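
Editor's note: the WWN and CDB dumps converted in qla_gs.c (and in qla_bsg.c, qla_init.c and qla_isr.c elsewhere in this patch) use the kernel's %*ph printk extension: the field width gives the byte count and the N suffix suppresses separators, so "%8phN" prints an 8-byte WWN and "%10phN" a 10-byte CDB as one contiguous hex run. For example:

    u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x01, 0x02, 0x03 };

    pr_info("pn %8phN\n", wwpn);    /* prints: pn 21000024ff010203 */
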
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f2216ed2ad8c..03f715e7591e 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -524,7 +524,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
+ vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -552,7 +552,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (rval) {
ql_log(ql_log_fatal, vha, 0x004f,
"Unable to validate FLASH data.\n");
- return (rval);
+ return rval;
+ }
+
+ if (IS_QLA8044(ha)) {
+ qla8044_read_reset_template(vha);
+
+ /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+ * If DONTRESET_BIT0 is set, drivers should not set dev_state
+ * to NEED_RESET, but if NEED_RESET is set, drivers should
+ * honor the reset. */
+ if (ql2xdontresethba == 1)
+ qla8044_set_idc_dontreset(vha);
}
ha->isp_ops->get_flash_version(vha, req->ring);
@@ -564,12 +575,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
ql_log(ql_log_info, vha, 0x0077,
- "Masking HBA WWPN "
- "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
- vha->port_name[0], vha->port_name[1],
- vha->port_name[2], vha->port_name[3],
- vha->port_name[4], vha->port_name[5],
- vha->port_name[6], vha->port_name[7]);
+ "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
return QLA_FUNCTION_FAILED;
}
@@ -620,6 +626,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ if (IS_P3P_TYPE(ha))
+ qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
+ else
+ qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -1332,7 +1343,7 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return QLA_SUCCESS;
ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
@@ -1615,7 +1626,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
unsigned long flags;
uint16_t fw_major_version;
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
qla2x00_stop_firmware(vha);
@@ -1651,7 +1662,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS) {
enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
qla82xx_check_md_needed(vha);
else
rval = qla2x00_get_fw_version(vha);
@@ -1681,7 +1692,7 @@ enable_82xx_npiv:
goto failed;
if (!fw_major_version && ql2xallocfwdump
- && !IS_QLA82XX(ha))
+ && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
}
} else {
@@ -1849,7 +1860,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return;
/* Update Serial Link options. */
@@ -3061,22 +3072,13 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
mb);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x2004,
- "Unable to adjust iIDMA "
- "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
- "%04x.\n", fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7], rval,
- fcport->fp_speed, mb[0], mb[1]);
+ "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
+ fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2005,
- "iIDMA adjusted to %s GB/s "
- "on %02x%02x%02x%02x%02x%02x%02x%02x.\n",
+ "iIDMA adjusted to %s GB/s on %8phN.\n",
qla2x00_get_link_speed_str(ha, fcport->fp_speed),
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7]);
+ fcport->port_name);
}
}
@@ -4007,10 +4009,18 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha)
uint32_t class_type_mask = 0x3;
uint16_t fcoe_other_function = 0xffff, i;
- qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
-
- qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
- qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+ if (IS_QLA8044(ha)) {
+ drv_presence = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ dev_part_info1 = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_PART_INFO_INDEX);
+ dev_part_info2 = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_PART_INFO2);
+ } else {
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
+ qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+ }
for (i = 0; i < 8; i++) {
class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
@@ -4347,7 +4357,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
/* For ISP82XX, driver waits for completion of the commands.
* online flag should be set.
*/
- if (!IS_QLA82XX(ha))
+ if (!(IS_P3P_TYPE(ha)))
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -4360,7 +4370,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
* Driver waits for the completion of the commands.
* the interrupts need to be enabled.
*/
- if (!IS_QLA82XX(ha))
+ if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -4403,7 +4413,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!ha->flags.eeh_busy) {
/* Make sure for ISP 82XX IO DMA is complete */
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
qla82xx_chip_reset_cleanup(vha);
ql_log(ql_log_info, vha, 0x00b4,
"Done chip reset cleanup.\n");
@@ -4723,7 +4733,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return;
vha->flags.online = 0;
@@ -4789,8 +4799,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
ha->nvram_size = sizeof(struct nvram_24xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
- if (IS_QLA82XX(ha))
- ha->vpd_size = FA_VPD_SIZE_82XX;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
@@ -5552,6 +5560,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* Determine NVRAM starting address. */
ha->nvram_size = sizeof(struct nvram_81xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
+ if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
+ ha->vpd_size = FA_VPD_SIZE_82XX;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
@@ -5734,7 +5744,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* Link Down Timeout = 0:
*
- * When Port Down timer expires we will start returning
+ * When Port Down timer expires we will start returning
* I/O's to OS with "DID_NO_CONNECT".
*
* Link Down Timeout != 0:
@@ -6061,7 +6071,7 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
if (priority < 0)
return QLA_FUNCTION_FAILED;
- if (IS_QLA82XX(vha->hw)) {
+ if (IS_P3P_TYPE(vha->hw)) {
fcport->fcp_prio = priority & 0xf;
return QLA_SUCCESS;
}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 28c38b4929ce..957088b04611 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -59,7 +59,7 @@ qla2x00_poll(struct rsp_que *rsp)
unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
local_irq_save(flags);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
qla82xx_poll(0, rsp);
else
ha->isp_ops->intr_handler(0, rsp);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ef0a5481b9dd..46b9307e8be4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -32,9 +32,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
}
return (cflags);
}
@@ -474,7 +476,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
struct qla_hw_data *ha = vha->hw;
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
qla82xx_start_iocbs(vha);
} else {
/* Adjust ring index. */
@@ -642,10 +644,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_WRITE_DATA);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_READ_DATA);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
}
cur_seg = scsi_sglist(cmd);
@@ -758,10 +762,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -1844,7 +1850,7 @@ skip_cmd_array:
if (req->cnt < req_cnt) {
if (ha->mqenable || IS_QLA83XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
- else if (IS_QLA82XX(ha))
+ else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
@@ -2056,6 +2062,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(bsg_job->reply_payload.sg_list)));
els_iocb->rx_len = cpu_to_le32(sg_dma_len
(bsg_job->reply_payload.sg_list));
+
+ sp->fcport->vha->qla_stats.control_requests++;
}
static void
@@ -2133,6 +2141,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
avail_dsds--;
}
ct_iocb->entry_count = entry_count;
+
+ sp->fcport->vha->qla_stats.control_requests++;
}
static void
@@ -2685,6 +2695,9 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
vha->bidi_stats.transfer_bytes += req_data_len;
vha->bidi_stats.io_count++;
+ vha->qla_stats.output_bytes += req_data_len;
+ vha->qla_stats.output_requests++;
+
/* Only one dsd is available for bidirectional IOCB, remaining dsds
* are bundled in continuation iocb
*/
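
Editor's note: the qla_iocb.c hunks pair every existing byte counter with a request counter keyed off the command's DMA direction, so the driver can report request rates as well as throughput; the new fields are the natural source for qla2x00_get_fc_host_stats (not shown in this excerpt) and are presumably cleared by the reset hook registered earlier. The accounting added at each I/O build site reduces to:

    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
            vha->qla_stats.output_bytes += scsi_bufflen(cmd);
            vha->qla_stats.output_requests++;
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
            vha->qla_stats.input_bytes += scsi_bufflen(cmd);
            vha->qla_stats.input_requests++;
    }
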
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2d8e7b812352..ff9c86b1a0d8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -282,25 +282,38 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
"%04x %04x %04x %04x %04x %04x %04x.\n",
event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
mb[4], mb[5], mb[6]);
- if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
- vha->hw->flags.idc_compl_status = 1;
- if (vha->hw->notify_dcbx_comp)
- complete(&vha->hw->dcbx_comp);
- }
-
- /* Acknowledgement needed? [Notify && non-zero timeout]. */
- timeout = (descr >> 8) & 0xf;
- if (aen != MBA_IDC_NOTIFY || !timeout)
- return;
+ switch (aen) {
+ /* Handle IDC Error completion case. */
+ case MBA_IDC_COMPLETE:
+ if (mb[1] >> 15) {
+ vha->hw->flags.idc_compl_status = 1;
+ if (vha->hw->notify_dcbx_comp)
+ complete(&vha->hw->dcbx_comp);
+ }
+ break;
- ql_dbg(ql_dbg_async, vha, 0x5022,
- "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
- vha->host_no, event[aen & 0xff], timeout);
+ case MBA_IDC_NOTIFY:
+ /* Acknowledgement needed? [Notify && non-zero timeout]. */
+ timeout = (descr >> 8) & 0xf;
+ ql_dbg(ql_dbg_async, vha, 0x5022,
+ "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
+ vha->host_no, event[aen & 0xff], timeout);
- rval = qla2x00_post_idc_ack_work(vha, mb);
- if (rval != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0x5023,
- "IDC failed to post ACK.\n");
+ if (!timeout)
+ return;
+ rval = qla2x00_post_idc_ack_work(vha, mb);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x5023,
+ "IDC failed to post ACK.\n");
+ break;
+ case MBA_IDC_TIME_EXT:
+ vha->hw->idc_extend_tmo = descr;
+ ql_dbg(ql_dbg_async, vha, 0x5087,
+ "%lu Inter-Driver Communication %s -- "
+ "Extend timeout by=%d.\n",
+ vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
+ break;
+ }
}
#define LS_UNKNOWN 2
@@ -691,7 +704,8 @@ skip_rio:
case MBA_LOOP_DOWN: /* Loop Down Event */
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? RD_REG_WORD(&reg24->mailbox4) : 0;
- mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
+ mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+ : mbx;
ql_dbg(ql_dbg_async, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
mb[1], mb[2], mb[3], mbx);
@@ -740,7 +754,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
+ if (IS_CNA_CAPABLE(ha)) {
ql_dbg(ql_dbg_async, vha, 0x500d,
"DCBX Completed -- %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
@@ -1002,7 +1016,7 @@ skip_rio:
mb[1], mb[2], mb[3]);
break;
case MBA_IDC_NOTIFY:
- if (IS_QLA8031(vha->hw)) {
+ if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
mb[4] = RD_REG_WORD(&reg24->mailbox4);
if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
(mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
@@ -1022,7 +1036,8 @@ skip_rio:
complete(&ha->lb_portup_comp);
/* Fallthru */
case MBA_IDC_TIME_EXT:
- if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
+ IS_QLA8044(ha))
qla81xx_idc_event(vha, mb[0], mb[1]);
break;
@@ -1063,7 +1078,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
ql_log(ql_log_warn, vha, 0x3014,
"Invalid SCSI command index (%x).\n", index);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1080,7 +1095,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
} else {
ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1100,7 +1115,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
"Invalid command index (%x).\n", index);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1805,6 +1820,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
if (scsi_status == 0) {
bsg_job->reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
+ vha->qla_stats.input_bytes +=
+ bsg_job->reply->reply_payload_rcv_len;
+ vha->qla_stats.input_requests++;
rval = EXT_STATUS_OK;
}
goto done;
@@ -1939,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
que = MSW(sts->handle);
req = ha->req_q_map[que];
+ /* Check for invalid queue pointer */
+ if (req == NULL ||
+ que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
+ ql_dbg(ql_dbg_io, vha, 0x3059,
+ "Invalid status handle (0x%x): Bad req pointer. req=%p, "
+ "que=%u.\n", sts->handle, req, que);
+ return;
+ }
+
/* Validate handle. */
if (handle < req->num_outstanding_cmds)
sp = req->outstanding_cmds[handle];
@@ -1949,7 +1976,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
ql_dbg(ql_dbg_io, vha, 0x3017,
"Invalid status handle (0x%x).\n", sts->handle);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2176,8 +2203,10 @@ check_scsi_status:
}
ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
- "Port down status: port-state=0x%x.\n",
- atomic_read(&fcport->state));
+ "Port to be marked lost on fcport=%02x%02x%02x, current "
+ "port state=%s.\n", fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ port_state_str[atomic_read(&fcport->state)]);
if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -2212,16 +2241,13 @@ check_scsi_status:
out:
if (logit)
ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
- "FCP command status: 0x%x-0x%x (0x%x) "
- "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
- "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+ "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
- cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
- cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
- cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
+ cp->cmnd, scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
if (!res)
@@ -2324,7 +2350,7 @@ fatal:
ql_log(ql_log_warn, vha, 0x5030,
"Error entry - invalid handle/queue.\n");
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2452,7 +2478,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
} else
@@ -2865,7 +2891,7 @@ msix_failed:
ret = request_irq(qentry->vector,
qla83xx_msix_entries[i].handler,
0, qla83xx_msix_entries[i].name, rsp);
- } else if (IS_QLA82XX(ha)) {
+ } else if (IS_P3P_TYPE(ha)) {
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
@@ -2950,7 +2976,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
+ !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
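
Editor's note: one defensive change in qla_isr.c deserves mention. qla2x00_status_entry() now validates the queue index packed into the status handle before trusting ha->req_q_map[]. Request-queue IDs are handed out densely from bit 0 of req_qid_map, so find_first_zero_bit() over that map equals the count of valid IDs, and any index at or beyond it marks a corrupt handle. Reduced to its essentials (a sketch assuming that dense-allocation invariant):

    que = MSW(sts->handle);
    if (que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues) ||
        !ha->req_q_map[que])
            return;         /* corrupt handle: drop the status entry */
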
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7257c3c4f2d0..a9aae500e791 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
+ if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, vha, 0x1004,
@@ -106,9 +106,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
- else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
+ else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
else
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -117,33 +117,25 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
command = mcp->mb[0];
mboxes = mcp->out_mb;
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
+ "Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
optr =
(uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
- if (mboxes & BIT_0)
+ if (mboxes & BIT_0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1112,
+ "mbox[%d]<-0x%04x\n", cnt, *iptr);
WRT_REG_WORD(optr, *iptr);
+ }
mboxes >>= 1;
optr++;
iptr++;
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
- "Loaded MBX registers (displayed in bytes) =.\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
- (uint8_t *)mcp->mb, 16);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
- ".\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
- ((uint8_t *)mcp->mb + 0x10), 16);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
- ".\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
- ((uint8_t *)mcp->mb + 0x20), 8);
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
"I/O Address = %p.\n", optr);
- ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
@@ -159,7 +151,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -189,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -236,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) {
+ if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
ha->flags.mbox_busy = 0;
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -254,9 +246,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
iptr2 = mcp->mb;
iptr = (uint16_t *)&ha->mailbox_out[0];
mboxes = mcp->in_mb;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1113,
+ "Mailbox registers (IN):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
- if (mboxes & BIT_0)
+ if (mboxes & BIT_0) {
*iptr2 = *iptr;
+ ql_dbg(ql_dbg_mbx, vha, 0x1114,
+ "mbox[%d]->0x%04x\n", cnt, *iptr2);
+ }
mboxes >>= 1;
iptr2++;
@@ -537,7 +535,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
@@ -556,7 +554,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
else
ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
- if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
ha->mpi_version[0] = mcp->mb[10] & 0xff;
ha->mpi_version[1] = mcp->mb[11] >> 8;
ha->mpi_version[2] = mcp->mb[11] & 0xff;
@@ -1201,7 +1199,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
"Entered %s.\n", __func__);
- if (IS_QLA82XX(ha) && ql2xdbwr)
+ if (IS_P3P_TYPE(ha) && ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
@@ -1667,7 +1665,11 @@ qla24xx_link_initialize(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_LINK_INITIALIZATION;
- mcp->mb[1] = BIT_6|BIT_4;
+ mcp->mb[1] = BIT_4;
+ if (vha->hw->operating_mode == LOOP)
+ mcp->mb[1] |= BIT_6;
+ else
+ mcp->mb[1] |= BIT_5;
mcp->mb[2] = 0;
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3574,7 +3576,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
unsigned long flags;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
@@ -3595,9 +3596,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
- QLA_QUE_PAGE * req->id);
-
mcp->mb[4] = req->id;
/* que in ptr index */
mcp->mb[8] = 0;
@@ -3619,12 +3617,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
- WRT_REG_DWORD(&reg->req_q_in, 0);
+ WRT_REG_DWORD(req->req_q_in, 0);
if (!IS_QLA83XX(ha))
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ WRT_REG_DWORD(req->req_q_out, 0);
}
- req->req_q_in = &reg->req_q_in;
- req->req_q_out = &reg->req_q_out;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = qla2x00_mailbox_command(vha, mcp);
@@ -3646,7 +3642,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
unsigned long flags;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
@@ -3664,9 +3659,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
- QLA_QUE_PAGE * rsp->id);
-
mcp->mb[4] = rsp->id;
/* que in ptr index */
mcp->mb[8] = 0;
@@ -3690,9 +3682,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
- WRT_REG_DWORD(&reg->rsp_q_out, 0);
+ WRT_REG_DWORD(rsp->rsp_q_out, 0);
if (!IS_QLA83XX(ha))
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
+ WRT_REG_DWORD(rsp->rsp_q_in, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3872,6 +3864,112 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
return rval;
}
+int
+qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int i;
+ int len;
+ uint16_t *str;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_P3P_TYPE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
+ "Entered %s.\n", __func__);
+
+ str = (void *)version;
+ len = strlen(version);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
+ mcp->out_mb = MBX_1|MBX_0;
+ for (i = 4; i < 16 && len; i++, str++, len -= 2) {
+ mcp->mb[i] = cpu_to_le16p(str);
+ mcp->out_mb |= 1<<i;
+ }
+ for (; i < 16; i++) {
+ mcp->mb[i] = 0;
+ mcp->out_mb |= 1<<i;
+ }
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117c,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
+ IS_P3P_TYPE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x117f,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * 4 - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1180,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
@@ -4407,7 +4505,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
"Entered %s.\n", __func__);
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_PORT_CONFIG;
mcp->out_mb = MBX_0;
@@ -4512,40 +4610,43 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
struct qla_hw_data *ha = vha->hw;
uint8_t byte;
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
- "Entered %s.\n", __func__);
-
- if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
- rval = qla2x00_read_sfp(vha, 0, &byte,
- 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
- *temp = byte;
- if (rval == QLA_SUCCESS)
- goto done;
-
- ql_log(ql_log_warn, vha, 0x10c9,
- "Thermal not supported through I2C bus, trying alternate "
- "method (ISP access).\n");
- ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1150,
+ "Thermal not supported by this card.\n");
+ return rval;
}
- if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
- rval = qla2x00_read_asic_temperature(vha, temp);
- if (rval == QLA_SUCCESS)
- goto done;
-
- ql_log(ql_log_warn, vha, 0x1019,
- "Thermal not supported through ISP.\n");
- ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
+ if (IS_QLA25XX(ha)) {
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ ha->pdev->subsystem_device == 0x0175) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_13|BIT_0);
+ *temp = byte;
+ return rval;
+ }
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+ ha->pdev->subsystem_device == 0x338e) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
+ *temp = byte;
+ return rval;
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x10c9,
+ "Thermal not supported by this card.\n");
+ return rval;
}
- ql_log(ql_log_warn, vha, 0x1150,
- "Thermal not supported by this card "
- "(ignoring further requests).\n");
- return rval;
+ if (IS_QLA82XX(ha)) {
+ *temp = qla82xx_read_temperature(vha);
+ rval = QLA_SUCCESS;
+ return rval;
+ } else if (IS_QLA8044(ha)) {
+ *temp = qla8044_read_temperature(vha);
+ rval = QLA_SUCCESS;
+ return rval;
+ }
-done:
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
- "Done %s.\n", __func__);
+ rval = qla2x00_read_asic_temperature(vha, temp);
return rval;
}
@@ -4595,7 +4696,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
"Entered %s.\n", __func__);
- if (!IS_QLA82XX(ha))
+ if (!IS_P3P_TYPE(ha))
return QLA_FUNCTION_FAILED;
memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4713,6 +4814,60 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
}
int
+qla8044_md_get_template(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = QLA_FUNCTION_FAILED;
+ int offset = 0, size = MINIDUMP_SIZE_36K;
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
+ "Entered %s.\n", __func__);
+
+ ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
+ ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
+ if (!ha->md_tmplt_hdr) {
+ ql_log(ql_log_warn, vha, 0xb11b,
+ "Unable to allocate memory for Minidump template.\n");
+ return rval;
+ }
+
+ memset(mcp->mb, 0, sizeof(mcp->mb));
+ while (offset < ha->md_template_size) {
+ mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[2] = LSW(RQST_TMPLT);
+ mcp->mb[3] = MSW(RQST_TMPLT);
+ mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[8] = LSW(size);
+ mcp->mb[9] = MSW(size);
+ mcp->mb[10] = LSW(offset);
+ mcp->mb[11] = MSW(offset);
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xb11c,
+ "mailbox command FAILED=0x%x, subcode=%x.\n",
+ ((mcp->mb[1] << 16) | mcp->mb[0]),
+ ((mcp->mb[3] << 16) | mcp->mb[2]));
+ return rval;
+ }
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
+ "Done %s.\n", __func__);
+ offset = offset + size;
+ }
+ return rval;
+}
+
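For reference, the LSW/MSW/LSD/MSD packing above splits the 64-bit template DMA address across four 16-bit mailbox registers, mb[4] carrying the lowest half-word and mb[7] the highest. A minimal standalone sketch of the same packing; the macro definitions are assumed to mirror the driver's (the real ones live in qla_def.h) and the sample address is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed to match the driver's half-word/double-word macros. */
    #define LSW(x) ((uint16_t)(x))
    #define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
    #define LSD(x) ((uint32_t)((uint64_t)(x)))
    #define MSD(x) ((uint32_t)(((uint64_t)(x)) >> 32))

    int main(void)
    {
            /* hypothetical template base + current chunk offset */
            uint64_t dma = 0x123450000ULL + 0x9000;

            /* same ordering as mb[4]..mb[7] in qla8044_md_get_template() */
            printf("mb4=0x%04x mb5=0x%04x mb6=0x%04x mb7=0x%04x\n",
                   LSW(LSD(dma)), MSW(LSD(dma)), LSW(MSD(dma)), MSW(MSD(dma)));
            /* prints: mb4=0x9000 mb5=0x2345 mb6=0x0001 mb7=0x0000 */
            return 0;
    }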
+int
qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
{
int rval;
@@ -4808,7 +4963,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA82XX(ha))
+ if (!IS_P3P_TYPE(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f868a9f98afe..a72df701fb38 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -699,6 +699,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->cnt = req->length;
req->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
+ req->req_q_in = &reg->isp25mq.req_q_in;
+ req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index d7993797f46e..62ee7131b204 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -294,7 +294,7 @@ premature_exit:
* Context:
* Kernel context.
*/
-static int
+int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
int rval;
@@ -776,6 +776,29 @@ qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
}
int
+qlafx00_loop_reset(scsi_qla_host_t *vha)
+{
+ int ret;
+ struct fc_port *fcport;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ql2xtargetreset) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ ret = ha->isp_ops->target_reset(fcport, 0, 0);
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_taskm, vha, 0x803d,
+ "Bus Reset failed: Reset=%d "
+ "d_id=%x.\n", ret, fcport->d_id.b24);
+ }
+ }
+ }
+ return QLA_SUCCESS;
+}
+
+int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
if (pci_request_selected_regions(ha->pdev, ha->bars,
@@ -918,12 +941,23 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
uint32_t aenmbx, aenmbx7 = 0;
+ uint32_t pseudo_aen;
uint32_t state[5];
bool done = false;
/* 30 seconds wait - Adjust if required */
wait_time = 30;
+ pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+ if (pseudo_aen == 1) {
+ aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
+ rval = qlafx00_driver_shutdown(vha, 10);
+ if (rval != QLA_SUCCESS)
+ qlafx00_soft_reset(vha);
+ }
+
/* wait time before firmware ready */
wtime = jiffies + (wait_time * HZ);
do {
@@ -1349,21 +1383,22 @@ qlafx00_configure_devices(scsi_qla_host_t *vha)
}
static void
-qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
{
struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport;
vha->flags.online = 0;
- ha->flags.chip_reset_done = 0;
ha->mr.fw_hbt_en = 0;
- clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- vha->qla_stats.total_isp_aborts++;
-
- ql_log(ql_log_info, vha, 0x013f,
- "Performing ISP error recovery - ha = %p.\n", ha);
- ha->isp_ops->reset_chip(vha);
+ if (!critemp) {
+ ha->flags.chip_reset_done = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ vha->qla_stats.total_isp_aborts++;
+ ql_log(ql_log_info, vha, 0x013f,
+ "Performing ISP error recovery - ha = %p.\n", ha);
+ ha->isp_ops->reset_chip(vha);
+ }
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1383,12 +1418,19 @@ qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
if (!ha->flags.eeh_busy) {
- /* Requeue all commands in outstanding command list. */
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (critemp) {
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ } else {
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ }
}
qla2x00_free_irqs(vha);
- set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+ if (critemp)
+ set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
+ else
+ set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
/* Clear the Interrupts */
QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
@@ -1475,6 +1517,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
uint32_t fw_heart_beat;
uint32_t aenmbx0;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+ uint32_t tempc;
/* Check firmware health */
if (ha->mr.fw_hbt_cnt)
@@ -1539,10 +1582,36 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
ha->mr.fw_reset_timer_tick =
QLAFX00_MAX_RESET_INTERVAL;
+ } else if (aenmbx0 == MBA_FW_RESET_FCT) {
+ ha->mr.fw_reset_timer_tick =
+ QLAFX00_MAX_RESET_INTERVAL;
}
ha->mr.old_aenmbx0_state = aenmbx0;
ha->mr.fw_reset_timer_tick--;
}
+ if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
+ /*
+ * Critical temperature recovery to be
+ * performed in timer routine
+ */
+ if (ha->mr.fw_critemp_timer_tick == 0) {
+ tempc = QLAFX00_GET_TEMPERATURE(ha);
+ ql_dbg(ql_dbg_timer, vha, 0x6012,
+ "ISPFx00(%s): Critical temp timer, "
+ "current SOC temperature: %d\n",
+ __func__, tempc);
+ if (tempc < ha->mr.critical_temperature) {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ clear_bit(FX00_CRITEMP_RECOVERY,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ ha->mr.fw_critemp_timer_tick =
+ QLAFX00_CRITEMP_INTERVAL;
+ } else {
+ ha->mr.fw_critemp_timer_tick--;
+ }
+ }
}
/*
@@ -1570,7 +1639,7 @@ qlafx00_reset_initialize(scsi_qla_host_t *vha)
if (vha->flags.online) {
scsi_block_requests(vha->host);
- qlafx00_abort_isp_cleanup(vha);
+ qlafx00_abort_isp_cleanup(vha, false);
}
ql_log(ql_log_info, vha, 0x0143,
@@ -1602,7 +1671,15 @@ qlafx00_abort_isp(scsi_qla_host_t *vha)
}
scsi_block_requests(vha->host);
- qlafx00_abort_isp_cleanup(vha);
+ qlafx00_abort_isp_cleanup(vha, false);
+ } else {
+ scsi_block_requests(vha->host);
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ vha->qla_stats.total_isp_aborts++;
+ ha->isp_ops->reset_chip(vha);
+ set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+ /* Clear the Interrupts */
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
}
ql_log(ql_log_info, vha, 0x0145,
@@ -1688,6 +1765,15 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
aen_code = FCH_EVT_LINKDOWN;
aen_data = 0;
break;
+ case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
+ ql_log(ql_log_info, vha, 0x5082,
+ "Process critical temperature event "
+ "aenmb[0]: %x\n",
+ evt->u.aenfx.evtcode);
+ scsi_block_requests(vha->host);
+ qlafx00_abort_isp_cleanup(vha, true);
+ scsi_unblock_requests(vha->host);
+ break;
}
fc_host_post_event(vha->host, fc_get_event_number(),
@@ -1779,7 +1865,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
p_sysid = utsname();
if (!p_sysid) {
ql_log(ql_log_warn, vha, 0x303c,
- "Not able to get the system informtion\n");
+ "Not able to get the system information\n");
goto done_free_sp;
}
break;
@@ -1879,6 +1965,11 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
sizeof(vha->hw->mr.uboot_version));
memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
sizeof(vha->hw->mr.fru_serial_num));
+ vha->hw->mr.critical_temperature =
+ (pinfo->nominal_temp_value) ?
+ pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
+ ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
+ QLAFX00_EXTENDED_IO_EN_MASK) != 0;
} else if (fx_type == FXDISC_GET_PORT_INFO) {
struct port_info_data *pinfo =
(struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
@@ -2021,6 +2112,7 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
{
int rval;
struct qla_hw_data *ha = vha->hw;
+ uint32_t tempc;
/* Clear adapter flags. */
vha->flags.online = 0;
@@ -2028,7 +2120,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->thermal_support = 0;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -2072,6 +2163,11 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
rval = qla2x00_init_rings(vha);
ha->flags.chip_reset_done = 1;
+ tempc = QLAFX00_GET_TEMPERATURE(ha);
+ ql_dbg(ql_dbg_init, vha, 0x0152,
+ "ISPFx00(%s): Current SOC temperature: 0x%x\n",
+ __func__, tempc);
+
return rval;
}
@@ -2526,16 +2622,13 @@ check_scsi_status:
if (logit)
ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
- "FCP command status: 0x%x-0x%x (0x%x) "
- "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x"
- "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
- "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
- "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+ "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
+ "par_sense_len=0x%x, rsp_info_len=0x%x\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->tgt_id,
- lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
- cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
- cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
+ lscsi_status, cp->cmnd, scsi_bufflen(cp),
rsp_info_len, resid_len, fw_resid_len, sense_len,
par_sense_len, rsp_info_len);
@@ -2720,9 +2813,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
struct sts_entry_fx00 *pkt;
response_t *lptr;
- if (!vha->flags.online)
- return;
-
while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
RESPONSE_PROCESSED) {
lptr = rsp->ring_ptr;
@@ -2824,6 +2914,28 @@ qlafx00_async_event(scsi_qla_host_t *vha)
ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
data_size = 4;
break;
+
+ case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */
+ ql_log(ql_log_info, vha, 0x5085,
+ "Asynchronous over temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */
+ ql_log(ql_log_info, vha, 0x5086,
+ "Asynchronous normal temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
+ ql_log(ql_log_info, vha, 0x5083,
+ "Asynchronous critical temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
default:
ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 1a092af0e2c3..79a93c52baec 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -329,11 +329,13 @@ struct config_info_data {
uint64_t adapter_id;
uint32_t cluster_key_len;
- uint8_t cluster_key[10];
+ uint8_t cluster_key[16];
uint64_t cluster_master_id;
uint64_t cluster_slave_id;
uint8_t cluster_flags;
+ uint32_t enabled_capabilities;
+ uint32_t nominal_temp_value;
} __packed;
#define FXDISC_GET_CONFIG_INFO 0x01
@@ -342,10 +344,11 @@ struct config_info_data {
#define FXDISC_GET_TGT_NODE_LIST 0x81
#define FXDISC_REG_HOST_INFO 0x99
-#define QLAFX00_HBA_ICNTRL_REG 0x21B08
+#define QLAFX00_HBA_ICNTRL_REG 0x20B08
#define QLAFX00_ICR_ENB_MASK 0x80000000
#define QLAFX00_ICR_DIS_MASK 0x7fffffff
#define QLAFX00_HST_RST_REG 0x18264
+#define QLAFX00_SOC_TEMP_REG 0x184C4
#define QLAFX00_HST_TO_HBA_REG 0x20A04
#define QLAFX00_HBA_TO_HOST_REG 0x21B70
#define QLAFX00_HST_INT_STS_BITS 0x7
@@ -361,6 +364,9 @@ struct config_info_data {
#define QLAFX00_INTR_ALL_CMPLT 0x7
#define QLAFX00_MBA_SYSTEM_ERR 0x8002
+#define QLAFX00_MBA_TEMP_OVER 0x8005
+#define QLAFX00_MBA_TEMP_NORM 0x8006
+#define QLAFX00_MBA_TEMP_CRIT 0x8007
#define QLAFX00_MBA_LINK_UP 0x8011
#define QLAFX00_MBA_LINK_DOWN 0x8012
#define QLAFX00_MBA_PORT_UPDATE 0x8014
@@ -434,9 +440,11 @@ struct qla_mt_iocb_rqst_fx00 {
__le32 dataword_extra;
- __le32 req_len;
+ __le16 req_len;
+ __le16 reserved_2;
- __le32 rsp_len;
+ __le16 rsp_len;
+ __le16 reserved_3;
};
struct qla_mt_iocb_rsp_fx00 {
@@ -499,12 +507,37 @@ struct mr_data_fx00 {
uint32_t old_fw_hbt_cnt;
uint16_t fw_reset_timer_tick;
uint8_t fw_reset_timer_exp;
+ uint16_t fw_critemp_timer_tick;
uint32_t old_aenmbx0_state;
+ uint32_t critical_temperature;
+ bool extended_io_enabled;
};
+#define QLAFX00_EXTENDED_IO_EN_MASK 0x20
+
+/*
+ * SoC Junction Temperature is stored in
+ * bits 9:1 of the SoC Junction Temperature Register
+ * in a firmware-specific format.
+ * To get the temperature in degrees Celsius,
+ * the value from this bitfield should be converted
+ * using this formula:
+ * Temperature (degrees C) = ((3,153,000 - (10,000 * X)) / 13,825)
+ * where X is the bitfield value.
+ * This macro reads the register, extracts the bitfield value,
+ * performs the calculation and returns the temperature in Celsius.
+ */
+#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \
+ ((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825)
+
+
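A worked example of the conversion above, using a hypothetical register readout: a raw bitfield value X of 220 gives (3,153,000 - 10,000 * 220) / 13,825 = 953,000 / 13,825, which truncates to 68 degrees C. The same arithmetic as a standalone sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned reg = 220u << 1;          /* hypothetical SOC_TEMP readout */
            unsigned x = (reg & 0x3FE) >> 1;   /* extract bits 9:1 */
            unsigned temp_c = (3153000 - (10000 * x)) / 13825;

            printf("X=%u -> %u C\n", x, temp_c);   /* X=220 -> 68 C */
            return 0;
    }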
#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */
#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */
#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */
#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */
#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */
+#define QLAFX00_CRITEMP_INTERVAL 60 /* number of seconds */
+
+#define QLAFX00_CRITEMP_THRSHLD 80 /* degrees Celsius */
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index cce0cd0d7ec4..11ce53dcbe7e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -848,7 +848,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
{
int done = 0, timeout = 0;
uint32_t lock_owner = 0;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (!done) {
/* acquire semaphore2 from PCI HW block */
@@ -857,9 +856,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
break;
if (timeout >= qla82xx_rom_lock_timeout) {
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
- ql_dbg(ql_dbg_p3p, vha, 0xb085,
- "Failed to acquire rom lock, acquired by %d.\n",
- lock_owner);
return -1;
}
timeout++;
@@ -1666,8 +1662,14 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
}
/* Mapping of IO base pointer */
- ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
- 0xbc000 + (ha->pdev->devfn << 11));
+ if (IS_QLA8044(ha)) {
+ ha->iobase =
+ (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase);
+ } else if (IS_QLA82XX(ha)) {
+ ha->iobase =
+ (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+ 0xbc000 + (ha->pdev->devfn << 11));
+ }
if (!ql2xdbwr) {
ha->nxdb_wr_ptr =
@@ -1967,7 +1969,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \
* @ha: SCSI driver HA context
* @mb0: Mailbox0 register
*/
-static void
+void
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
@@ -2075,13 +2077,6 @@ qla82xx_intr_handler(int irq, void *dev_id)
WRT_REG_DWORD(&reg->host_int, 0);
}
-#ifdef QL_DEBUG_LEVEL_17
- if (!irq && ha->flags.eeh_busy)
- ql_log(ql_log_warn, vha, 0x503d,
- "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
- status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
-#endif
-
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2147,13 +2142,6 @@ qla82xx_msix_default(int irq, void *dev_id)
WRT_REG_DWORD(&reg->host_int, 0);
} while (0);
-#ifdef QL_DEBUG_LEVEL_17
- if (!irq && ha->flags.eeh_busy)
- ql_log(ql_log_warn, vha, 0x5044,
- "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
- status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
-#endif
-
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2247,7 +2235,10 @@ qla82xx_enable_intrs(struct qla_hw_data *ha)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_mbx_intr_enable(vha);
spin_lock_irq(&ha->hardware_lock);
- qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ if (IS_QLA8044(ha))
+ qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
+ else
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
spin_unlock_irq(&ha->hardware_lock);
ha->interrupts_on = 1;
}
@@ -2258,7 +2249,10 @@ qla82xx_disable_intrs(struct qla_hw_data *ha)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_mbx_intr_disable(vha);
spin_lock_irq(&ha->hardware_lock);
- qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ if (IS_QLA8044(ha))
+ qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
+ else
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
spin_unlock_irq(&ha->hardware_lock);
ha->interrupts_on = 0;
}
@@ -3008,6 +3002,9 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
if (IS_QLA82XX(ha)) {
qla82xx_clear_drv_active(ha);
qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_clear_drv_active(vha);
+ qla8044_idc_unlock(ha);
}
/* Set DEV_FAILED flag to disable timer */
@@ -3134,7 +3131,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
if (fw_major_version != ha->fw_major_version ||
fw_minor_version != ha->fw_minor_version ||
fw_subminor_version != ha->fw_subminor_version) {
- ql_log(ql_log_info, vha, 0xb02d,
+ ql_dbg(ql_dbg_p3p, vha, 0xb02d,
"Firmware version differs "
"Previous version: %d:%d:%d - "
"New version: %d:%d:%d\n",
@@ -3330,6 +3327,14 @@ static int qla82xx_check_temp(scsi_qla_host_t *vha)
return 0;
}
+int qla82xx_read_temperature(scsi_qla_host_t *vha)
+{
+ uint32_t temp;
+
+ temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE);
+ return qla82xx_get_temp_val(temp);
+}
+
void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -3423,8 +3428,18 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
- int rval;
- rval = qla82xx_device_state_handler(vha);
+ int rval = -1;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA82XX(ha))
+ rval = qla82xx_device_state_handler(vha);
+ else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ /* Decide the reset ownership */
+ qla83xx_reset_ownership(vha);
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ }
return rval;
}
@@ -3432,17 +3447,25 @@ void
qla82xx_set_reset_owner(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- uint32_t dev_state;
+ uint32_t dev_state = 0;
+
+ if (IS_QLA82XX(ha))
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ else if (IS_QLA8044(ha))
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA8XXX_DEV_READY) {
ql_log(ql_log_info, vha, 0xb02f,
"HW State: NEED RESET\n");
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_NEED_RESET);
- ha->flags.nic_core_reset_owner = 1;
- ql_dbg(ql_dbg_p3p, vha, 0xb030,
- "reset_owner is 0x%x\n", ha->portnum);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ ha->flags.nic_core_reset_owner = 1;
+ ql_dbg(ql_dbg_p3p, vha, 0xb030,
+ "reset_owner is 0x%x\n", ha->portnum);
+ } else if (IS_QLA8044(ha))
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_NEED_RESET);
} else
ql_log(ql_log_info, vha, 0xb031,
"Device state is 0x%x = %s.\n",
@@ -3463,7 +3486,7 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
int
qla82xx_abort_isp(scsi_qla_host_t *vha)
{
- int rval;
+ int rval = -1;
struct qla_hw_data *ha = vha->hw;
if (vha->device_flags & DFLG_DEV_FAILED) {
@@ -3477,7 +3500,15 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
- rval = qla82xx_device_state_handler(vha);
+ if (IS_QLA82XX(ha))
+ rval = qla82xx_device_state_handler(vha);
+ else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ /* Decide the reset ownership */
+ qla83xx_reset_ownership(vha);
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ }
qla82xx_idc_lock(ha);
qla82xx_clear_rst_ready(ha);
@@ -3597,7 +3628,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
void
qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
{
- int i;
+ int i, fw_state = 0;
unsigned long flags;
struct qla_hw_data *ha = vha->hw;
@@ -3608,7 +3639,11 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
if (!ha->flags.isp82xx_fw_hung) {
for (i = 0; i < 2; i++) {
msleep(1000);
- if (qla82xx_check_fw_alive(vha)) {
+ if (IS_QLA82XX(ha))
+ fw_state = qla82xx_check_fw_alive(vha);
+ else if (IS_QLA8044(ha))
+ fw_state = qla8044_check_fw_alive(vha);
+ if (fw_state) {
ha->flags.isp82xx_fw_hung = 1;
qla82xx_clear_pending_mbx(vha);
break;
@@ -4072,7 +4107,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
return QLA_SUCCESS;
}
-static int
+int
qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -4384,7 +4419,11 @@ qla82xx_md_prep(scsi_qla_host_t *vha)
ha->md_template_size / 1024);
/* Get Minidump template */
- rval = qla82xx_md_get_template(vha);
+ if (IS_QLA8044(ha))
+ rval = qla8044_md_get_template(vha);
+ else
+ rval = qla82xx_md_get_template(vha);
+
if (rval == QLA_SUCCESS) {
ql_dbg(ql_dbg_p3p, vha, 0xb04b,
"MiniDump Template obtained\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index d268e8406fdb..1bb93dbbccbb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -589,6 +589,7 @@
* The PCI VendorID and DeviceID for our board.
*/
#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021
+#define PCI_DEVICE_ID_QLOGIC_ISP8044 0x8044
#define QLA82XX_MSIX_TBL_SPACE 8192
#define QLA82XX_PCI_REG_MSIX_TBL 0x44
@@ -954,6 +955,11 @@ struct ct6_dsd {
#define QLA82XX_CNTRL 98
#define QLA82XX_TLHDR 99
#define QLA82XX_RDEND 255
+#define QLA8044_POLLRD 35
+#define QLA8044_RDMUX2 36
+#define QLA8044_L1DTG 8
+#define QLA8044_L1ITG 9
+#define QLA8044_POLLRDMWR 37
/*
* Opcodes for Control Entries.
@@ -1191,4 +1197,8 @@ enum {
QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
};
+
+#define LEG_INTR_PTR_OFFSET 0x38C0
+#define LEG_INTR_TRIG_OFFSET 0x38C4
+#define LEG_INTR_MASK_OFFSET 0x38C8
#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
new file mode 100644
index 000000000000..8164cc9e7286
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -0,0 +1,3716 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "qla_def.h"
+#include "qla_gbl.h"
+
+#include <linux/delay.h>
+
+/* 8044 Flash Read/Write functions */
+uint32_t
+qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
+{
+ return readl((void __iomem *) (ha->nx_pcibase + addr));
+}
+
+void
+qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
+{
+ writel(val, (void __iomem *)((ha)->nx_pcibase + addr));
+}
+
+int
+qla8044_rd_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (crb_reg < CRB_REG_INDEX_MAX)
+ return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
+ else
+ return QLA_FUNCTION_FAILED;
+}
+
+void
+qla8044_wr_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg,
+ const uint32_t value)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (crb_reg < CRB_REG_INDEX_MAX)
+ qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
+}
+
+static int
+qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
+{
+ uint32_t val;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
+ val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
+
+ if (val != addr) {
+ ql_log(ql_log_warn, vha, 0xb087,
+ "%s: Failed to set register window : "
+ "addr written 0x%x, read 0x%x!\n",
+ __func__, addr, val);
+ ret_val = QLA_FUNCTION_FAILED;
+ }
+ return ret_val;
+}
+
+static int
+qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_set_win_base(vha, addr);
+ if (!ret_val)
+ *data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
+ else
+ ql_log(ql_log_warn, vha, 0xb088,
+ "%s: failed read of addr 0x%x!\n", __func__, addr);
+ return ret_val;
+}
+
+static int
+qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_set_win_base(vha, addr);
+ if (!ret_val)
+ qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
+ else
+ ql_log(ql_log_warn, vha, 0xb089,
+ "%s: failed wrt to addr 0x%x, data 0x%x\n",
+ __func__, addr, data);
+ return ret_val;
+}
+
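The two indirect helpers above follow a classic movable-window pattern: latch the target CRB address into a per-function window register, read it back to confirm the write took effect, then move the data through a fixed alias (QLA8044_WILDCARD). A hedged sketch of the same idiom over a generic MMIO region; the WIN_REG/DATA_REG offsets are invented for illustration:

    #include <stdint.h>

    enum { WIN_REG = 0x0, DATA_REG = 0x4 };    /* hypothetical offsets */

    static uint32_t mmio_rd(volatile uint32_t *base, uint32_t off)
    {
            return base[off / 4];
    }

    static void mmio_wr(volatile uint32_t *base, uint32_t off, uint32_t val)
    {
            base[off / 4] = val;
    }

    /* Window-then-access: select the address, verify it latched, then
     * read through the fixed data alias. */
    int windowed_read(volatile uint32_t *base, uint32_t addr, uint32_t *data)
    {
            mmio_wr(base, WIN_REG, addr);
            if (mmio_rd(base, WIN_REG) != addr)
                    return -1;              /* window did not latch */
            *data = mmio_rd(base, DATA_REG);
            return 0;
    }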
+/*
+ * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @vha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ *
+ */
+static void
+qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
+ uint32_t raddr, uint32_t waddr)
+{
+ uint32_t value;
+
+ qla8044_rd_reg_indirect(vha, raddr, &value);
+ qla8044_wr_reg_indirect(vha, waddr, value);
+}
+
+/*
+ * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
+ * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
+ *
+ * @vha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
+ uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr)
+{
+ uint32_t value;
+
+ if (p_rmw_hdr->index_a)
+ value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
+ else
+ qla8044_rd_reg_indirect(vha, raddr, &value);
+ value &= p_rmw_hdr->test_mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qla8044_wr_reg_indirect(vha, waddr, value);
+ return;
+}
+
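The transform above always applies in a fixed order: mask, shift left, shift right, OR, XOR. A worked trace with invented header values:

    /* value read = 0x0000ABCD, test_mask = 0xFF00, shl = 8, shr = 0,
     * or_value = 0x1, xor_value = 0x0:
     *
     *   value &= 0xFF00;   ->  0x0000AB00
     *   value <<= 8;       ->  0x00AB0000
     *   value >>= 0;       ->  0x00AB0000
     *   value |= 0x1;      ->  0x00AB0001
     *   value ^= 0x0;      ->  0x00AB0001   (written to waddr)
     */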
+inline void
+qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
+{
+ uint32_t qsnt_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ qsnt_state |= (1 << ha->portnum);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+ ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
+ __func__, vha->host_no, qsnt_state);
+}
+
+void
+qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
+{
+ uint32_t qsnt_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ qsnt_state &= ~(1 << ha->portnum);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+ ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
+ __func__, vha->host_no, qsnt_state);
+}
+
+/**
+ * qla8044_lock_recovery - Recovers the idc_lock.
+ * @vha : Pointer to adapter structure
+ *
+ * Lock Recovery Register
+ * 5-2 Lock recovery owner: Function ID of driver doing lock recovery,
+ * valid if bits 1..0 are set by driver doing lock recovery.
+ * 1-0 1 - Driver intends to force unlock the IDC lock.
+ * 2 - Driver is moving forward to unlock the IDC lock. Driver clears
+ * this field after force unlocking the IDC lock.
+ *
+ * Lock Recovery process
+ * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
+ * greater than 0, then wait for the other driver to unlock otherwise
+ * move to the next step.
+ * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
+ * register bits 1..0 and also set the function# in bits 5..2.
+ * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
+ * Wait for the other driver to perform lock recovery if the function
+ * number in bits 5..2 has changed, otherwise move to the next step.
+ * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
+ * leaving your function# in bits 5..2.
+ * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
+ * the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
+ **/
+static int
+qla8044_lock_recovery(struct scsi_qla_host *vha)
+{
+ uint32_t lock = 0, lockid;
+ struct qla_hw_data *ha = vha->hw;
+
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+
+ /* Check for other Recovery in progress, go wait */
+ if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
+ return QLA_FUNCTION_FAILED;
+
+ /* Intent to Recover */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+ (ha->portnum <<
+ IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
+ msleep(200);
+
+ /* Check Intent to Recover is advertised */
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+ if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
+ IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb08b,
+ "%s:%d: IDC lock recovery initiated\n", __func__, ha->portnum);
+
+ /* Proceed to Recover */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+ (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
+ PROCEED_TO_RECOVER);
+
+ /* Force Unlock() */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
+ qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+
+ /* Clear bits 0-5 in IDC_RECOVERY register*/
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
+
+ /* Get lock() */
+ lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+ if (lock) {
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
+ return QLA_SUCCESS;
+ }
+ return QLA_FUNCTION_FAILED;
+}
+
+int
+qla8044_idc_lock(struct qla_hw_data *ha)
+{
+ uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
+ uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ while (status == 0) {
+ /* acquire semaphore5 from PCI HW block */
+ status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+
+ if (status) {
+ /* Increment Counter (8-31) and update func_num (0-7) on
+ * getting a successful lock */
+ lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
+ break;
+ }
+
+ if (timeout == 0)
+ first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+ if (++timeout >=
+ (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
+ tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ func_num = tmo_owner & 0xFF;
+ lock_cnt = tmo_owner >> 8;
+ ql_log(ql_log_warn, vha, 0xb114,
+ "%s: Lock by func %d failed after 2s, lock held "
+ "by func %d, lock count %d, first_owner %d\n",
+ __func__, ha->portnum, func_num, lock_cnt,
+ (first_owner & 0xFF));
+ if (first_owner != tmo_owner) {
+ /* Some other driver got lock,
+ * OR same driver got lock again (counter
+ * value changed), when we were waiting for
+ * lock. Retry for another 2 sec */
+ ql_dbg(ql_dbg_p3p, vha, 0xb115,
+ "%s: %d: IDC lock failed\n",
+ __func__, ha->portnum);
+ timeout = 0;
+ } else {
+ /* Same driver holding lock > 2sec.
+ * Force Recovery */
+ if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
+ /* Recovered and got lock */
+ ret_val = QLA_SUCCESS;
+ ql_dbg(ql_dbg_p3p, vha, 0xb116,
+ "%s: IDC lock recovery by %d "
+ "successful...\n", __func__,
+ ha->portnum);
+ } else {
+ /* Recovery failed: some other function
+ * has the lock, wait for 2 secs
+ * and retry
+ */
+ ql_dbg(ql_dbg_p3p, vha, 0xb08a,
+ "%s: IDC lock recovery by %d "
+ "failed, retrying\n", __func__,
+ ha->portnum);
+ }
+ timeout = 0;
+ }
+ }
+ msleep(QLA8044_DRV_LOCK_MSLEEP);
+ }
+ return ret_val;
+}
+
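DRV_LOCK_ID packs the owning function number into bits 7:0 and a running acquisition count into bits 31:8, so the update above bumps the count and claims ownership in a single store. A small sketch of the same arithmetic; the sample values are made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lock_id = (41u << 8) | 3;   /* count 41, owned by func 3 */

            /* bump the count to 42 and take ownership as func 5 */
            lock_id = ((lock_id + (1 << 8)) & ~0xFFu) | 5;

            printf("count=%u owner=%u\n",
                   (unsigned)(lock_id >> 8), (unsigned)(lock_id & 0xFF));
            /* prints: count=42 owner=5 */
            return 0;
    }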
+void
+qla8044_idc_unlock(struct qla_hw_data *ha)
+{
+ int id;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+ if ((id & 0xFF) != ha->portnum) {
+ ql_log(ql_log_warn, vha, 0xb118,
+ "%s: IDC Unlock by %d failed, lock owner is %d!\n",
+ __func__, ha->portnum, (id & 0xFF));
+ return;
+ }
+
+ /* Keep lock counter value, update the ha->func_num to 0xFF */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
+ qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+}
+
+/* 8044 Flash Lock/Unlock functions */
+static int
+qla8044_flash_lock(scsi_qla_host_t *vha)
+{
+ int lock_owner;
+ int timeout = 0;
+ uint32_t lock_status = 0;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ while (lock_status == 0) {
+ lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
+ if (lock_status)
+ break;
+
+ if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
+ lock_owner = qla8044_rd_reg(ha,
+ QLA8044_FLASH_LOCK_ID);
+ ql_log(ql_log_warn, vha, 0xb113,
+ "%s: flash lock by %d failed, held by %d\n",
+ __func__, ha->portnum, lock_owner);
+ ret_val = QLA_FUNCTION_FAILED;
+ break;
+ }
+ msleep(20);
+ }
+ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
+ return ret_val;
+}
+
+static void
+qla8044_flash_unlock(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Reading the FLASH_UNLOCK register unlocks the flash */
+ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
+ qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+}
+
+
+static void
+qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
+{
+ if (qla8044_flash_lock(vha)) {
+ /* Someone else is holding the lock. */
+ ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
+ }
+
+ /*
+ * Either we got the lock, or someone
+ * else died while holding it.
+ * In either case, unlock.
+ */
+ qla8044_flash_unlock(vha);
+}
+
+/*
+ * Address is a byte address; the count is in 32-bit words
+ */
+static int
+qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
+ uint32_t flash_addr, int u32_word_count)
+{
+ int i, ret_val = QLA_SUCCESS;
+ uint32_t u32_word;
+
+ if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_lock_error;
+ }
+
+ if (flash_addr & 0x03) {
+ ql_log(ql_log_warn, vha, 0xb117,
+ "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_flash_read;
+ }
+
+ for (i = 0; i < u32_word_count; i++) {
+ if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
+ (flash_addr & 0xFFFF0000))) {
+ ql_log(ql_log_warn, vha, 0xb119,
+ "%s: failed to write addr 0x%x to "
+ "FLASH_DIRECT_WINDOW!\n",
+ __func__, flash_addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_flash_read;
+ }
+
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(flash_addr),
+ &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb08c,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, flash_addr);
+ goto exit_flash_read;
+ }
+
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ flash_addr = flash_addr + 4;
+ }
+
+exit_flash_read:
+ qla8044_flash_unlock(vha);
+
+exit_lock_error:
+ return ret_val;
+}
+
+/*
+ * Address and length are in bytes
+ */
+uint8_t *
+qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ scsi_block_requests(vha->host);
+ if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
+ != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb08d,
+ "%s: Failed to read from flash\n",
+ __func__);
+ }
+ scsi_unblock_requests(vha->host);
+ return buf;
+}
+
+inline int
+qla8044_need_reset(struct scsi_qla_host *vha)
+{
+ uint32_t drv_state, drv_active;
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+ rval = drv_state & (1 << ha->portnum);
+
+ if (ha->flags.eeh_busy && drv_active)
+ rval = 1;
+ return rval;
+}
+
+/*
+ * qla8044_write_list - Write the value (p_entry->arg2) to address specified
+ * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between
+ * entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla8044_entry *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_read_write_list - Read from address specified by p_entry->arg1,
+ * write value read to address specified by p_entry->arg2, for all entries in
+ * header with delay of p_hdr->delay between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_read_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla8044_entry *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_read_write_crb_reg(vha, p_entry->arg1,
+ p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
+ * value read ANDed with test_mask is equal to test_result.
+ *
+ * @vha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Poll for total of "duration" msecs
+ * @test_mask : Mask value read with "test_mask"
+ * @test_result : Compare (value&test_mask) with test_result.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
+ int duration, uint32_t test_mask, uint32_t test_result)
+{
+ uint32_t value;
+ int timeout_error;
+ uint8_t retries;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ timeout_error = 1;
+ goto exit_poll_reg;
+ }
+
+ /* poll every 1/10 of the total duration */
+ retries = duration/10;
+
+ do {
+ if ((value & test_mask) != test_result) {
+ timeout_error = 1;
+ msleep(duration/10);
+ ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ timeout_error = 1;
+ goto exit_poll_reg;
+ }
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+exit_poll_reg:
+ if (timeout_error) {
+ vha->reset_tmplt.seq_error++;
+ ql_log(ql_log_fatal, vha, 0xb090,
+ "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
+ __func__, value, test_mask, test_result);
+ }
+
+ return timeout_error;
+}
+
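Note the cadence this gives: the register is sampled once up front, then up to duration/10 more times with a duration/10 msec sleep before each re-read. A worked example with invented numbers:

    /* duration = 100 msec:
     *   retries = 100 / 10 = 10
     *   each miss sleeps 100 / 10 = 10 msec before the re-read,
     *   so the poll gives up after roughly 10 * 10 = 100 msec.
     */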
+/*
+ * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
+ * register specified by p_entry->arg1 and compare (value AND test_mask) with
+ * test_result to validate it. Wait for p_hdr->delay between processing entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for POLL_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla8044_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla8044_poll *)
+ ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+ /* Entries start after 8 byte qla8044_poll, poll header contains
+ * the test_mask, test_value.
+ */
+ p_entry = (struct qla8044_entry *)((char *)p_poll +
+ sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, p_entry++)
+ qla8044_poll_reg(vha, p_entry->arg1,
+ delay, p_poll->test_mask, p_poll->test_value);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ if (delay) {
+ if (qla8044_poll_reg(vha,
+ p_entry->arg1, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ /* If
+ * (data_read & test_mask) != test_value,
+ * read the TIMEOUT_ADDR (arg1) and
+ * ADDR (arg2) registers.
+ */
+ qla8044_rd_reg_indirect(vha,
+ p_entry->arg1, &value);
+ qla8044_rd_reg_indirect(vha,
+ p_entry->arg2, &value);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
+ * read ar_addr, and if (value & test_mask) != test_value, re-read till the timeout
+ * expires.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla8044_quad_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+
+ p_poll = (struct qla8044_poll *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
+ sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha,
+ p_entry->dr_addr, p_entry->dr_value);
+ qla8044_wr_reg_indirect(vha,
+ p_entry->ar_addr, p_entry->ar_value);
+ if (delay) {
+ if (qla8044_poll_reg(vha,
+ p_entry->ar_addr, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb091,
+ "%s: Timeout Error: poll list, ",
+ __func__);
+ ql_dbg(ql_dbg_p3p, vha, 0xb092,
+ "item_num %d, entry_num %d\n", i,
+ vha->reset_tmplt.seq_index);
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
+ * value, write value to p_entry->arg2. Process entries with p_hdr->delay
+ * between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_read_modify_write(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ struct qla8044_rmw *p_rmw_hdr;
+ uint32_t i;
+
+ p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
+ sizeof(struct qla8044_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_rmw_crb_reg(vha, p_entry->arg1,
+ p_entry->arg2, p_rmw_hdr);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
+ * two entries of a sequence.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_pause(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+/*
+ * qla8044_template_end - Indicates end of reset sequence processing.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_template_end(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ vha->reset_tmplt.template_end = 1;
+
+ if (vha->reset_tmplt.seq_error == 0) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb093,
+ "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb094,
+ "%s: Reset sequence completed with some timeout "
+ "errors.\n", __func__);
+ }
+}
+
+/*
+ * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
+ * if (value & test_mask) != test_value, re-read till the timeout expires,
+ * read dr_addr register and assign to reset_tmplt.array.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_poll_read_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ int index;
+ struct qla8044_quad_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla8044_poll *)
+ ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_quad_entry *)
+ ((char *)p_poll + sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
+ p_entry->ar_value);
+ if (delay) {
+ if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
+ p_poll->test_mask, p_poll->test_value)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb095,
+ "%s: Timeout Error: poll "
+ "list, ", __func__);
+ ql_dbg(ql_dbg_p3p, vha, 0xb096,
+ "Item_num %d, "
+ "entry_num %d\n", i,
+ vha->reset_tmplt.seq_index);
+ } else {
+ index = vha->reset_tmplt.array_index;
+ qla8044_rd_reg_indirect(vha,
+ p_entry->dr_addr, &value);
+ vha->reset_tmplt.array[index++] = value;
+ if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
+ vha->reset_tmplt.array_index = 1;
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_process_reset_template - Process all entries in the reset template
+ * till the entry with the SEQ_END opcode, which indicates the end of reset
+ * template processing. Each entry has a reset entry header carrying the entry
+ * opcode/command, the size of the entry, the number of entries in the
+ * sub-sequence, and a delay in microsecs or a timeout in millisecs.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_buff : Pointer to the reset template buffer.
+ *
+ */
+static void
+qla8044_process_reset_template(struct scsi_qla_host *vha,
+ char *p_buff)
+{
+ int index, entries;
+ struct qla8044_reset_entry_hdr *p_hdr;
+ char *p_entry = p_buff;
+
+ vha->reset_tmplt.seq_end = 0;
+ vha->reset_tmplt.template_end = 0;
+ entries = vha->reset_tmplt.hdr->entries;
+ index = vha->reset_tmplt.seq_index;
+
+ for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
+ switch (p_hdr->cmd) {
+ case OPCODE_NOP:
+ break;
+ case OPCODE_WRITE_LIST:
+ qla8044_write_list(vha, p_hdr);
+ break;
+ case OPCODE_READ_WRITE_LIST:
+ qla8044_read_write_list(vha, p_hdr);
+ break;
+ case OPCODE_POLL_LIST:
+ qla8044_poll_list(vha, p_hdr);
+ break;
+ case OPCODE_POLL_WRITE_LIST:
+ qla8044_poll_write_list(vha, p_hdr);
+ break;
+ case OPCODE_READ_MODIFY_WRITE:
+ qla8044_read_modify_write(vha, p_hdr);
+ break;
+ case OPCODE_SEQ_PAUSE:
+ qla8044_pause(vha, p_hdr);
+ break;
+ case OPCODE_SEQ_END:
+ vha->reset_tmplt.seq_end = 1;
+ break;
+ case OPCODE_TMPL_END:
+ qla8044_template_end(vha, p_hdr);
+ break;
+ case OPCODE_POLL_READ_LIST:
+ qla8044_poll_read_list(vha, p_hdr);
+ break;
+ default:
+ ql_log(ql_log_fatal, vha, 0xb097,
+ "%s: Unknown command ==> 0x%04x on "
+ "entry = %d\n", __func__, p_hdr->cmd, index);
+ break;
+ }
+ /* Set pointer to the next entry in the sequence. */
+ p_entry += p_hdr->size;
+ }
+ vha->reset_tmplt.seq_index = index;
+}
+
+static void
+qla8044_process_init_seq(struct scsi_qla_host *vha)
+{
+ qla8044_process_reset_template(vha,
+ vha->reset_tmplt.init_offset);
+ if (vha->reset_tmplt.seq_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb098,
+ "%s: Abrupt INIT Sub-Sequence end.\n",
+ __func__);
+}
+
+static void
+qla8044_process_stop_seq(struct scsi_qla_host *vha)
+{
+ vha->reset_tmplt.seq_index = 0;
+ qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
+ if (vha->reset_tmplt.seq_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb099,
+ "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
+}
+
+static void
+qla8044_process_start_seq(struct scsi_qla_host *vha)
+{
+ qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
+ if (vha->reset_tmplt.template_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb09a,
+ "%s: Abrupt START Sub-Sequence end.\n",
+ __func__);
+}
+
+static int
+qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
+ uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
+{
+ uint32_t i;
+ uint32_t u32_word;
+ uint32_t flash_offset;
+ uint32_t addr = flash_addr;
+ int ret_val = QLA_SUCCESS;
+
+ flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);
+
+ if (addr & 0x3) {
+ ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
+ __func__, addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_lockless_read;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_WINDOW, (addr));
+
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09c,
+ "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ /* Check if data is spread across multiple sectors */
+ if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+ (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+ /* Multi sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09d,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+ if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+ /* This write is needed once for each sector */
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_WINDOW, (addr));
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09f,
+ "%s: failed to write addr "
+ "0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb0a0,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+exit_lockless_read:
+ return ret_val;
+}
+
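The multi-sector branch above rewrites FLASH_DIRECT_WINDOW whenever the running offset steps past a sector boundary. A worked trace, assuming a hypothetical 64 KB value for QLA8044_FLASH_SECTOR_SIZE:

    /* flash_addr = 0x1FFF8, u32_word_count = 4, sector size = 0x10000:
     *   word 0: read at 0x1FFF8, flash_offset 0xFFF8 -> 0xFFFC
     *   word 1: read at 0x1FFFC, flash_offset 0xFFFC -> 0x10000
     *           0x10000 > 0xFFFF, so the window is rewritten for
     *           addr 0x20000 and flash_offset resets to 0
     *   words 2..3: read at 0x20000 and 0x20004 in the new sector
     */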
+/*
+ * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
+ *
+ * @vha : Pointer to adapter structure
+ * @addr : MS/off-chip memory address to write to
+ * @data : Data to be written
+ * @count : Number of 128-bit (16-byte) lines to write
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
+ uint64_t addr, uint32_t *data, uint32_t count)
+{
+ int i, j, ret_val = QLA_SUCCESS;
+ uint32_t agt_ctrl;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Only 128-bit aligned access */
+ if (addr & 0xF) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write;
+ }
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /* Write address */
+ ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a1,
+ "%s: write to AGT_ADDR_HI failed!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
+ QLA8044_ADDR_QDR_NET_MAX)) ||
+ (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
+ QLA8044_ADDR_DDR_NET_MAX)))) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write_unlock;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_ADDR_LO, addr);
+
+ /* Write data */
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_LO, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_HI, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a2,
+ "%s: write to AGT_WRDATA failed!\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ /* Check write status */
+ ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_ENABLE);
+ ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_START);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a3,
+ "%s: write to AGT_CTRL failed!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a4,
+ "%s: failed to read "
+ "MD_MIU_TEST_AGT_CTRL!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+ if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failed */
+ if (j >= MAX_CTL_CHECK) {
+ ql_log(ql_log_fatal, vha, 0xb0a5,
+ "%s: MS memory write failed!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write_unlock;
+ }
+ }
+
+exit_ms_mem_write_unlock:
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+ return ret_val;
+}
+
+static int
+qla8044_copy_bootloader(struct scsi_qla_host *vha)
+{
+ uint8_t *p_cache;
+ uint32_t src, count, size;
+ uint64_t dest;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ src = QLA8044_BOOTLOADER_FLASH_ADDR;
+ dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
+ size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
+
+ /* 128 bit alignment check */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ /* 16 byte count */
+ count = size/16;
+
+ p_cache = vmalloc(size);
+ if (p_cache == NULL) {
+ ql_log(ql_log_fatal, vha, 0xb0a6,
+ "%s: Failed to allocate memory for "
+ "boot loader cache\n", __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_copy_bootloader;
+ }
+
+ ret_val = qla8044_lockless_flash_read_u32(vha, src,
+ p_cache, size/sizeof(uint32_t));
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a7,
+ "%s: Error reading F/W from flash!!!\n", __func__);
+ goto exit_copy_error;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
+ __func__);
+
+ /* 128 bit/16 byte write to MS memory */
+ ret_val = qla8044_ms_mem_write_128b(vha, dest,
+ (uint32_t *)p_cache, count);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a9,
+ "%s: Error writing F/W to MS !!!\n", __func__);
+ goto exit_copy_error;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
+ "%s: Wrote F/W (size %d) to MS !!!\n",
+ __func__, size);
+
+exit_copy_error:
+ vfree(p_cache);
+
+exit_copy_bootloader:
+ return ret_val;
+}
+
+static int
+qla8044_restart(struct scsi_qla_host *vha)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_process_stop_seq(vha);
+
+ /* Collect minidump */
+ if (ql2xmdenable)
+ qla8044_get_minidump(vha);
+ else
+ ql_log(ql_log_fatal, vha, 0xb14c,
+ "Minidump disabled.\n");
+
+ qla8044_process_init_seq(vha);
+
+ if (qla8044_copy_bootloader(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0ab,
+ "%s: Copy bootloader, firmware restart failed!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_restart;
+ }
+
+ /*
+ * Loads F/W from flash
+ */
+ qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
+
+ qla8044_process_start_seq(vha);
+
+exit_restart:
+ return ret_val;
+}
+
+/*
+ * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
+ * initialized.
+ *
+ * @vha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
+{
+ uint32_t val, ret_val = QLA_FUNCTION_FAILED;
+ int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+ struct qla_hw_data *ha = vha->hw;
+
+ do {
+ val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
+ if (val == PHAN_INITIALIZE_COMPLETE) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
+ "%s: Command Peg initialization "
+ "complete! state=0x%x\n", __func__, val);
+ ret_val = QLA_SUCCESS;
+ break;
+ }
+ msleep(CRB_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ return ret_val;
+}
+
+static int
+qla8044_start_firmware(struct scsi_qla_host *vha)
+{
+ int ret_val = QLA_SUCCESS;
+
+ if (qla8044_restart(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0ad,
+ "%s: Restart Error!!!, Need Reset!!!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_start_fw;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0af,
+ "%s: Restart done!\n", __func__);
+
+ ret_val = qla8044_check_cmd_peg_status(vha);
+ if (ret_val) {
+ ql_log(ql_log_fatal, vha, 0xb0b0,
+ "%s: Peg not initialized!\n", __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ }
+
+exit_start_fw:
+ return ret_val;
+}
+
+void
+qla8044_clear_drv_active(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_active &= ~(1 << (ha->portnum));
+
+ ql_log(ql_log_info, vha, 0xb0b1,
+ "%s(%ld): drv_active: 0x%08x\n",
+ __func__, vha->host_no, drv_active);
+
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+/*
+ * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
+ * @vha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static int
+qla8044_device_bootstrap(struct scsi_qla_host *vha)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ int i;
+ uint32_t old_count = 0, count = 0;
+ int need_reset = 0;
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ need_reset = qla8044_need_reset(vha);
+
+ if (!need_reset) {
+ old_count = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+ for (i = 0; i < 10; i++) {
+ msleep(200);
+
+ count = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+ if (count != old_count) {
+ rval = QLA_SUCCESS;
+ goto dev_ready;
+ }
+ }
+ qla8044_flash_lock_recovery(vha);
+ } else {
+ /* We are trying to perform a recovery here. */
+ if (ha->flags.isp82xx_fw_hung)
+ qla8044_flash_lock_recovery(vha);
+ }
+
+ /* set to DEV_INITIALIZING */
+ ql_log(ql_log_info, vha, 0xb0b2,
+ "%s: HW State: INITIALIZING\n", __func__);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_INITIALIZING);
+
+ qla8044_idc_unlock(ha);
+ rval = qla8044_start_firmware(vha);
+ qla8044_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0xb0b3,
+ "%s: HW State: FAILED\n", __func__);
+ qla8044_clear_drv_active(vha);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ return rval;
+ }
+
+ /* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
+ * the device goes to INIT state. */
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+ ha->fw_dumped = 0;
+ }
+
+dev_ready:
+ ql_log(ql_log_info, vha, 0xb0b4,
+ "%s: HW State: READY\n", __func__);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
+
+ return rval;
+}
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+static void
+qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
+{
+ u8 *phdr;
+
+ if (!vha->reset_tmplt.buff) {
+ ql_log(ql_log_fatal, vha, 0xb0b5,
+ "%s: Error: Invalid reset sequence template\n", __func__);
+ return;
+ }
+
+ phdr = vha->reset_tmplt.buff;
+ ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
+ "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X"
+ "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
+ "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
+ *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+ *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+ *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+ *(phdr+13), *(phdr+14), *(phdr+15));
+}
+
+/*
+ * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
+ *
+ * @ha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
+{
+ uint32_t sum = 0;
+ uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
+ int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t);
+
+ while (u16_count-- > 0)
+ sum += *buff++;
+
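+ /* Fold any carries back into the low 16 bits (end-around carry) */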
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ /* checksum of 0 indicates a valid template */
+ if (~sum) {
+ return QLA_SUCCESS;
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb0b7,
+ "%s: Reset seq checksum failed\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+}
+
+/*
+ * qla8044_read_reset_template - Read the reset template from flash, validate
+ * it and store the stop/start/init sequence offsets in vha->reset_tmplt.
+ *
+ * @vha : Pointer to adapter structure
+ */
+void
+qla8044_read_reset_template(struct scsi_qla_host *vha)
+{
+ uint8_t *p_buff;
+ uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+
+ vha->reset_tmplt.seq_error = 0;
+ vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
+ if (vha->reset_tmplt.buff == NULL) {
+ ql_log(ql_log_fatal, vha, 0xb0b8,
+ "%s: Failed to allocate reset template resources\n",
+ __func__);
+ goto exit_read_reset_template;
+ }
+
+ p_buff = vha->reset_tmplt.buff;
+ addr = QLA8044_RESET_TEMPLATE_ADDR;
+
+ tmplt_hdr_def_size =
+ sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
+ "%s: Read template hdr size %d from Flash\n",
+ __func__, tmplt_hdr_def_size);
+
+ /* Copy template header from flash */
+ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+ ql_log(ql_log_fatal, vha, 0xb0ba,
+ "%s: Failed to read reset template\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ vha->reset_tmplt.hdr =
+ (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
+
+ /* Validate the template header size and signature */
+ tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+ if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+ (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+ ql_log(ql_log_fatal, vha, 0xb0bb,
+ "%s: Template Header size invalid %d "
+ "tmplt_hdr_def_size %d!!!\n", __func__,
+ tmplt_hdr_size, tmplt_hdr_def_size);
+ goto exit_read_template_error;
+ }
+
+ addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
+ p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
+ tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
+ vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
+ "%s: Read rest of the template size %d\n",
+ __func__, vha->reset_tmplt.hdr->size);
+
+ /* Copy rest of the template */
+ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+ ql_log(ql_log_fatal, vha, 0xb0bd,
+ "%s: Failed to read reset tempelate\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ /* Integrity check */
+ if (qla8044_reset_seq_checksum_test(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0be,
+ "%s: Reset Seq checksum failed!\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
+ "%s: Reset Seq checksum passed! Get stop, "
+ "start and init seq offsets\n", __func__);
+
+ /* Get STOP, START, INIT sequence offsets */
+ vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->init_seq_offset;
+
+ vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->start_seq_offset;
+
+ vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->hdr_size;
+
+ qla8044_dump_reset_seq_hdr(vha);
+
+ goto exit_read_reset_template;
+
+exit_read_template_error:
+ vfree(vha->reset_tmplt.buff);
+
+exit_read_reset_template:
+ return;
+}
+
+void
+qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
+{
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ idc_ctrl |= DONTRESET_BIT0;
+ ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
+ "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+inline void
+qla8044_set_rst_ready(struct scsi_qla_host *vha)
+{
+ uint32_t drv_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+ /* For ISP8044, drv_state register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.*/
+ drv_state |= (1 << ha->portnum);
+
+ ql_log(ql_log_info, vha, 0xb0c1,
+ "%s(%ld): drv_state: 0x%08x\n",
+ __func__, vha->host_no, drv_state);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+/**
+ * qla8044_need_reset_handler - Code to start reset sequence
+ * @vha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static void
+qla8044_need_reset_handler(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state = 0, drv_state, drv_active;
+ unsigned long reset_timeout, dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_log(ql_log_fatal, vha, 0xb0c2,
+ "%s: Performing ISP error recovery\n", __func__);
+
+ if (vha->flags.online) {
+ qla8044_idc_unlock(ha);
+ qla2x00_abort_isp_cleanup(vha);
+ ha->isp_ops->get_flash_version(vha, vha->req->ring);
+ ha->isp_ops->nvram_config(vha);
+ qla8044_idc_lock(ha);
+ }
+
+ if (!ha->flags.nic_core_reset_owner) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
+ "%s(%ld): reset acknowledged\n",
+ __func__, vha->host_no);
+ qla8044_set_rst_ready(vha);
+
+ /* Non-reset owners ACK Reset and wait for device INIT state
+ * as part of Reset Recovery by Reset Owner
+ */
+ dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+ do {
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql_log(ql_log_info, vha, 0xb0c4,
+ "%s: Non Reset owner DEV INIT "
+ "TIMEOUT!\n", __func__);
+ break;
+ }
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ dev_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_STATE_INDEX);
+ } while (dev_state == QLA8XXX_DEV_NEED_RESET);
+ } else {
+ qla8044_set_rst_ready(vha);
+
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ ql_log(ql_log_info, vha, 0xb0c5,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, vha->host_no, drv_state, drv_active);
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ ql_log(ql_log_info, vha, 0xb0c6,
+ "%s: RESET TIMEOUT!"
+ "drv_state: 0x%08x, drv_active: 0x%08x\n",
+ QLA2XXX_DRIVER_NAME, drv_state, drv_active);
+ break;
+ }
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ }
+
+ if (drv_state != drv_active) {
+ ql_log(ql_log_info, vha, 0xb0c7,
+ "%s(%ld): Reset_owner turning off drv_active "
+ "of non-acking function 0x%x\n", __func__,
+ vha->host_no, (drv_active ^ drv_state));
+ drv_active = drv_active & drv_state;
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
+ drv_active);
+ }
+
+ /*
+ * Clear RESET OWNER, will be set at next reset
+ * by next RST_OWNER
+ */
+ ha->flags.nic_core_reset_owner = 0;
+
+ /* Start Reset Recovery */
+ qla8044_device_bootstrap(vha);
+ }
+}
+
+static void
+qla8044_set_drv_active(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* For ISP8044, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.*/
+ drv_active |= (1 << ha->portnum);
+
+ ql_log(ql_log_info, vha, 0xb0c8,
+ "%s(%ld): drv_active: 0x%08x\n",
+ __func__, vha->host_no, drv_active);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+static void
+qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
+{
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ idc_ctrl &= ~DONTRESET_BIT0;
+ ql_log(ql_log_info, vha, 0xb0c9,
+ "%s: idc_ctrl = %d\n", __func__,
+ idc_ctrl);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
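+/*
+ * qla8044_set_idc_ver - Negotiate the IDC version with other functions.
+ *
+ * @vha : Pointer to adapter structure
+ *
+ * The first function to load writes the major IDC version; later functions
+ * verify that their major version matches and fail if it does not. Each
+ * function then records its own 2-bit minor version field.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */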
+static int
+qla8044_set_idc_ver(struct scsi_qla_host *vha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ if (drv_active == (1 << ha->portnum)) {
+ idc_ver = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+ idc_ver &= (~0xFF);
+ idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+ idc_ver);
+ ql_log(ql_log_info, vha, 0xb0ca,
+ "%s: IDC version updated to %d\n",
+ __func__, idc_ver);
+ } else {
+ idc_ver = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+ idc_ver &= 0xFF;
+ if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
+ ql_log(ql_log_info, vha, 0xb0cb,
+ "%s: qla4xxx driver IDC version %d "
+ "is not compatible with IDC version %d "
+ "of other drivers!\n",
+ __func__, QLA8044_IDC_VER_MAJ_VALUE,
+ idc_ver);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit_set_idc_ver;
+ }
+ }
+
+ /* Update IDC_MINOR_VERSION */
+ idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
+ idc_ver &= ~(0x03 << (ha->portnum * 2));
+ idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
+ qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+ return rval;
+}
+
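+/*
+ * qla8044_update_idc_reg - One-time IDC register setup at driver load.
+ *
+ * @vha : Pointer to adapter structure
+ *
+ * Under the IDC lock: set this function's drv_active bit, clear IDC_CTRL
+ * DONTRESET_BIT0 if we are the first driver to load (unless
+ * ql2xdontresethba is set) and negotiate the IDC version.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */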
+static int
+qla8044_update_idc_reg(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->flags.init_done)
+ goto exit_update_idc_reg;
+
+ qla8044_idc_lock(ha);
+ qla8044_set_drv_active(vha);
+
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* If we are the first driver to load and
+ * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
+ if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
+ qla8044_clear_idc_dontreset(vha);
+
+ rval = qla8044_set_idc_ver(vha);
+ if (rval == QLA_FUNCTION_FAILED)
+ qla8044_clear_drv_active(vha);
+ qla8044_idc_unlock(ha);
+
+exit_update_idc_reg:
+ return rval;
+}
+
+/**
+ * qla8044_need_qsnt_handler - Code to start quiescent mode
+ * @vha: pointer to adapter structure
+ **/
+static void
+qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
+{
+ unsigned long qsnt_timeout;
+ uint32_t drv_state, drv_active, dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->flags.online)
+ qla2x00_quiesce_io(vha);
+ else
+ return;
+
+ qla8044_set_qsnt_ready(vha);
+
+ /* Wait for 30 secs for all functions to ack qsnt mode */
+ qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* Shift drv_active by 1 to match drv_state: the quiescent-ready bit
+ is at bit 1 while the drv-active bit is at bit 0 for each function */
+ drv_active = drv_active << 1;
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, qsnt_timeout)) {
+ /* Other functions did not ack, changing state to
+ * DEV_READY
+ */
+ clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_READY);
+ qla8044_clear_qsnt_ready(vha);
+ ql_log(ql_log_info, vha, 0xb0cc,
+ "Timeout waiting for quiescent ack!!!\n");
+ return;
+ }
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_active = drv_active << 1;
+ }
+
+ /* All functions have Acked. Set quiescent state */
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_QUIESCENT);
+ ql_log(ql_log_info, vha, 0xb0cd,
+ "%s: HW State: QUIESCENT\n", __func__);
+ }
+}
+
+/*
+ * qla8044_device_state_handler - Adapter state machine
+ * @vha: pointer to host adapter structure.
+ *
+ * Note: IDC lock must be UNLOCKED upon entry
+ **/
+int
+qla8044_device_state_handler(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = qla8044_update_idc_reg(vha);
+ if (rval == QLA_FUNCTION_FAILED)
+ goto exit_error;
+
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+ ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
+ "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+
+ /* wait for 30 seconds for device to go ready */
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+ qla8044_idc_lock(ha);
+
+ while (1) {
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql_log(ql_log_warn, vha, 0xb0cf,
+ "%s: Device Init Failed 0x%x = %s\n",
+ QLA2XXX_DRIVER_NAME, dev_state,
+ dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ }
+
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+ ql_log(ql_log_info, vha, 0xb0d0,
+ "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+
+ /* NOTE: Make sure idc unlocked upon exit of switch statement */
+ switch (dev_state) {
+ case QLA8XXX_DEV_READY:
+ ha->flags.nic_core_reset_owner = 0;
+ goto exit;
+ case QLA8XXX_DEV_COLD:
+ rval = qla8044_device_bootstrap(vha);
+ goto exit;
+ case QLA8XXX_DEV_INITIALIZING:
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+ break;
+ case QLA8XXX_DEV_NEED_RESET:
+ /* For ISP8044, if NEED_RESET is set by any driver,
+ * it should be honored, irrespective of IDC_CTRL
+ * DONTRESET_BIT0 */
+ qla8044_need_reset_handler(vha);
+ break;
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ /* idc locked/unlocked in handler */
+ qla8044_need_qsnt_handler(vha);
+
+ /* Reset the init timeout after qsnt handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_reset_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_QUIESCENT:
+ ql_log(ql_log_info, vha, 0xb0d1,
+ "HW State: QUIESCENT\n");
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ /* Reset the init timeout after qsnt handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_reset_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_FAILED:
+ ha->flags.nic_core_reset_owner = 0;
+ qla8044_idc_unlock(ha);
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla8044_idc_lock(ha);
+ goto exit;
+ default:
+ qla8044_idc_unlock(ha);
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla8044_idc_lock(ha);
+ goto exit;
+ }
+ }
+exit:
+ qla8044_idc_unlock(ha);
+
+exit_error:
+ return rval;
+}
+
+/**
+ * qla8044_check_temp - Check the ISP8044 temperature.
+ * @vha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int
+qla8044_check_temp(struct scsi_qla_host *vha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql_log(ql_log_warn, vha, 0xb0d2,
+ "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down\n", temp_val);
+ status = QLA_FUNCTION_FAILED;
+ return status;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ ql_log(ql_log_warn, vha, 0xb0d3,
+ "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ }
+ return QLA_SUCCESS;
+}
+
+int qla8044_read_temperature(scsi_qla_host_t *vha)
+{
+ uint32_t temp;
+
+ temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+ return qla82xx_get_temp_val(temp);
+}
+
+/**
+ * qla8044_check_fw_alive - Check firmware health
+ * @vha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+int
+qla8044_check_fw_alive(struct scsi_qla_host *vha)
+{
+ uint32_t fw_heartbeat_counter;
+ uint32_t halt_status1, halt_status2;
+ int status = QLA_SUCCESS;
+
+ fw_heartbeat_counter = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+ /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
+ if (fw_heartbeat_counter == 0xffffffff) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
+ "scsi%ld: %s: Device in frozen "
+ "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
+ vha->host_no, __func__);
+ return status;
+ }
+
+ if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+ vha->seconds_since_last_heartbeat++;
+ /* FW not alive after 2 seconds */
+ if (vha->seconds_since_last_heartbeat == 2) {
+ vha->seconds_since_last_heartbeat = 0;
+ halt_status1 = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS1_INDEX);
+ halt_status2 = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS2_INDEX);
+
+ ql_log(ql_log_info, vha, 0xb0d5,
+ "scsi(%ld): %s, ISP8044 "
+ "Dumping hw/fw registers:\n"
+ " PEG_HALT_STATUS1: 0x%x, "
+ "PEG_HALT_STATUS2: 0x%x,\n",
+ vha->host_no, __func__, halt_status1,
+ halt_status2);
+ status = QLA_FUNCTION_FAILED;
+ }
+ } else
+ vha->seconds_since_last_heartbeat = 0;
+
+ vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
+}
+
+void
+qla8044_watchdog(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state, halt_status;
+ int halt_status_unrecoverable = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* don't poll if a reset is in progress or FW is hung in quiescent state */
+ if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (qla8044_check_temp(vha)) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla2xxx_wake_dpc(vha);
+ } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+ !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
+ ql_log(ql_log_info, vha, 0xb0d6,
+ "%s: HW State: NEED RESET!\n",
+ __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+ !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
+ ql_log(ql_log_info, vha, 0xb0d7,
+ "%s: HW State: NEED QUIES detected!\n",
+ __func__);
+ set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ /* Check firmware health */
+ if (qla8044_check_fw_alive(vha)) {
+ halt_status = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS1_INDEX);
+ if (halt_status &
+ QLA8044_HALT_STATUS_FW_RESET) {
+ ql_log(ql_log_fatal, vha,
+ 0xb0d8, "%s: Firmware "
+ "error detected device "
+ "is being reset\n",
+ __func__);
+ } else if (halt_status &
+ QLA8044_HALT_STATUS_UNRECOVERABLE) {
+ halt_status_unrecoverable = 1;
+ }
+
+ /* Since we cannot change dev_state in interrupt
+ * context, set the appropriate DPC flag and then
+ * wake up the DPC thread */
+ if (halt_status_unrecoverable) {
+ set_bit(ISP_UNRECOVERABLE,
+ &vha->dpc_flags);
+ } else {
+ if (dev_state ==
+ QLA8XXX_DEV_QUIESCENT) {
+ set_bit(FCOE_CTX_RESET_NEEDED,
+ &vha->dpc_flags);
+ ql_log(ql_log_info, vha, 0xb0d9,
+ "%s: FW CONTEXT Reset "
+ "needed!\n", __func__);
+ } else {
+ ql_log(ql_log_info, vha,
+ 0xb0da, "%s: "
+ "detect abort needed\n",
+ __func__);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla82xx_clear_pending_mbx(vha);
+ }
+ }
+ ha->flags.isp82xx_fw_hung = 1;
+ ql_log(ql_log_warn, vha, 0xb10a,
+ "Firmware hung.\n");
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+
+ }
+}
+
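+/*
+ * qla8044_minidump_process_control - Execute a CNTRL minidump entry.
+ *
+ * @vha : Pointer to adapter structure
+ * @entry_hdr : Minidump entry header for this CRB control entry
+ *
+ * Walks op_count CRB addresses (stepping by addr_stride) and applies the
+ * opcodes encoded in the entry: write, read/rewrite, AND/OR
+ * read-modify-write, poll with timeout, and save/restore/modify of the
+ * template's saved_state_array.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */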
+static int
+qla8044_minidump_process_control(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr)
+{
+ struct qla8044_minidump_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time, addr, index;
+ uint32_t crb_addr, rval = QLA_SUCCESS;
+ unsigned long wtime;
+ struct qla8044_minidump_template_hdr *tmplt_hdr;
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
+ tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+ ha->md_tmplt_hdr;
+ crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
+
+ crb_addr = crb_entry->addr;
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+
+ if (opcode & QLA82XX_DBG_OPCODE_WR) {
+ qla8044_wr_reg_indirect(vha, crb_addr,
+ crb_entry->value_1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RW) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_AND) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA82XX_DBG_OPCODE_AND;
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ read_value |= crb_entry->value_3;
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+
+ do {
+ if ((read_value & crb_entry->value_2) ==
+ crb_entry->value_1) {
+ break;
+ } else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ } else {
+ qla8044_rd_reg_indirect(vha,
+ crb_addr, &read_value);
+ }
+ } while (1);
+ opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ qla8044_rd_reg_indirect(vha, addr, &read_value);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else {
+ read_value = crb_entry->value_1;
+ }
+
+ qla8044_wr_reg_indirect(vha, addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ return rval;
+}
+
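+/*
+ * qla8044_minidump_process_rdcrb - Capture a range of CRB registers.
+ *
+ * @vha : Pointer to adapter structure
+ * @entry_hdr : Minidump entry header for this capture
+ * @d_ptr : In/out pointer into the minidump data buffer
+ *
+ * Reads op_count registers starting at addr, stepping by addr_stride,
+ * and stores each (address, value) pair in the dump buffer.
+ */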
+static void
+qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
+ crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = r_addr;
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
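+/*
+ * qla8044_minidump_process_rdmem - Capture MS memory via the MIU test
+ * agent, 16 bytes at a time.
+ *
+ * @vha : Pointer to adapter structure
+ * @entry_hdr : Minidump entry header for this capture
+ * @d_ptr : In/out pointer into the minidump data buffer
+ *
+ * The read address must be 16-byte aligned and the read size a multiple
+ * of 16 bytes.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */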
+static int
+qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla8044_minidump_entry_rdmem *m_hdr;
+ unsigned long flags;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
+ m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
+ "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size);
+
+ if (r_addr & 0xf) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
+ "[%s]: Read addr 0x%x not 16 bytes alligned\n",
+ __func__, r_addr);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
+ "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+ __func__, m_hdr->read_data_size);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+ write_lock_irqsave(&ha->hw_lock, flags);
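+ /* For each 16-byte chunk: program the MIU test agent with the
+ * address, start the read, poll until the agent is no longer
+ * busy, then pull four data words from its read-data registers. */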
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
+ r_value = 0;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+ r_value = MIU_TA_CTL_START_ENABLE;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ &r_value);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n", __func__);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ for (j = 0; j < 4; j++) {
+ qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
+ &r_data);
+ *data_ptr++ = r_data;
+ }
+
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
+ "Leaving fn: %s datacount: 0x%x\n",
+ __func__, (loop_cnt * 16));
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+/* ISP83xx flash read for _RDROM _BOARD */
+static uint32_t
+qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t fl_addr, u32_count, rval;
+ struct qla8044_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
+ fl_addr = rom_hdr->read_addr;
+ u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+ __func__, fl_addr, u32_count);
+
+ rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
+ (u8 *)(data_ptr), u32_count);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb0f6,
+ "%s: Flash Read Error,Count=%d\n", __func__, u32_count);
+ return QLA_FUNCTION_FAILED;
+ } else {
+ data_ptr += u32_count;
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+ }
+}
+
+static void
+qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+
+ ql_log(ql_log_info, vha, 0xb0f7,
+ "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ vha->host_no, index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+}
+
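+/*
+ * qla8044_minidump_process_l2tag - Capture L2 cache tag/data entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @entry_hdr : Minidump entry header for this cache capture
+ * @d_ptr : In/out pointer into the minidump data buffer
+ *
+ * For each tag value, writes the tag and control registers, optionally
+ * polls the control register against poll_mask, then reads r_cnt words
+ * starting at read_addr into the dump buffer.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */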
+static int
+qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla8044_minidump_entry_cache *cache_hdr;
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
+ cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+ if (c_value_w)
+ qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ qla8044_rd_reg_indirect(vha, c_addr,
+ &c_value_r);
+ if ((c_value_r & p_mask) == 0) {
+ break;
+ } else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, addr, &r_value);
+ *data_ptr++ = r_value;
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static void
+qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla8044_minidump_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+ qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, addr, &r_value);
+ *data_ptr++ = r_value;
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
+
+ ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
+ "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, r_stride, loop_cnt);
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
+ __func__, (unsigned long)(loop_cnt * sizeof(uint32_t)));
+
+ *d_ptr = data_ptr;
+}
+
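+/*
+ * qla8044_minidump_process_rdmux - Capture muxed register values.
+ *
+ * @vha : Pointer to adapter structure
+ * @entry_hdr : Minidump entry header for this capture
+ * @d_ptr : In/out pointer into the minidump data buffer
+ *
+ * Writes each select value to select_addr, reads read_addr and stores
+ * the (select, value) pair in the dump buffer.
+ */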
+static void
+qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
+
+ mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, s_value);
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = s_value;
+ *data_ptr++ = r_value;
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void
+qla8044_minidump_process_queue(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla8044_minidump_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
+ q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, qid);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+/* ISP83xx functions to process new minidump entries... */
+static uint32_t
+qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+ uint16_t s_stride, i;
+ struct qla8044_minidump_entry_pollrd *pollrd_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
+ s_addr = pollrd_hdr->select_addr;
+ r_addr = pollrd_hdr->read_addr;
+ s_value = pollrd_hdr->select_value;
+ s_stride = pollrd_hdr->select_value_stride;
+
+ poll_wait = pollrd_hdr->poll_wait;
+ poll_mask = pollrd_hdr->poll_mask;
+
+ for (i = 0; i < pollrd_hdr->op_count; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, s_value);
+ poll_wait = pollrd_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, s_addr, &r_value);
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb0fe,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+ }
+ }
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = s_value;
+ *data_ptr++ = r_value;
+
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+
+error:
+ return QLA_FUNCTION_FAILED;
+}
+
+static void
+qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+ uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+ struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
+ sel_val1 = rdmux2_hdr->select_value_1;
+ sel_val2 = rdmux2_hdr->select_value_2;
+ sel_addr1 = rdmux2_hdr->select_addr_1;
+ sel_addr2 = rdmux2_hdr->select_addr_2;
+ sel_val_mask = rdmux2_hdr->select_value_mask;
+ read_addr = rdmux2_hdr->read_addr;
+
+ for (i = 0; i < rdmux2_hdr->op_count; i++) {
+ qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & sel_val_mask;
+ *data_ptr++ = t_sel_val;
+
+ qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+ qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+ *data_ptr++ = data;
+
+ qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & sel_val_mask;
+ *data_ptr++ = t_sel_val;
+
+ qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+ qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+ *data_ptr++ = data;
+
+ sel_val1 += rdmux2_hdr->select_value_stride;
+ sel_val2 += rdmux2_hdr->select_value_stride;
+ }
+
+ *d_ptr = data_ptr;
+}
+
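+/*
+ * qla8044_minidump_process_pollrdmwr - Poll/read/modify/write capture.
+ *
+ * @vha : Pointer to adapter structure
+ * @entry_hdr : Minidump entry header for this capture
+ * @d_ptr : In/out pointer into the minidump data buffer
+ *
+ * Writes value_1 to addr_1, polls addr_1 against poll_mask, reads and
+ * masks addr_2, writes the masked value back, writes value_2 to addr_1,
+ * polls again and finally stores the (addr_2, data) pair.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */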
+static uint32_t
+qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t poll_wait, poll_mask, r_value, data;
+ uint32_t addr_1, addr_2, value_1, value_2;
+ struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
+ addr_1 = poll_hdr->addr_1;
+ addr_2 = poll_hdr->addr_2;
+ value_1 = poll_hdr->value_1;
+ value_2 = poll_hdr->value_2;
+ poll_mask = poll_hdr->poll_mask;
+
+ qla8044_wr_reg_indirect(vha, addr_1, value_1);
+
+ poll_wait = poll_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb0ff,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+ }
+ }
+
+ qla8044_rd_reg_indirect(vha, addr_2, &data);
+ data &= poll_hdr->modify_mask;
+ qla8044_wr_reg_indirect(vha, addr_2, data);
+ qla8044_wr_reg_indirect(vha, addr_1, value_2);
+
+ poll_wait = poll_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb100,
+ "%s: TIMEOUT2\n", __func__);
+ goto error;
+ }
+ }
+ }
+
+ *data_ptr++ = addr_2;
+ *data_ptr++ = data;
+
+ *d_ptr = data_ptr;
+
+ return QLA_SUCCESS;
+
+error:
+ return QLA_FUNCTION_FAILED;
+}
+
+#define ISP8044_PEX_DMA_ENGINE_INDEX 8
+#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000
+#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000
+#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0
+#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04
+#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08
+
+#define ISP8044_PEX_DMA_READ_SIZE (16 * 1024)
+#define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */
+
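+/*
+ * qla8044_check_dma_engine_state - Check if the pex-dma engine named in
+ * the minidump template is available for use.
+ *
+ * @vha : Pointer to adapter structure
+ *
+ * The engine number is taken from the template's saved_state_array; the
+ * engine is considered available when BIT_31 of its
+ * command-status-and-control register is set.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */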
+static int
+qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = ha->md_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+ /* Read the pex-dma's command-status-and-control register. */
+ rval = qla8044_rd_reg_indirect(vha,
+ (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ return QLA_FUNCTION_FAILED;
+
+ /* Check if requested pex-dma engine is available. */
+ if (cmd_sts_and_cntrl & BIT_31)
+ return QLA_SUCCESS;
+
+ return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla8044_start_pex_dma(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS, wait = 0;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = ha->md_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
+ m_hdr->desc_card_addr);
+ if (rval)
+ goto error_exit;
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
+ if (rval)
+ goto error_exit;
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
+ m_hdr->start_dma_cmd);
+ if (rval)
+ goto error_exit;
+
+ /* Wait for dma operation to complete. */
+ for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
+ rval = qla8044_rd_reg_indirect(vha,
+ (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ goto error_exit;
+
+ if ((cmd_sts_and_cntrl & BIT_1) == 0)
+ break;
+
+ udelay(10);
+ }
+
+ /* If we waited the full 100 ms, fail so the caller falls
+ * back to the rdmem entry read */
+ if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
+ rval = QLA_FUNCTION_FAILED;
+ goto error_exit;
+ }
+
+error_exit:
+ return rval;
+}
+
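+/*
+ * qla8044_minidump_pex_dma_read - Capture MS memory using the pex-dma
+ * engine instead of the slower MIU test agent.
+ *
+ * @vha : Pointer to adapter structure
+ * @entry_hdr : Minidump entry header for this rdmem capture
+ * @d_ptr : In/out pointer into the minidump data buffer
+ *
+ * Transfers the region in chunks of up to ISP8044_PEX_DMA_READ_SIZE via
+ * a DMA-coherent bounce buffer: for each chunk a descriptor is written
+ * to MS memory, the engine is started, and the result is copied out.
+ * On any failure the caller falls back to the rdmem entry read.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */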
+static int
+qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS;
+ struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+ uint32_t chunk_size, read_size;
+ uint8_t *data_ptr = (uint8_t *)*d_ptr;
+ void *rdmem_buffer = NULL;
+ dma_addr_t rdmem_dma;
+ struct qla8044_pex_dma_descriptor dma_desc;
+
+ rval = qla8044_check_dma_engine_state(vha);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb147,
+ "DMA engine not available. Fallback to rdmem-read.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ m_hdr = (void *)entry_hdr;
+
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
+ if (!rdmem_buffer) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb148,
+ "Unable to allocate rdmem dma buffer\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Prepare pex-dma descriptor to be written to MS memory. */
+ /* dma-desc-cmd layout:
+ * 0-3: dma-desc-cmd 0-3
+ * 4-7: pcid function number
+ * 8-15: dma-desc-cmd 8-15
+ * dma_bus_addr: dma buffer address
+ * cmd.read_data_size: amount of data-chunk to be read.
+ */
+ dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+ dma_desc.cmd.dma_desc_cmd |=
+ ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+
+ dma_desc.dma_bus_addr = rdmem_dma;
+ dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
+ read_size = 0;
+
+ /*
+ * Perform rdmem operation using pex-dma.
+ * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
+ */
+ while (read_size < m_hdr->read_data_size) {
+ if (m_hdr->read_data_size - read_size <
+ ISP8044_PEX_DMA_READ_SIZE) {
+ chunk_size = (m_hdr->read_data_size - read_size);
+ dma_desc.cmd.read_data_size = chunk_size;
+ }
+
+ dma_desc.src_addr = m_hdr->read_addr + read_size;
+
+ /* Prepare: Write pex-dma descriptor to MS memory. */
+ rval = qla8044_ms_mem_write_128b(vha,
+ m_hdr->desc_card_addr, (void *)&dma_desc,
+ (sizeof(struct qla8044_pex_dma_descriptor)/16));
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb14a,
+ "%s: Error writing rdmem-dma-init to MS !!!\n",
+ __func__);
+ goto error_exit;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb14b,
+ "%s: Dma-descriptor: Instruct for rdmem dma "
+ "(chunk_size 0x%x).\n", __func__, chunk_size);
+
+ /* Execute: Start pex-dma operation. */
+ rval = qla8044_start_pex_dma(vha, m_hdr);
+ if (rval)
+ goto error_exit;
+
+ memcpy(data_ptr, rdmem_buffer, chunk_size);
+ data_ptr += chunk_size;
+ read_size += chunk_size;
+ }
+
+ *d_ptr = (void *)data_ptr;
+
+error_exit:
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
+ rdmem_buffer, rdmem_dma);
+
+ return rval;
+}
+
+/*
+ * qla8044_collect_md_data - Retrieve firmware minidump data.
+ * @vha: pointer to adapter structure
+ */
+int
+qla8044_collect_md_data(struct scsi_qla_host *vha)
+{
+ int num_entry_hdr = 0;
+ struct qla8044_minidump_entry_hdr *entry_hdr;
+ struct qla8044_minidump_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t data_collected = 0, f_capture_mask;
+ int i, rval = QLA_FUNCTION_FAILED;
+ uint64_t now;
+ uint32_t timestamp, idc_control;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->md_dump) {
+ ql_log(ql_log_info, vha, 0xb101,
+ "%s(%ld) No buffer to dump\n",
+ __func__, vha->host_no);
+ return rval;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xb10d,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n", ha->fw_dump);
+ goto md_failed;
+ }
+
+ ha->fw_dumped = 0;
+
+ if (!ha->md_tmplt_hdr || !ha->md_dump) {
+ ql_log(ql_log_warn, vha, 0xb10e,
+ "Memory not allocated for minidump capture\n");
+ goto md_failed;
+ }
+
+ qla8044_idc_lock(ha);
+ idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ if (idc_control & GRACEFUL_RESET_BIT1) {
+ ql_log(ql_log_warn, vha, 0xb112,
+ "Forced reset from application, "
+ "ignore minidump capture\n");
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_control & ~GRACEFUL_RESET_BIT1));
+ qla8044_idc_unlock(ha);
+
+ goto md_failed;
+ }
+ qla8044_idc_unlock(ha);
+
+ if (qla82xx_validate_template_chksum(vha)) {
+ ql_log(ql_log_info, vha, 0xb109,
+ "Template checksum validation error\n");
+ goto md_failed;
+ }
+
+ tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+ ha->md_tmplt_hdr;
+ data_ptr = (uint32_t *)((uint8_t *)ha->md_dump);
+ num_entry_hdr = tmplt_hdr->num_of_entries;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb11a,
+ "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
+
+ f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
+
+ /* Validate whether required debug level is set */
+ if ((f_capture_mask & 0x3) != 0x3) {
+ ql_log(ql_log_warn, vha, 0xb10f,
+ "Minimum required capture mask[0x%x] level not set\n",
+ f_capture_mask);
+ }
+ tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
+ ql_log(ql_log_info, vha, 0xb102,
+ "[%s]: starting data ptr: %p\n",
+ __func__, data_ptr);
+ ql_log(ql_log_info, vha, 0xb10b,
+ "[%s]: no of entry headers in Template: 0x%x\n",
+ __func__, num_entry_hdr);
+ ql_log(ql_log_info, vha, 0xb10c,
+ "[%s]: Total_data_size 0x%x, %d obtained\n",
+ __func__, ha->md_dump_size, ha->md_dump_size);
+
+ /* Update current timestamp before taking dump */
+ now = get_jiffies_64();
+ timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+ tmplt_hdr->driver_timestamp = timestamp;
+
+ entry_hdr = (struct qla8044_minidump_entry_hdr *)
+ (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
+ tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
+ tmplt_hdr->ocm_window_reg[ha->portnum];
+
+ /* Walk through the entry headers - validate/perform required action */
+ for (i = 0; i < num_entry_hdr; i++) {
+ if (data_collected > ha->md_dump_size) {
+ ql_log(ql_log_info, vha, 0xb103,
+ "Data collected: [0x%x], "
+ "Total Dump size: [0x%x]\n",
+ data_collected, ha->md_dump_size);
+ return rval;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ql2xmdcapmask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA82XX_DBG_SKIPPED_FLAG;
+ goto skip_nxt_entry;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb104,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected,
+ (ha->md_dump_size - data_collected));
+
+ /* Decode the entry type and take required action to capture
+ * debug data
+ */
+ switch (entry_hdr->entry_type) {
+ case QLA82XX_RDEND:
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA82XX_CNTRL:
+ rval = qla8044_minidump_process_control(vha,
+ entry_hdr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_RDCRB:
+ qla8044_minidump_process_rdcrb(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMEM:
+ rval = qla8044_minidump_pex_dma_read(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ rval = qla8044_minidump_process_rdmem(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha,
+ entry_hdr, i);
+ goto md_failed;
+ }
+ }
+ break;
+ case QLA82XX_BOARD:
+ case QLA82XX_RDROM:
+ rval = qla8044_minidump_process_rdrom(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha,
+ entry_hdr, i);
+ }
+ break;
+ case QLA82XX_L2DTG:
+ case QLA82XX_L2ITG:
+ case QLA82XX_L2DAT:
+ case QLA82XX_L2INS:
+ rval = qla8044_minidump_process_l2tag(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA8044_L1DTG:
+ case QLA8044_L1ITG:
+ case QLA82XX_L1DAT:
+ case QLA82XX_L1INS:
+ qla8044_minidump_process_l1cache(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDOCM:
+ qla8044_minidump_process_rdocm(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMUX:
+ qla8044_minidump_process_rdmux(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_QUEUE:
+ qla8044_minidump_process_queue(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA8044_POLLRD:
+ rval = qla8044_minidump_process_pollrd(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA8044_RDMUX2:
+ qla8044_minidump_process_rdmux2(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA8044_POLLRDMWR:
+ rval = qla8044_minidump_process_pollrdmwr(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA82XX_RDNOP:
+ default:
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ }
+
+ data_collected = (uint8_t *)data_ptr -
+ (uint8_t *)((uint8_t *)ha->md_dump);
+skip_nxt_entry:
+ /*
+ * next entry in the template
+ */
+ entry_hdr = (struct qla8044_minidump_entry_hdr *)
+ (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
+ }
+
+ if (data_collected != ha->md_dump_size) {
+ ql_log(ql_log_info, vha, 0xb105,
+ "Dump data mismatch: Data collected: "
+ "[0x%x], total_data_size:[0x%x]\n",
+ data_collected, ha->md_dump_size);
+ goto md_failed;
+ }
+
+ ql_log(ql_log_info, vha, 0xb110,
+ "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
+ vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
+ ha->fw_dumped = 1;
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+
+ ql_log(ql_log_info, vha, 0xb106,
+ "Leaving fn: %s Last entry: 0x%x\n",
+ __func__, i);
+md_failed:
+ return rval;
+}
+
+void
+qla8044_get_minidump(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!qla8044_collect_md_data(vha)) {
+ ha->fw_dumped = 1;
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb0db,
+ "%s: Unable to collect minidump\n",
+ __func__);
+ }
+}
+
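+/*
+ * qla8044_poll_flash_status_reg - Poll FLASH_STATUS until the flash part
+ * reports ready or the retry count is exhausted.
+ *
+ * @vha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */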
+static int
+qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
+{
+ uint32_t flash_status;
+ int retries = QLA8044_FLASH_READ_RETRY_COUNT;
+ int ret_val = QLA_SUCCESS;
+
+ while (retries--) {
+ ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
+ &flash_status);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb13c,
+ "%s: Failed to read FLASH_STATUS reg.\n",
+ __func__);
+ break;
+ }
+ if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
+ QLA8044_FLASH_STATUS_READY)
+ break;
+ msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
+ }
+
+ if (!retries)
+ ret_val = QLA_FUNCTION_FAILED;
+
+ return ret_val;
+}
+
+static int
+qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
+ uint32_t data)
+{
+ int ret_val = QLA_SUCCESS;
+ uint32_t cmd;
+
+ cmd = vha->hw->fdt_wrt_sts_reg_cmd;
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb125,
+ "%s: Failed to write to FLASH_ADDR.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb126,
+ "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_SECOND_ERASE_MS_VAL);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb127,
+ "%s: Failed to write to FLASH_CONTROL.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb128,
+ "%s: Error polling flash status reg.\n", __func__);
+
+exit_func:
+ return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_unprotect_flash(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb139,
+ "%s: Write flash status failed.\n", __func__);
+
+ return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_protect_flash(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb13b,
+ "%s: Write flash status failed.\n", __func__);
+
+ return ret_val;
+}
+
+
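+/*
+ * qla8044_erase_flash_sector - Erase one flash sector.
+ *
+ * @vha : Pointer to adapter structure
+ * @sector_start_addr : Start address of the sector to erase
+ *
+ * Waits for the flash to go ready, writes the 24-bit sector address
+ * byte-swapped into FLASH_WRDATA, issues the erase command through
+ * FLASH_ADDR/FLASH_CONTROL and polls for completion.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */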
+static int
+qla8044_erase_flash_sector(struct scsi_qla_host *vha,
+ uint32_t sector_start_addr)
+{
+ uint32_t reversed_addr;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb12e,
+ "%s: Poll flash status after erase failed..\n", __func__);
+ }
+
+ reversed_addr = (((sector_start_addr & 0xFF) << 16) |
+ (sector_start_addr & 0xFF00) |
+ ((sector_start_addr & 0xFF0000) >> 16));
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_WRDATA, reversed_addr);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb12f,
+ "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb130,
+ "%s: Failed to write to FLASH_ADDR.\n", __func__);
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_LAST_ERASE_MS_VAL);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb131,
+ "%s: Failed write to FLASH_CONTROL.\n", __func__);
+ }
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb132,
+ "%s: Poll flash status failed.\n", __func__);
+ }
+
+ return ret_val;
+}
+
+/*
+ * qla8044_flash_write_u32 - Write data to flash
+ *
+ * @vha : Pointer to adapter structure
+ * addr : Flash address to write to
+ * p_data : Data to be written
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ *
+ * NOTE: Lock should be held on entry
+ */
+static int
+qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
+ uint32_t *p_data)
+{
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ 0x00800000 | (addr >> 2));
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb134,
+ "%s: Failed write to FLASH_ADDR.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb135,
+ "%s: Failed write to FLASH_WRDATA.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb136,
+ "%s: Failed write to FLASH_CONTROL.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb137,
+ "%s: Poll flash status failed.\n", __func__);
+ }
+
+exit_func:
+ return ret_val;
+}
+
+static int
+qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret = QLA_FUNCTION_FAILED;
+ uint32_t spi_val;
+
+ if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
+ dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
+ ql_dbg(ql_dbg_user, vha, 0xb123,
+ "Got unsupported dwords = 0x%x.\n",
+ dwords);
+ return QLA_FUNCTION_FAILED;
+ }
+
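+ /* Buffer-mode sequence (constants from qla_nx2.h): the first DWORD
+ * is written with control pattern 0x43 (FIRST_MS_PATTERN), DWORDs
+ * 2..N-1 with 0x7F (SECOND_MS_PATTERN) and the last DWORD with 0x7D
+ * (LAST_MS_PATTERN), polling flash status after each control write.
+ */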
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ spi_val | QLA8044_FLASH_SPI_CTL);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_FIRST_TEMP_VAL);
+
+ /* First DWORD write to FLASH_WRDATA */
+ ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
+ *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_FIRST_MS_PATTERN);
+
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb124,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+
+ dwords--;
+
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_SECOND_TEMP_VAL);
+
+ /* Second to (N-1)th DWORD writes */
+ while (dwords != 1) {
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_SECOND_MS_PATTERN);
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb129,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+ dwords--;
+ }
+
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
+
+ /* Last DWORD write */
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_LAST_MS_PATTERN);
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb12a,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
+
+ if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
+ ql_log(ql_log_warn, vha, 0xb12b,
+ "%s: Failed.\n", __func__);
+ spi_val = 0;
+ /* Operation failed, clear error bit. */
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ &spi_val);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ spi_val | QLA8044_FLASH_SPI_CTL);
+ }
+exit_func:
+ return ret;
+}
+
+static int
+qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret = QLA_FUNCTION_FAILED;
+ uint32_t liter;
+
+ for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+ ret = qla8044_flash_write_u32(vha, faddr, dwptr);
+ if (ret) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb141,
+ "%s: flash address=%x data=%x.\n", __func__,
+ faddr, *dwptr);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int
+qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
+ int dword_count, erase_sec_count;
+ uint32_t erase_offset;
+ uint8_t *p_cache, *p_src;
+
+ erase_offset = offset;
+
+ p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
+ if (!p_cache)
+ return QLA_FUNCTION_FAILED;
+
+ memcpy(p_cache, buf, length);
+ p_src = p_cache;
+ dword_count = length / sizeof(uint32_t);
+ /* Since the offset and length are sector aligned, dword_count will
+ * always be a multiple of QLA8044_MAX_OPTROM_BURST_DWORDS (64).
+ */
+ burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
+ erase_sec_count = length / QLA8044_SECTOR_SIZE;
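+ /* Worked example: a sector-aligned 128 KB write gives
+ * dword_count = 32768, burst_iter_count = 32768 / 64 = 512 and
+ * erase_sec_count = 131072 / 65536 = 2.
+ */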
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ /* Lock and enable write for whole operation. */
+ qla8044_flash_lock(vha);
+ qla8044_unprotect_flash(vha);
+
+ /* Erasing the sectors */
+ for (i = 0; i < erase_sec_count; i++) {
+ rval = qla8044_erase_flash_sector(vha, erase_offset);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb121,
+ "Failed to erase the sector having address: "
+ "0x%x.\n", erase_offset);
+ goto out;
+ }
+ ql_dbg(ql_dbg_user, vha, 0xb138,
+ "Done erase of sector=0x%x.\n",
+ erase_offset);
+ erase_offset += QLA8044_SECTOR_SIZE;
+ }
+ ql_dbg(ql_dbg_user, vha, 0xb13f,
+ "Got write for addr = 0x%x length=0x%x.\n",
+ offset, length);
+
+ for (i = 0; i < burst_iter_count; i++) {
+
+ /* Go with write. */
+ rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
+ offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
+ if (rval) {
+ /* Buffer mode failed; fall back to DWORD mode */
+ ql_log(ql_log_warn, vha, 0xb122,
+ "Failed to write flash in buffer mode, "
+ "Reverting to slow-write.\n");
+ rval = qla8044_write_flash_dword_mode(vha,
+ (uint32_t *)p_src, offset,
+ QLA8044_MAX_OPTROM_BURST_DWORDS);
+ }
+ p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+ offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+ }
+ ql_dbg(ql_dbg_user, vha, 0xb133,
+ "Done writing.\n");
+
+out:
+ qla8044_protect_flash(vha);
+ qla8044_flash_unlock(vha);
+ scsi_unblock_requests(vha->host);
+ kfree(p_cache);
+
+ return rval;
+}
+
+#define LEG_INT_PTR_B31 (1 << 31)
+#define LEG_INT_PTR_B30 (1 << 30)
+#define PF_BITS_MASK (0xF << 16)
+/**
+ * qla8044_intr_handler() - Process interrupts for the ISP8044
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by the system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla8044_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ unsigned long flags;
+ unsigned long iter;
+ uint32_t stat;
+ uint16_t mb[4];
+ uint32_t leg_int_ptr = 0, pf_bit;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0xb143,
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
+ leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+
+ /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+ if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb144,
+ "%s: Legacy Interrupt Bit 31 not set, "
+ "spurious interrupt!\n", __func__);
+ return IRQ_NONE;
+ }
+
+ pf_bit = ha->portnum << 16;
+ /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
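+ /* e.g. for PCI function 2, pf_bit is 0x20000 and must equal
+ * leg_int_ptr & PF_BITS_MASK (0xF0000) for the interrupt to be ours.
+ */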
+ if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb145,
+ "%s: Incorrect function ID 0x%x in "
+ "legacy interrupt register, "
+ "ha->pf_bit = 0x%x\n", __func__,
+ (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
+ return IRQ_NONE;
+ }
+
+ /* To de-assert the legacy interrupt, write 0 to the Legacy Interrupt
+ * Trigger Control register and poll until Legacy Interrupt Pointer
+ * register bit 30 is 0.
+ */
+ qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
+ do {
+ leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+ if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
+ break;
+ } while (leg_int_ptr & (LEG_INT_PTR_B30));
+
+ reg = &ha->iobase->isp82;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (iter = 1; iter--; ) {
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ ql_dbg(ql_dbg_p3p, vha, 0xb146,
+ "Unrecognized interrupt type "
+ "(%d).\n", stat & 0xff);
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ }
+
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int
+qla8044_idc_dontreset(struct qla_hw_data *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ return idc_ctrl & DONTRESET_BIT0;
+}
+
+static void
+qla8044_clear_rst_ready(scsi_qla_host_t *vha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+ /*
+ * For ISP8044, the drv_state register has 1 bit per function;
+ * shift 1 by func_num to clear the bit for this function.
+ * For ISP82xx, drv_state has 4 bits per function.
+ */
+ drv_state &= ~(1 << vha->hw->portnum);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb13d,
+ "drv_state: 0x%08x\n", drv_state);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+int
+qla8044_abort_isp(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint32_t dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_idc_lock(ha);
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (ql2xdontresethba)
+ qla8044_set_idc_dontreset(vha);
+
+ /* If device_state is NEED_RESET, go ahead with
+ * Reset, irrespective of ql2xdontresethba. This is to allow a
+ * non-reset-owner to force a reset. A non-reset-owner sets
+ * the IDC_CTRL BIT0 to prevent the Reset-owner from doing a Reset
+ * and then forces a Reset by setting device_state to
+ * NEED_RESET. */
+ if (dev_state == QLA8XXX_DEV_READY) {
+ /* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
+ * recovery */
+ if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb13e,
+ "Reset recovery disabled\n");
+ rval = QLA_FUNCTION_FAILED;
+ goto exit_isp_reset;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb140,
+ "HW State: NEED RESET\n");
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_NEED_RESET);
+ }
+
+ /* For ISP8044, the Reset owner is NIC, iSCSI or FCoE based on
+ * priority and which drivers are present. Unlike ISP82XX, the
+ * function setting NEED_RESET may not be the Reset owner. */
+ qla83xx_reset_ownership(vha);
+
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ qla8044_idc_lock(ha);
+ qla8044_clear_rst_ready(vha);
+
+exit_isp_reset:
+ qla8044_idc_unlock(ha);
+ if (rval == QLA_SUCCESS) {
+ ha->flags.isp82xx_fw_hung = 0;
+ ha->flags.nic_core_reset_hdlr_active = 0;
+ rval = qla82xx_restart_isp(vha);
+ }
+
+ return rval;
+}
+
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
new file mode 100644
index 000000000000..2ab2eabab908
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -0,0 +1,551 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_NX2_H
+#define __QLA_NX2_H
+
+#define QSNT_ACK_TOV 30
+#define INTENT_TO_RECOVER 0x01
+#define PROCEED_TO_RECOVER 0x02
+#define IDC_LOCK_RECOVERY_OWNER_MASK 0x3C
+#define IDC_LOCK_RECOVERY_STATE_MASK 0x3
+#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS 2
+
+#define QLA8044_DRV_LOCK_MSLEEP 200
+#define QLA8044_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA8044_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+
+#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
+#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
+
+/* MIU_TEST_AGT_CTRL flags; these work for SIU as well. */
+#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE | \
+ MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the PCIe BAR and BAR select before presentation
+ * over PCIe. */
+/* host memory via IMBUS */
+#define QLA8044_P2_ADDR_PCIE (0x0000000800000000ULL)
+#define QLA8044_P3_ADDR_PCIE (0x0000008000000000ULL)
+#define QLA8044_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
+#define QLA8044_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA8044_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA8044_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA8044_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA8044_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLA8044_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
+#define QLA8044_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+#define QLA8044_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
+#define QLA8044_PCI_CRBSPACE ((unsigned long)0x06000000)
+#define QLA8044_PCI_DIRECT_CRB ((unsigned long)0x04400000)
+#define QLA8044_PCI_CAMQM ((unsigned long)0x04800000)
+#define QLA8044_PCI_CAMQM_MAX ((unsigned long)0x04ffffff)
+#define QLA8044_PCI_DDR_NET ((unsigned long)0x00000000)
+#define QLA8044_PCI_QDR_NET ((unsigned long)0x04000000)
+#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff)
+
+/* PCI Windowing for DDR regions. */
+#define QLA8044_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+
+/* Indirectly Mapped Registers */
+#define QLA8044_FLASH_SPI_STATUS 0x2808E010
+#define QLA8044_FLASH_SPI_CONTROL 0x2808E014
+#define QLA8044_FLASH_STATUS 0x42100004
+#define QLA8044_FLASH_CONTROL 0x42110004
+#define QLA8044_FLASH_ADDR 0x42110008
+#define QLA8044_FLASH_WRDATA 0x4211000C
+#define QLA8044_FLASH_RDDATA 0x42110018
+#define QLA8044_FLASH_DIRECT_WINDOW 0x42110030
+#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF & (DATA)))
+
+/* Flash access regs */
+#define QLA8044_FLASH_LOCK 0x3850
+#define QLA8044_FLASH_UNLOCK 0x3854
+#define QLA8044_FLASH_LOCK_ID 0x3500
+
+/* Driver Lock regs */
+#define QLA8044_DRV_LOCK 0x3868
+#define QLA8044_DRV_UNLOCK 0x386C
+#define QLA8044_DRV_LOCK_ID 0x3504
+#define QLA8044_DRV_LOCKRECOVERY 0x379C
+
+/* IDC version */
+#define QLA8044_IDC_VER_MAJ_VALUE 0x1
+#define QLA8044_IDC_VER_MIN_VALUE 0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA8044_CRB_IDC_VER_MAJOR 0x3780
+#define QLA8044_CRB_IDC_VER_MINOR 0x3798
+#define QLA8044_IDC_DRV_AUDIT 0x3794
+#define QLA8044_SRE_SHIM_CONTROL 0x0D200284
+#define QLA8044_PORT0_RXB_PAUSE_THRS 0x0B2003A4
+#define QLA8044_PORT1_RXB_PAUSE_THRS 0x0B2013A4
+#define QLA8044_PORT0_RXB_TC_MAX_CELL 0x0B200388
+#define QLA8044_PORT1_RXB_TC_MAX_CELL 0x0B201388
+#define QLA8044_PORT0_RXB_TC_STATS 0x0B20039C
+#define QLA8044_PORT1_RXB_TC_STATS 0x0B20139C
+#define QLA8044_PORT2_IFB_PAUSE_THRS 0x0B200704
+#define QLA8044_PORT3_IFB_PAUSE_THRS 0x0B201704
+
+/* set value to pause threshold value */
+#define QLA8044_SET_PAUSE_VAL 0x0
+#define QLA8044_SET_TC_MAX_CELL_VAL 0x03FF03FF
+#define QLA8044_PEG_HALT_STATUS1 0x34A8
+#define QLA8044_PEG_HALT_STATUS2 0x34AC
+#define QLA8044_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
+#define QLA8044_FW_CAPABILITIES 0x3528
+#define QLA8044_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
+#define QLA8044_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
+#define QLA8044_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
+#define QLA8044_CRB_DRV_SCRATCH 0x3548
+#define QLA8044_CRB_DEV_PART_INFO1 0x37E0
+#define QLA8044_CRB_DEV_PART_INFO2 0x37E4
+#define QLA8044_FW_VER_MAJOR 0x3550
+#define QLA8044_FW_VER_MINOR 0x3554
+#define QLA8044_FW_VER_SUB 0x3558
+#define QLA8044_NPAR_STATE 0x359C
+#define QLA8044_FW_IMAGE_VALID 0x35FC
+#define QLA8044_CMDPEG_STATE 0x3650
+#define QLA8044_ASIC_TEMP 0x37B4
+#define QLA8044_FW_API 0x356C
+#define QLA8044_DRV_OP_MODE 0x3570
+#define QLA8044_CRB_WIN_BASE 0x3800
+#define QLA8044_CRB_WIN_FUNC(f) (QLA8044_CRB_WIN_BASE+((f)*4))
+#define QLA8044_SEM_LOCK_BASE 0x3840
+#define QLA8044_SEM_UNLOCK_BASE 0x3844
+#define QLA8044_SEM_LOCK_FUNC(f) (QLA8044_SEM_LOCK_BASE+((f)*8))
+#define QLA8044_SEM_UNLOCK_FUNC(f) (QLA8044_SEM_UNLOCK_BASE+((f)*8))
+#define QLA8044_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
+#define QLA8044_LINK_SPEED_FACTOR 10
+
+/* FLASH API Defines */
+#define QLA8044_FLASH_MAX_WAIT_USEC 100
+#define QLA8044_FLASH_LOCK_TIMEOUT 10000
+#define QLA8044_FLASH_SECTOR_SIZE 65536
+#define QLA8044_DRV_LOCK_TIMEOUT 2000
+#define QLA8044_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLA8044_FLASH_WRITE_CMD 0xdacdacda
+#define QLA8044_FLASH_BUFFER_WRITE_CMD 0xcadcadca
+#define QLA8044_FLASH_READ_RETRY_COUNT 2000
+#define QLA8044_FLASH_STATUS_READY 0x6
+#define QLA8044_FLASH_BUFFER_WRITE_MIN 2
+#define QLA8044_FLASH_BUFFER_WRITE_MAX 64
+#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA8044_ERASE_MODE 1
+#define QLA8044_WRITE_MODE 2
+#define QLA8044_DWORD_WRITE_MODE 3
+#define QLA8044_GLOBAL_RESET 0x38CC
+#define QLA8044_WILDCARD 0x38F0
+#define QLA8044_INFORMANT 0x38FC
+#define QLA8044_HOST_MBX_CTRL 0x3038
+#define QLA8044_FW_MBX_CTRL 0x303C
+#define QLA8044_BOOTLOADER_ADDR 0x355C
+#define QLA8044_BOOTLOADER_SIZE 0x3560
+#define QLA8044_FW_IMAGE_ADDR 0x3564
+#define QLA8044_MBX_INTR_ENABLE 0x1000
+#define QLA8044_MBX_INTR_MASK 0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0 0x1
+#define GRACEFUL_RESET_BIT1 0x2
+
+/* ISP8044 PEG_HALT_STATUS1 bits */
+#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29)
+#define QLA8044_HALT_STATUS_FW_RESET (0x2 << 29)
+#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA8044_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLA8044_BOOT_FROM_FLASH 0
+#define QLA8044_IDC_PARAM_ADDR 0x3e8020
+
+/* FLASH related definitions */
+#define QLA8044_OPTROM_BURST_SIZE 0x100
+#define QLA8044_MAX_OPTROM_BURST_DWORDS (QLA8044_OPTROM_BURST_SIZE / 4)
+#define QLA8044_MIN_OPTROM_BURST_DWORDS 2
+#define QLA8044_SECTOR_SIZE (64 * 1024)
+
+#define QLA8044_FLASH_SPI_CTL 0x4
+#define QLA8044_FLASH_FIRST_TEMP_VAL 0x00800000
+#define QLA8044_FLASH_SECOND_TEMP_VAL 0x00800001
+#define QLA8044_FLASH_FIRST_MS_PATTERN 0x43
+#define QLA8044_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLA8044_FLASH_LAST_MS_PATTERN 0x7D
+#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG 0xFD0100
+#define QLA8044_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLA8044_FLASH_ERASE_SIG 0xFD0300
+#define QLA8044_FLASH_LAST_ERASE_MS_VAL 0x3D
+
+/* Reset template definitions */
+#define QLA8044_MAX_RESET_SEQ_ENTRIES 16
+#define QLA8044_RESTART_TEMPLATE_SIZE 0x2000
+#define QLA8044_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLA8044_RESET_SEQ_VERSION 0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP 0x0000
+#define OPCODE_WRITE_LIST 0x0001
+#define OPCODE_READ_WRITE_LIST 0x0002
+#define OPCODE_POLL_LIST 0x0004
+#define OPCODE_POLL_WRITE_LIST 0x0008
+#define OPCODE_READ_MODIFY_WRITE 0x0010
+#define OPCODE_SEQ_PAUSE 0x0020
+#define OPCODE_SEQ_END 0x0040
+#define OPCODE_TMPL_END 0x0080
+#define OPCODE_POLL_READ_LIST 0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE
+#define QLA8044_IDC_DRV_CTRL 0x3790
+#define AF_8044_NO_FW_DUMP 27 /* 0x08000000 */
+
+#define MINIDUMP_SIZE_36K 36864
+
+struct qla8044_reset_template_hdr {
+ uint16_t version;
+ uint16_t signature;
+ uint16_t size;
+ uint16_t entries;
+ uint16_t hdr_size;
+ uint16_t checksum;
+ uint16_t init_seq_offset;
+ uint16_t start_seq_offset;
+} __packed;
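+/* init_seq_offset and start_seq_offset appear to be byte offsets from
+ * the start of the template buffer to the init and start command
+ * sequences; cf. init_offset/start_offset in qla8044_reset_template
+ * below.
+ */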
+
+/* Common Entry Header. */
+struct qla8044_reset_entry_hdr {
+ uint16_t cmd;
+ uint16_t size;
+ uint16_t count;
+ uint16_t delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla8044_poll {
+ uint32_t test_mask;
+ uint32_t test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla8044_rmw {
+ uint32_t test_mask;
+ uint32_t xor_value;
+ uint32_t or_value;
+ uint8_t shl;
+ uint8_t shr;
+ uint8_t index_a;
+ uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla8044_entry {
+ uint32_t arg1;
+ uint32_t arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords.*/
+struct qla8044_quad_entry {
+ uint32_t dr_addr;
+ uint32_t dr_value;
+ uint32_t ar_addr;
+ uint32_t ar_value;
+} __packed;
+
+struct qla8044_reset_template {
+ int seq_index;
+ int seq_error;
+ int array_index;
+ uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES];
+ uint8_t *buff;
+ uint8_t *stop_offset;
+ uint8_t *start_offset;
+ uint8_t *init_offset;
+ struct qla8044_reset_template_hdr *hdr;
+ uint8_t seq_end;
+ uint8_t template_end;
+};
+
+/* driver_code is for the driver to write some info about the entry;
+ * currently not used.
+ */
+struct qla8044_minidump_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+} __packed;
+
+/* Read CRB entry header */
+struct qla8044_minidump_entry_crb {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+} __packed;
+
+struct qla8044_minidump_entry_cache {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+} __packed;
+
+/* Read OCM */
+struct qla8044_minidump_entry_rdocm {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+} __packed;
+
+/* Read Memory */
+struct qla8044_minidump_entry_rdmem {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Read Memory: For Pex-DMA */
+struct qla8044_minidump_entry_rdmem_pex_dma {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t desc_card_addr;
+ uint16_t dma_desc_cmd;
+ uint8_t rsvd[2];
+ uint32_t start_dma_cmd;
+ uint8_t rsvd2[12];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+/* Read ROM */
+struct qla8044_minidump_entry_rdrom {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+/* Mux entry */
+struct qla8044_minidump_entry_mux {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+} __packed;
+
+/* Queue entry */
+struct qla8044_minidump_entry_queue {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+} __packed;
+
+/* POLLRD Entry */
+struct qla8044_minidump_entry_pollrd {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t read_addr;
+ uint32_t select_value;
+ uint16_t select_value_stride;
+ uint16_t op_count;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t data_size;
+ uint32_t rsvd_1;
+} __packed;
+
+/* RDMUX2 Entry */
+struct qla8044_minidump_entry_rdmux2 {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr_1;
+ uint32_t select_addr_2;
+ uint32_t select_value_1;
+ uint32_t select_value_2;
+ uint32_t op_count;
+ uint32_t select_value_mask;
+ uint32_t read_addr;
+ uint8_t select_value_stride;
+ uint8_t data_size;
+ uint8_t rsvd[2];
+} __packed;
+
+/* POLLRDMWR Entry */
+struct qla8044_minidump_entry_pollrdmwr {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+} __packed;
+
+/* IDC additional information */
+struct qla8044_idc_information {
+ uint32_t request_desc; /* IDC request descriptor */
+ uint32_t info1; /* IDC additional info */
+ uint32_t info2; /* IDC additional info */
+ uint32_t info3; /* IDC additional info */
+} __packed;
+
+enum qla_regs {
+ QLA8044_PEG_HALT_STATUS1_INDEX = 0,
+ QLA8044_PEG_HALT_STATUS2_INDEX,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX,
+ QLA8044_CRB_DRV_ACTIVE_INDEX,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8044_CRB_DRV_STATE_INDEX,
+ QLA8044_CRB_DRV_SCRATCH_INDEX,
+ QLA8044_CRB_DEV_PART_INFO_INDEX,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+ QLA8044_FW_VERSION_MAJOR_INDEX,
+ QLA8044_FW_VERSION_MINOR_INDEX,
+ QLA8044_FW_VERSION_SUB_INDEX,
+ QLA8044_CRB_CMDPEG_STATE_INDEX,
+ QLA8044_CRB_TEMP_STATE_INDEX,
+} __packed;
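+/* NOTE: this enum indexes qla8044_reg_tbl[] below; the two must stay
+ * in the same order (CRB_REG_INDEX_MAX = 14 entries each).
+ */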
+
+#define CRB_REG_INDEX_MAX 14
+#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
+#define CRB_CMDPEG_CHECK_DELAY 500
+
+static const uint32_t qla8044_reg_tbl[] = {
+ QLA8044_PEG_HALT_STATUS1,
+ QLA8044_PEG_HALT_STATUS2,
+ QLA8044_PEG_ALIVE_COUNTER,
+ QLA8044_CRB_DRV_ACTIVE,
+ QLA8044_CRB_DEV_STATE,
+ QLA8044_CRB_DRV_STATE,
+ QLA8044_CRB_DRV_SCRATCH,
+ QLA8044_CRB_DEV_PART_INFO1,
+ QLA8044_CRB_IDC_VER_MAJOR,
+ QLA8044_FW_VER_MAJOR,
+ QLA8044_FW_VER_MINOR,
+ QLA8044_FW_VER_SUB,
+ QLA8044_CMDPEG_STATE,
+ QLA8044_ASIC_TEMP,
+};
+
+/* MiniDump Structures */
+
+#define QLA8044_SS_OCM_WNDREG_INDEX 3
+#define QLA8044_DBG_STATE_ARRAY_LEN 16
+#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA8044_DBG_RSVD_ARRAY_LEN 8
+#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16
+#define QLA8044_SS_PCI_INDEX 0
+
+struct qla8044_minidump_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info_word2;
+ uint32_t driver_info_word3;
+ uint32_t driver_info_word4;
+
+ uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN];
+ uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN];
+};
+
+struct qla8044_pex_dma_descriptor {
+ struct {
+ uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+ uint8_t rsvd[2];
+ uint16_t dma_desc_cmd;
+ } cmd;
+ uint64_t src_addr;
+ uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd */
+ uint8_t rsvd[24];
+} __packed;
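+/* With __packed the descriptor is 48 bytes:
+ * 8 (cmd) + 8 (src_addr) + 8 (dma_bus_addr) + 24 (rsvd).
+ */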
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3e21e9fc9d91..9f01bbbf3a26 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1247,7 +1247,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
} else {
- if (IS_QLA82XX(vha->hw)) {
+ if (IS_P3P_TYPE(vha->hw)) {
if (!qla82xx_fcoe_ctx_reset(vha)) {
/* Ctx reset success */
ret = SUCCESS;
@@ -1303,6 +1303,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLAFX00(ha))
+ return qlafx00_loop_reset(vha);
+
if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->port_type != FCT_TARGET)
@@ -1311,14 +1315,12 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
ret = ha->isp_ops->target_reset(fcport, 0, 0);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802c,
- "Bus Reset failed: Target Reset=%d "
+ "Bus Reset failed: Reset=%d "
"d_id=%x.\n", ret, fcport->d_id.b24);
}
}
}
- if (IS_QLAFX00(ha))
- return QLA_SUCCESS;
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1506,7 +1508,7 @@ qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
if (sdev->queue_depth > shost->cmd_per_lun) {
if (sdev->queue_depth < ha->cfg_lun_q_depth)
continue;
- ql_log(ql_log_warn, vp, 0x3031,
+ ql_dbg(ql_dbg_io, vp, 0x3031,
"%ld:%d:%d: Ramping down queue depth to %d",
vp->host_no, sdev->id, sdev->lun,
ha->cfg_lun_q_depth);
@@ -1911,7 +1913,7 @@ static struct isp_operations qla2300_isp_ops = {
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
- .iospace_config = qla2x00_iospace_config,
+ .iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
@@ -1949,7 +1951,7 @@ static struct isp_operations qla24xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
.abort_isp = qla2x00_abort_isp,
- .iospace_config = qla2x00_iospace_config,
+ .iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
@@ -1987,7 +1989,7 @@ static struct isp_operations qla25xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
- .iospace_config = qla2x00_iospace_config,
+ .iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
@@ -2025,7 +2027,7 @@ static struct isp_operations qla81xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
- .iospace_config = qla2x00_iospace_config,
+ .iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
@@ -2060,13 +2062,51 @@ static struct isp_operations qla82xx_isp_ops = {
.beacon_blink = NULL,
.read_optrom = qla82xx_read_optrom_data,
.write_optrom = qla82xx_write_optrom_data,
- .get_flash_version = qla24xx_get_flash_version,
+ .get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
+static struct isp_operations qla8044_isp_ops = {
+ .pci_config = qla82xx_pci_config,
+ .reset_chip = qla82xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla82xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla82xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla8044_intr_handler,
+ .enable_intrs = qla82xx_enable_intrs,
+ .disable_intrs = qla82xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla24xx_fw_dump,
+ .beacon_on = qla82xx_beacon_on,
+ .beacon_off = qla82xx_beacon_off,
+ .beacon_blink = NULL,
+ .read_optrom = qla82xx_read_optrom_data,
+ .write_optrom = qla8044_write_optrom_data,
+ .get_flash_version = qla82xx_get_flash_version,
+ .start_scsi = qla82xx_start_scsi,
+ .abort_isp = qla8044_abort_isp,
+ .iospace_config = qla82xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
static struct isp_operations qla83xx_isp_ops = {
.pci_config = qla25xx_pci_config,
.reset_chip = qla24xx_reset_chip,
@@ -2237,6 +2277,14 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
/* Initialize 82XX ISP flags */
qla82xx_init_flags(ha);
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8044:
+ ha->device_type |= DT_ISP8044;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ /* Initialize 82XX ISP flags */
+ qla82xx_init_flags(ha);
+ break;
case PCI_DEVICE_ID_QLOGIC_ISP2031:
ha->device_type |= DT_ISP2031;
ha->device_type |= DT_ZIO_SUPPORTED;
@@ -2317,7 +2365,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
-
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
sht = &qla2xxx_driver_template;
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2330,7 +2377,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2484,6 +2532,21 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA8044(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_82XX;
+ rsp_length = RESPONSE_ENTRY_CNT_82XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla8044_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA83XX(ha)) {
ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
@@ -2512,6 +2575,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->port_down_retry_count = 30; /* default value */
ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
ha->mr.fw_hbt_en = 1;
}
@@ -2676,7 +2740,7 @@ que_init:
rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
}
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
req->req_q_out = &ha->iobase->isp82.req_q_out[0];
rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
@@ -2709,6 +2773,14 @@ que_init:
qla82xx_idc_unlock(ha);
ql_log(ql_log_fatal, base_vha, 0x00d7,
"HW State: FAILED.\n");
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_wr_direct(base_vha,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ qla8044_idc_unlock(ha);
+ ql_log(ql_log_fatal, base_vha, 0x0150,
+ "HW State: FAILED.\n");
}
ret = -ENODEV;
@@ -2804,6 +2876,13 @@ skip_dpc:
ha->isp_ops->enable_intrs(ha);
+ if (IS_QLAFX00(ha)) {
+ ret = qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
+ host->sg_tablesize = (ha->mr.extended_io_enabled) ?
+ QLA_SG_ALL : 128;
+ }
+
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -2824,9 +2903,6 @@ skip_dpc:
if (IS_QLAFX00(ha)) {
ret = qlafx00_fx_disc(base_vha,
- &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
-
- ret = qlafx00_fx_disc(base_vha,
&base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
/* Register system information */
@@ -2881,8 +2957,13 @@ probe_hw_failed:
qla82xx_clear_drv_active(ha);
qla82xx_idc_unlock(ha);
}
+ if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_drv_active(base_vha);
+ qla8044_idc_unlock(ha);
+ }
iospace_config_failed:
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (!ha->nx_pcibase)
iounmap((device_reg_t __iomem *)ha->nx_pcibase);
if (!ql2xdbwr)
@@ -2930,6 +3011,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
vha = pci_get_drvdata(pdev);
ha = vha->hw;
+ /* Notify ISPFX00 firmware */
+ if (IS_QLAFX00(ha))
+ qlafx00_driver_shutdown(vha, 20);
+
/* Turn-off FCE trace */
if (ha->flags.fce_enabled) {
qla2x00_disable_fce_trace(vha, NULL, NULL);
@@ -2977,6 +3062,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->flags.host_shutting_down = 1;
set_bit(UNLOADING, &base_vha->dpc_flags);
+ if (IS_QLAFX00(ha))
+ qlafx00_driver_shutdown(base_vha, 20);
+
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -3061,6 +3149,11 @@ qla2x00_remove_one(struct pci_dev *pdev)
scsi_host_put(base_vha->host);
+ if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_drv_active(base_vha);
+ qla8044_idc_unlock(ha);
+ }
if (IS_QLA82XX(ha)) {
qla82xx_idc_lock(ha);
qla82xx_clear_drv_active(ha);
@@ -3210,14 +3303,8 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Port login retry "
- "%02x%02x%02x%02x%02x%02x%02x%02x, "
- "id = 0x%04x retry cnt=%d.\n",
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7],
- fcport->loop_id, fcport->login_retry);
+ "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id, fcport->login_retry);
}
}
@@ -3290,7 +3377,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
/* Allocate cache for CT6 Ctx. */
if (!ctx_cachep) {
ctx_cachep = kmem_cache_create("qla2xxx_ctx",
@@ -3324,7 +3411,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
- if (IS_QLA82XX(ha) || ql2xenabledif) {
+ if (IS_P3P_TYPE(ha) || ql2xenabledif) {
ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DSD_LIST_DMA_POOL_SIZE, 8, 0);
if (!ha->dl_dma_pool) {
@@ -3532,7 +3619,7 @@ fail:
* Frees fw dump stuff.
*
* Input:
-* ha = adapter block pointer.
+* ha = adapter block pointer
*/
static void
qla2x00_free_fw_dump(struct qla_hw_data *ha)
@@ -4699,17 +4786,33 @@ qla2x00_do_dpc(void *data)
qla2x00_do_work(base_vha);
- if (IS_QLA82XX(ha)) {
- if (test_and_clear_bit(ISP_UNRECOVERABLE,
- &base_vha->dpc_flags)) {
- qla82xx_idc_lock(ha);
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_FAILED);
- qla82xx_idc_unlock(ha);
- ql_log(ql_log_info, base_vha, 0x4004,
- "HW State: FAILED.\n");
- qla82xx_device_state_handler(base_vha);
- continue;
+ if (IS_P3P_TYPE(ha)) {
+ if (IS_QLA8044(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla8044_idc_lock(ha);
+ qla8044_wr_direct(base_vha,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ qla8044_idc_unlock(ha);
+ ql_log(ql_log_info, base_vha, 0x4004,
+ "HW State: FAILED.\n");
+ qla8044_device_state_handler(base_vha);
+ continue;
+ }
+
+ } else {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ ql_log(ql_log_info, base_vha, 0x0151,
+ "HW State: FAILED.\n");
+ qla82xx_device_state_handler(base_vha);
+ continue;
+ }
}
if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
@@ -4809,16 +4912,26 @@ qla2x00_do_dpc(void *data)
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
- if (IS_QLA82XX(ha)) {
- qla82xx_device_state_handler(base_vha);
+ if (IS_P3P_TYPE(ha)) {
+ if (IS_QLA82XX(ha))
+ qla82xx_device_state_handler(base_vha);
+ if (IS_QLA8044(ha))
+ qla8044_device_state_handler(base_vha);
clear_bit(ISP_QUIESCE_NEEDED,
&base_vha->dpc_flags);
if (!ha->flags.quiesce_owner) {
qla2x00_perform_loop_resync(base_vha);
-
- qla82xx_idc_lock(ha);
- qla82xx_clear_qsnt_ready(base_vha);
- qla82xx_idc_unlock(ha);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_qsnt_ready(
+ base_vha);
+ qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_qsnt_ready(
+ base_vha);
+ qla8044_idc_unlock(ha);
+ }
}
} else {
clear_bit(ISP_QUIESCE_NEEDED,
@@ -4992,10 +5105,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
/* Make sure qla82xx_watchdog is run only for physical port */
- if (!vha->vp_idx && IS_QLA82XX(ha)) {
+ if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
start_dpc++;
- qla82xx_watchdog(vha);
+ if (IS_QLA82XX(ha))
+ qla82xx_watchdog(vha);
+ else if (IS_QLA8044(ha))
+ qla8044_watchdog(vha);
}
if (!vha->vp_idx && IS_QLAFX00(ha))
@@ -5075,7 +5191,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Check if beacon LED needs to be blinked for physical host only */
if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
/* There is no beacon_blink function for ISP82xx */
- if (!IS_QLA82XX(ha)) {
+ if (!IS_P3P_TYPE(ha)) {
set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
start_dpc++;
}
@@ -5519,6 +5635,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3bef6736d885..bd56cde795fc 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -565,7 +565,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
*start = FA_FLASH_LAYOUT_ADDR;
else if (IS_QLA81XX(ha))
*start = FA_FLASH_LAYOUT_ADDR_81;
- else if (IS_QLA82XX(ha)) {
+ else if (IS_P3P_TYPE(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_82;
goto end;
} else if (IS_QLA83XX(ha)) {
@@ -719,7 +719,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
start = le32_to_cpu(region->start) >> 2;
ql_dbg(ql_dbg_init, vha, 0x0049,
"FLT[%02x]: start=0x%x "
- "end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
+ "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff,
start, le32_to_cpu(region->end) >> 2,
le32_to_cpu(region->size));
@@ -741,13 +741,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
if (IS_QLA8031(ha))
break;
ha->flt_region_vpd_nvram = start;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
break;
if (ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
- if (IS_QLA82XX(ha) || IS_QLA8031(ha))
+ if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
break;
if (!ha->flags.port0)
ha->flt_region_vpd = start;
@@ -789,9 +789,17 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_BOOT_CODE_82XX:
ha->flt_region_boot = start;
break;
+ case FLT_REG_BOOT_CODE_8044:
+ if (IS_QLA8044(ha))
+ ha->flt_region_boot = start;
+ break;
case FLT_REG_FW_82XX:
ha->flt_region_fw = start;
break;
+ case FLT_REG_CNA_FW:
+ if (IS_CNA_CAPABLE(ha))
+ ha->flt_region_fw = start;
+ break;
case FLT_REG_GOLD_FW_82XX:
ha->flt_region_gold_fw = start;
break;
@@ -803,13 +811,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_vpd = start;
break;
case FLT_REG_FCOE_NVRAM_0:
- if (!IS_QLA8031(ha))
+ if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
break;
if (ha->flags.port0)
ha->flt_region_nvram = start;
break;
case FLT_REG_FCOE_NVRAM_1:
- if (!IS_QLA8031(ha))
+ if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
break;
if (!ha->flags.port0)
ha->flt_region_nvram = start;
@@ -883,7 +891,13 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
mid = le16_to_cpu(fdt->man_id);
fid = le16_to_cpu(fdt->id);
ha->fdt_wrt_disable = fdt->wrt_disable_bits;
- ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
+ ha->fdt_wrt_enable = fdt->wrt_enable_bits;
+ ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd;
+ if (IS_QLA8044(ha))
+ ha->fdt_erase_cmd = fdt->erase_cmd;
+ else
+ ha->fdt_erase_cmd =
+ flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
ha->fdt_block_size = le32_to_cpu(fdt->block_size);
if (fdt->unprotect_sec_cmd) {
ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
@@ -895,7 +909,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
goto done;
no_flash_data:
loc = locations[0];
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
ha->fdt_block_size = FLASH_BLK_SIZE_64K;
goto done;
}
@@ -946,7 +960,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- if (!IS_QLA82XX(ha))
+ if (!(IS_P3P_TYPE(ha)))
return;
wptr = (uint32_t *)req->ring;
@@ -1008,6 +1022,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
if (ha->flags.nic_core_reset_hdlr_active)
return;
+ if (IS_QLA8044(ha))
+ return;
+
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
if (hdr.version == __constant_cpu_to_le16(0xffff))
@@ -1302,7 +1319,7 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t *dwptr;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return buf;
/* Dword reads to flash. */
@@ -1360,7 +1377,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = QLA_SUCCESS;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return ret;
/* Enable flash write. */
@@ -1474,7 +1491,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1752,7 +1769,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return QLA_SUCCESS;
if (IS_QLA8031(ha) || IS_QLA81XX(ha))
@@ -1804,7 +1821,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return QLA_SUCCESS;
ha->beacon_blink_led = 0;
@@ -2822,6 +2839,121 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
}
int
+qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+{
+ int ret = QLA_SUCCESS;
+ uint32_t pcihdr, pcids;
+ uint32_t *dcode;
+ uint8_t *bcode;
+ uint8_t code_type, last_image;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!mbuf)
+ return QLA_FUNCTION_FAILED;
+
+ memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
+ memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
+ memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+ dcode = mbuf;
+
+ /* Begin with first PCI expansion ROM header. */
+ pcihdr = ha->flt_region_boot << 2;
+ last_image = 1;
+ do {
+ /* Verify PCI expansion ROM header. */
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr,
+ 0x20 * 4);
+ bcode = mbuf + (pcihdr % 4);
+ if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+ /* No signature */
+ ql_log(ql_log_fatal, vha, 0x0154,
+ "No matching ROM signature.\n");
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids,
+ 0x20 * 4);
+ bcode = mbuf + (pcids % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+ bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+ /* Incorrect header. */
+ ql_log(ql_log_fatal, vha, 0x0155,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Read version */
+ code_type = bcode[0x14];
+ switch (code_type) {
+ case ROM_CODE_TYPE_BIOS:
+ /* Intel x86, PC-AT compatible. */
+ ha->bios_revision[0] = bcode[0x12];
+ ha->bios_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0156,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
+ break;
+ case ROM_CODE_TYPE_FCODE:
+ /* Open Firmware standard for PCI (FCode). */
+ ha->fcode_revision[0] = bcode[0x12];
+ ha->fcode_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0157,
+ "Read FCODE %d.%d.\n",
+ ha->fcode_revision[1], ha->fcode_revision[0]);
+ break;
+ case ROM_CODE_TYPE_EFI:
+ /* Extensible Firmware Interface (EFI). */
+ ha->efi_revision[0] = bcode[0x12];
+ ha->efi_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0158,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x0159,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
+ break;
+ }
+
+ last_image = bcode[0x15] & BIT_7;
+
+ /* Locate next PCI expansion ROM. */
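+ /* (the image length at PCIR bytes 0x10-0x11 is in 512-byte units;
+ * e.g. a value of 0x40 there advances pcihdr by 32 KB) */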
+ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+ } while (!last_image);
+
+ /* Read firmware image information. */
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+ dcode = mbuf;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2,
+ 0x20);
+ bcode = mbuf + (pcihdr % 4);
+
+ /* Validate the firmware image signature. */
+ if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 &&
+ bcode[0x2] == 0x40 && bcode[0x3] == 0x40) {
+ ha->fw_revision[0] = bcode[0x4];
+ ha->fw_revision[1] = bcode[0x5];
+ ha->fw_revision[2] = bcode[0x6];
+ ql_dbg(ql_dbg_init, vha, 0x0153,
+ "Firmware revision %d.%d.%d\n",
+ ha->fw_revision[0], ha->fw_revision[1],
+ ha->fw_revision[2]);
+ }
+
+ return ret;
+}
+
+int
qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
{
int ret = QLA_SUCCESS;
@@ -2832,7 +2964,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
int i;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return ret;
if (!mbuf)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 83a8f7a9ec76..596480022b0a 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -10,7 +10,7 @@
*
* Forward port and refactoring to modern qla2xxx and target/configfs
*
- * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -430,13 +430,8 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
}
ql_dbg(ql_dbg_tgt, vha, 0xe047,
- "scsi(%ld): resetting (session %p from port "
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
- "mcmd %x, loop_id %d)\n", vha->host_no, sess,
- sess->port_name[0], sess->port_name[1],
- sess->port_name[2], sess->port_name[3],
- sess->port_name[4], sess->port_name[5],
- sess->port_name[6], sess->port_name[7],
+ "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
+ "loop_id %d)\n", vha->host_no, sess, sess->port_name,
mcmd, loop_id);
lun = a->u.isp24.fcp_cmnd.lun;
@@ -467,15 +462,10 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
sess->expires = jiffies + dev_loss_tmo * HZ;
ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
- "qla_target(%d): session for port %02x:%02x:%02x:"
- "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
+ "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
"deletion in %u secs (expires: %lu) immed: %d\n",
- sess->vha->vp_idx,
- sess->port_name[0], sess->port_name[1],
- sess->port_name[2], sess->port_name[3],
- sess->port_name[4], sess->port_name[5],
- sess->port_name[6], sess->port_name[7],
- sess->loop_id, dev_loss_tmo, sess->expires, immediate);
+ sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
+ sess->expires, immediate);
if (immediate)
schedule_delayed_work(&tgt->sess_del_work, 0);
@@ -630,13 +620,9 @@ static struct qla_tgt_sess *qlt_create_sess(
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
- "qla_target(%u): session allocation failed, "
- "all commands from port %02x:%02x:%02x:%02x:"
- "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7]);
+ "qla_target(%u): session allocation failed, all commands "
+ "from port %8phC will be refused", vha->vp_idx,
+ fcport->port_name);
return NULL;
}
@@ -680,15 +666,11 @@ static struct qla_tgt_sess *qlt_create_sess(
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
- "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
- "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
- " completion %ssupported) added\n",
- vha->vp_idx, local ? "local " : "", fcport->port_name[0],
- fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
- fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
- sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
- "" : "not ");
+ "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
+ "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
+ vha->vp_idx, local ? "local " : "", fcport->port_name,
+ fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
+ sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
return sess;
}
@@ -730,13 +712,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
qlt_undelete_sess(sess);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
- "qla_target(%u): %ssession for port %02x:"
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
- "reappeared\n", vha->vp_idx, sess->local ? "local "
- : "", sess->port_name[0], sess->port_name[1],
- sess->port_name[2], sess->port_name[3],
- sess->port_name[4], sess->port_name[5],
- sess->port_name[6], sess->port_name[7],
+ "qla_target(%u): %ssession for port %8phC "
+ "(loop ID %d) reappeared\n", vha->vp_idx,
+ sess->local ? "local " : "", sess->port_name,
sess->loop_id);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
@@ -749,13 +727,8 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
if (sess && sess->local) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
"qla_target(%u): local session for "
- "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "(loop ID %d) became global\n", vha->vp_idx,
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7],
- sess->loop_id);
+ "port %8phC (loop ID %d) became global\n", vha->vp_idx,
+ fcport->port_name, sess->loop_id);
sess->local = 0;
}
ha->tgt.tgt_ops->put_sess(sess);
@@ -2840,10 +2813,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
int res = 0;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
- "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
- " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
- iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
- iocb->u.isp24.status_subcode);
+ "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
+ vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 6c66d22eb1b1..a808e293dae0 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.05.00.03-k"
+#define QLA2XXX_VERSION "8.06.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 5
+#define QLA_DRIVER_MINOR_VER 6
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index a318092e033f..f85b9e5c1f05 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -2,12 +2,9 @@
* This file contains tcm implementation using v4 configfs fabric infrastructure
* for QLogic target mode HBAs
*
- * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ * (c) Copyright 2010-2013 Datera, Inc.
*
- * Licensed to the Linux Foundation under the General Public License (GPL)
- * version 2.
- *
- * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ * Author: Nicholas A. Bellinger <nab@daterainc.com>
*
* tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
* the TCM_FC / Open-FCoE.org fabric module.
@@ -360,6 +357,14 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
}
+static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only;
+}
+
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
@@ -489,38 +494,13 @@ static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
return 0;
}
-/*
- * The LIO target core uses DMA_TO_DEVICE to mean that data is going
- * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
- * that data is coming from the target (eg handling a READ). However,
- * this is just the opposite of what we have to tell the DMA mapping
- * layer -- eg when handling a READ, the HBA will have to DMA the data
- * out of memory so it can send it to the initiator, which means we
- * need to use DMA_TO_DEVICE when we map the data.
- */
-static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
-{
- if (se_cmd->se_cmd_flags & SCF_BIDI)
- return DMA_BIDIRECTIONAL;
-
- switch (se_cmd->data_direction) {
- case DMA_TO_DEVICE:
- return DMA_FROM_DEVICE;
- case DMA_FROM_DEVICE:
- return DMA_TO_DEVICE;
- case DMA_NONE:
- default:
- return DMA_NONE;
- }
-}
-
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
cmd->bufflen = se_cmd->data_length;
- cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
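
The deleted tcm_qla2xxx_mapping_dir() is not lost: the conversion above switches to a common target-core helper that performs the same inversion for every fabric driver. A standalone sketch of that mapping, mirroring the removed code (the in-tree helper's exact home is given by this patch, not by this note):

static inline enum dma_data_direction
target_reverse_dma_direction(struct se_cmd *se_cmd)
{
	/* BIDI commands keep a bidirectional mapping. */
	if (se_cmd->se_cmd_flags & SCF_BIDI)
		return DMA_BIDIRECTIONAL;

	switch (se_cmd->data_direction) {
	case DMA_TO_DEVICE:	/* WRITE: HBA DMAs received data into memory */
		return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:	/* READ: HBA DMAs data out of memory */
		return DMA_TO_DEVICE;
	case DMA_NONE:
	default:
		return DMA_NONE;
	}
}
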
@@ -656,7 +636,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
struct qla_tgt_cmd, se_cmd);
cmd->bufflen = se_cmd->data_length;
- cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
cmd->sg_cnt = se_cmd->t_data_nents;
@@ -680,7 +660,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg = NULL;
cmd->sg_cnt = 0;
cmd->offset = 0;
- cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -939,11 +919,19 @@ DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
+DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
+QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+ &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
NULL,
};
@@ -1042,6 +1030,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+ QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1;
ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -1474,15 +1463,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
- pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
- sess,
- sess->port_name[0], sess->port_name[1],
- sess->port_name[2], sess->port_name[3],
- sess->port_name[4], sess->port_name[5],
- sess->port_name[6], sess->port_name[7],
- sess->loop_id, loop_id,
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
- s_id.b.domain, s_id.b.area, s_id.b.al_pa);
+ pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
+ sess, sess->port_name,
+ sess->loop_id, loop_id, sess->s_id.b.domain,
+ sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
+ s_id.b.area, s_id.b.al_pa);
if (sess->loop_id != loop_id) {
/*
@@ -1740,7 +1725,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
tcm_qla2xxx_check_demo_write_protect,
.tpg_check_prod_mode_write_protect =
tcm_qla2xxx_check_prod_write_protect,
- .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
@@ -1788,7 +1773,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
- .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 9ba075fe9781..329327528a55 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -29,6 +29,7 @@ struct tcm_qla2xxx_tpg_attrib {
int cache_dynamic_acls;
int demo_mode_write_protect;
int prod_mode_write_protect;
+ int demo_mode_login_only;
};
struct tcm_qla2xxx_tpg {
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index d607eb8e24cb..8196c2f7915c 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -259,8 +259,8 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
* Return: On success return QLA_SUCCESS
* On error return QLA_ERROR
**/
-static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
- uint32_t *data, uint32_t count)
+int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+ uint32_t *data, uint32_t count)
{
int i, j;
uint32_t agt_ctrl;
@@ -1473,9 +1473,9 @@ int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
__func__));
}
- /* For ISP8324, Reset owner is NIC, iSCSI or FCOE based on priority
- * and which drivers are present. Unlike ISP8022, the function setting
- * NEED_RESET, may not be the Reset owner. */
+ /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on
+ * priority and which drivers are present. Unlike ISP8022, the function
+ * setting NEED_RESET may not be the Reset owner. */
if (qla4_83xx_can_perform_reset(ha))
set_bit(AF_8XXX_RST_OWNER, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index fab237fa32cc..a0de6e25ea5a 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -290,4 +290,38 @@ struct qla4_83xx_idc_information {
uint32_t info3; /* IDC additional info */
};
+#define QLA83XX_PEX_DMA_ENGINE_INDEX 8
+#define QLA83XX_PEX_DMA_BASE_ADDRESS 0x77320000
+#define QLA83XX_PEX_DMA_NUM_OFFSET 0x10000
+#define QLA83XX_PEX_DMA_CMD_ADDR_LOW 0x0
+#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH 0x04
+#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL 0x08
+
+#define QLA83XX_PEX_DMA_READ_SIZE (16 * 1024)
+#define QLA83XX_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */
+
+/* Read Memory: For Pex-DMA */
+struct qla4_83xx_minidump_entry_rdmem_pex_dma {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t desc_card_addr;
+ uint16_t dma_desc_cmd;
+ uint8_t rsvd[2];
+ uint32_t start_dma_cmd;
+ uint8_t rsvd2[12];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+struct qla4_83xx_pex_dma_descriptor {
+ struct {
+ uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+ uint8_t rsvd[2];
+ uint16_t dma_desc_cmd;
+ } cmd;
+ uint64_t src_addr;
+ uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func,
+ * 8-15: desc-cmd */
+ uint8_t rsvd[24];
+} __packed;
+
#endif
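
To make the register layout implied by these defines concrete: the engine number (fetched at run time from slot QLA83XX_PEX_DMA_ENGINE_INDEX of the minidump template's saved_state_array, as the ql4_nx.c hunks below show) selects a 0x10000-byte window above the base address, and the three command registers sit at fixed offsets inside that window. A hypothetical helper, not part of the patch:

static uint64_t qla4_83xx_pex_dma_reg(uint32_t dma_eng_num,
				      uint32_t reg_offset)
{
	/* reg_offset is QLA83XX_PEX_DMA_CMD_ADDR_LOW/_HIGH or
	 * QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL.
	 */
	return QLA83XX_PEX_DMA_BASE_ADDRESS +
	       ((uint64_t)dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET) +
	       reg_offset;
}
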
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 19ee55a6226c..463239c972b0 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -83,7 +83,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
QLA8XXX_DEV_NEED_RESET);
if (is_qla8022(ha) ||
- (is_qla8032(ha) &&
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
qla4_83xx_can_perform_reset(ha))) {
set_bit(AF_8XXX_RST_OWNER, &ha->flags);
set_bit(AF_FW_RECOVERY, &ha->flags);
@@ -158,14 +158,12 @@ qla4xxx_fw_version_show(struct device *dev,
if (is_qla80XX(ha))
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
- ha->firmware_version[0],
- ha->firmware_version[1],
- ha->patch_number, ha->build_number);
+ ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
else
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
- ha->firmware_version[0],
- ha->firmware_version[1],
- ha->patch_number, ha->build_number);
+ ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
}
static ssize_t
@@ -181,8 +179,8 @@ qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
- ha->iscsi_minor);
+ return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major,
+ ha->fw_info.iscsi_minor);
}
static ssize_t
@@ -191,8 +189,8 @@ qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
- ha->bootload_major, ha->bootload_minor,
- ha->bootload_patch, ha->bootload_build);
+ ha->fw_info.bootload_major, ha->fw_info.bootload_minor,
+ ha->fw_info.bootload_patch, ha->fw_info.bootload_build);
}
static ssize_t
@@ -259,6 +257,63 @@ qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
}
+static ssize_t
+qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date,
+ ha->fw_info.fw_build_time);
+}
+
+static ssize_t
+qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user);
+}
+
+static ssize_t
+qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp);
+}
+
+static ssize_t
+qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	char *load_src = "Unknown";
+
+ switch (ha->fw_info.fw_load_source) {
+ case 1:
+ load_src = "Flash Primary";
+ break;
+ case 2:
+ load_src = "Flash Secondary";
+ break;
+ case 3:
+ load_src = "Host Download";
+ break;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", load_src);
+}
+
+static ssize_t
+qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ qla4xxx_about_firmware(ha);
+ return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs,
+ ha->fw_uptime_msecs);
+}
+
static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
@@ -269,6 +324,12 @@ static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
+static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL);
+static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL);
+static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show,
+ NULL);
+static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL);
+static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL);
struct device_attribute *qla4xxx_host_attrs[] = {
&dev_attr_fw_version,
@@ -281,5 +342,10 @@ struct device_attribute *qla4xxx_host_attrs[] = {
&dev_attr_phy_port_num,
&dev_attr_iscsi_func_cnt,
&dev_attr_hba_model,
+ &dev_attr_fw_timestamp,
+ &dev_attr_fw_build_user,
+ &dev_attr_fw_ext_timestamp,
+ &dev_attr_fw_load_src,
+ &dev_attr_fw_uptime,
NULL,
};
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 8acdc582ff6d..cf8fdf1d1257 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2011 QLogic Corporation
+ * Copyright (c) 2011-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 77b7c594010f..5649e9ef59a8 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -141,21 +141,22 @@ void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
ql4_printk(KERN_INFO, ha,
- "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n"
+ "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
" PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
- " PEG_NET_4_PC: 0x%x\n", ha->host_no,
- __func__, halt_status1, halt_status2,
+ " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__,
+ ha->pdev->device, halt_status1, halt_status2,
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
ql4_printk(KERN_INFO, ha,
- "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n"
+ "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
- ha->host_no, __func__, halt_status1, halt_status2);
+ ha->host_no, __func__, ha->pdev->device,
+ halt_status1, halt_status2);
}
}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ddf16a86bbf5..084d1fd59c9e 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -64,6 +64,10 @@
#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032
#endif
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8042
+#define PCI_DEVICE_ID_QLOGIC_ISP8042 0x8042
+#endif
+
#define ISP4XXX_PCI_FN_1 0x1
#define ISP4XXX_PCI_FN_2 0x3
@@ -201,6 +205,7 @@
#define MAX_RESET_HA_RETRIES 2
#define FW_ALIVE_WAIT_TOV 3
+#define IDC_EXTEND_TOV 8
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
@@ -301,6 +306,7 @@ struct ddb_entry {
struct qla_ddb_index {
struct list_head list;
uint16_t fw_ddb_idx;
+ uint16_t flash_ddb_idx;
struct dev_db_entry fw_ddb;
uint8_t flash_isid[6];
};
@@ -335,6 +341,7 @@ struct ql4_tuple_ddb {
#define DF_BOOT_TGT 1 /* Boot target entry */
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
+#define DF_DISABLE_RELOGIN 4 /* Disable relogin to device */
enum qla4_work_type {
QLA4_EVENT_AEN,
@@ -557,6 +564,7 @@ struct scsi_qla_host {
#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
#define DPC_POST_IDC_ACK 23 /* 0x00200000 */
+#define DPC_RESTORE_ACB 24 /* 0x01000000 */
struct Scsi_Host *host; /* pointer to host data */
uint32_t tot_ddbs;
@@ -734,12 +742,9 @@ struct scsi_qla_host {
struct iscsi_iface *iface_ipv6_1;
/* --- From About Firmware --- */
- uint16_t iscsi_major;
- uint16_t iscsi_minor;
- uint16_t bootload_major;
- uint16_t bootload_minor;
- uint16_t bootload_patch;
- uint16_t bootload_build;
+ struct about_fw_info fw_info;
+ uint32_t fw_uptime_secs; /* seconds elapsed since fw bootup */
+ uint32_t fw_uptime_msecs; /* milliseconds beyond elapsed seconds */
uint16_t def_timeout; /* Default login timeout */
uint32_t flash_state;
@@ -780,9 +785,11 @@ struct scsi_qla_host {
uint32_t *reg_tbl;
struct qla4_83xx_reset_template reset_tmplt;
struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address
- for ISP8324 */
+							   for ISP8324 and
+							   ISP8042 */
uint32_t pf_bit;
struct qla4_83xx_idc_information idc_info;
+ struct addr_ctrl_blk *saved_acb;
};
struct ql4_task_data {
@@ -850,9 +857,14 @@ static inline int is_qla8032(struct scsi_qla_host *ha)
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
}
+static inline int is_qla8042(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042;
+}
+
static inline int is_qla80XX(struct scsi_qla_host *ha)
{
- return is_qla8022(ha) || is_qla8032(ha);
+ return is_qla8022(ha) || is_qla8032(ha) || is_qla8042(ha);
}
static inline int is_aer_supported(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index c7b8892b5a83..1243e5942b76 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -458,6 +458,7 @@ struct qla_flt_region {
#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
#define MBOX_CMD_IDC_ACK 0x0101
+#define MBOX_CMD_IDC_TIME_EXTEND 0x0102
#define MBOX_CMD_PORT_RESET 0x0120
#define MBOX_CMD_SET_PORT_CONFIG 0x0122
@@ -502,6 +503,7 @@ struct qla_flt_region {
#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
#define MBOX_ASTS_IDC_COMPLETE 0x8100
#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
+#define MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION 0x8102
#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@@ -512,6 +514,10 @@ struct qla_flt_region {
#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022
#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
+/* ACB Configuration Defines */
+#define ACB_CONFIG_DISABLE 0x00
+#define ACB_CONFIG_SET 0x01
+
/* ACB State Defines */
#define ACB_STATE_UNCONFIGURED 0x00
#define ACB_STATE_INVALID 0x01
@@ -533,6 +539,10 @@ struct qla_flt_region {
#define ENABLE_INTERNAL_LOOPBACK 0x04
#define ENABLE_EXTERNAL_LOOPBACK 0x08
+/* generic defines to enable/disable params */
+#define QL4_PARAM_DISABLE 0
+#define QL4_PARAM_ENABLE 1
+
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
@@ -955,7 +965,7 @@ struct about_fw_info {
uint16_t bootload_minor; /* 46 - 47 */
uint16_t bootload_patch; /* 48 - 49 */
uint16_t bootload_build; /* 4A - 4B */
- uint8_t reserved2[180]; /* 4C - FF */
+ uint8_t extended_timestamp[180];/* 4C - FF */
};
struct crash_record {
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 4a428009f699..5cef2527180a 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -83,6 +83,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
char *password, int bidi, uint16_t *chap_index);
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+ uint16_t idx, int bidi);
void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
@@ -266,6 +268,14 @@ int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
dma_addr_t dma_addr);
int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
char *password, uint16_t chap_index);
+int qla4xxx_disable_acb(struct scsi_qla_host *ha);
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t acb_dma);
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+ uint32_t acb_type, uint32_t len);
+int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
+int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
+ uint64_t addr, uint32_t *data, uint32_t count);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 8fc8548ba4ba..7456eeb2e58a 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -107,7 +107,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
(unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
writel(0,
(unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
writel(0,
(unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
writel(0,
@@ -940,7 +940,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
* while switching from polling to interrupt mode. IOCB interrupts are
* enabled via isp_ops->enable_intrs.
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
qla4_83xx_enable_mbox_intrs(ha);
if (qla4xxx_about_firmware(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 6f4decd44c6a..655b7bb644d9 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -82,3 +82,15 @@ qla4xxx_disable_intrs(struct scsi_qla_host *ha)
__qla4xxx_disable_intrs(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+
+static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry)
+{
+ int type;
+
+ if (chap_entry->flags & BIT_7)
+ type = LOCAL_CHAP;
+ else
+ type = BIDI_CHAP;
+
+ return type;
+}
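
BIT_7 of the flash entry's flags is what separates a local (unidirectional) CHAP record from a bidirectional one; it is the same bit the BIT_6 -> BIT_7 correction in ql4_mbx.c below now tests. A hypothetical caller of the new helper:

static void qla4xxx_show_chap_type(struct ql4_chap_table *chap_entry)
{
	if (qla4xxx_get_chap_type(chap_entry) == LOCAL_CHAP)
		pr_info("CHAP entry holds a unidirectional secret\n");
	else
		pr_info("CHAP entry holds a bidirectional (BIDI) secret\n");
}
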
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index fad71ed067ec..e5697ab144d2 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 482287f4005f..7dff09f09b71 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -588,7 +588,7 @@ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
{
int rval = 1;
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
(ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -621,7 +621,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
__le32 __iomem *mailbox_out;
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
else if (is_qla8022(ha))
mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
@@ -665,7 +665,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
qla4xxx_dump_registers(ha);
if ((is_qla8022(ha) && ql4xdontresethba) ||
- (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
ha->host_no, __func__));
} else {
@@ -744,17 +745,23 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
* mbox_sts[3] = new ACB state */
if ((mbox_sts[3] == ACB_STATE_VALID) &&
((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
- (mbox_sts[2] == ACB_STATE_ACQUIRING)))
+ (mbox_sts[2] == ACB_STATE_ACQUIRING))) {
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
- else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
- (mbox_sts[2] == ACB_STATE_VALID)) {
+ } else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
+ (mbox_sts[2] == ACB_STATE_VALID)) {
if (is_qla80XX(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
else
set_bit(DPC_RESET_HA, &ha->dpc_flags);
- } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+ } else if (mbox_sts[3] == ACB_STATE_DISABLING) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
+ ha->host_no, __func__);
+ } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) {
complete(&ha->disable_acb_comp);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
+ ha->host_no, __func__);
+ }
break;
case MBOX_ASTS_MAC_ADDRESS_CHANGED:
@@ -836,7 +843,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
{
uint32_t opcode;
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
ha->host_no, mbox_sts[0],
@@ -858,7 +865,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
case MBOX_ASTS_IDC_COMPLETE:
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
ha->host_no, mbox_sts[0],
@@ -868,10 +875,15 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
"scsi:%ld: AEN %04x IDC Complete notification\n",
ha->host_no, mbox_sts[0]));
- if (qla4_83xx_loopback_in_progress(ha))
+ if (qla4_83xx_loopback_in_progress(ha)) {
set_bit(AF_LOOPBACK, &ha->flags);
- else
+ } else {
clear_bit(AF_LOOPBACK, &ha->flags);
+ if (ha->saved_acb)
+ set_bit(DPC_RESTORE_ACB,
+ &ha->dpc_flags);
+ }
+ qla4xxx_wake_dpc(ha);
}
break;
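
The restore itself runs later in DPC context; the DPC hunk is not part of this excerpt, so the following is only a plausible sketch of that counterpart:

	/* Hypothetical DPC-side handling: once loopback ends and
	 * DPC_RESTORE_ACB has been set, push the parked ACB back.
	 */
	if (test_and_clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
		if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != QLA_SUCCESS)
			ql4_printk(KERN_ERR, ha, "%s: ACB restore failed\n",
				   __func__);
	}
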
@@ -886,6 +898,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
ha->host_no, mbox_sts[0]));
break;
+ case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
+ ha->host_no, mbox_sts[0]));
+ break;
+
case MBOX_ASTS_INITIALIZATION_FAILED:
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
@@ -1297,7 +1320,7 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id)
uint32_t intr_status;
uint8_t reqs_count = 0;
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
qla4_83xx_mailbox_intr_handler(irq, dev_id);
} else {
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1334,7 +1357,7 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
uint32_t ival = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
if (ival == 0) {
ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
@@ -1425,10 +1448,10 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
goto try_intx;
if (ql4xenablemsix == 2) {
- /* Note: MSI Interrupts not supported for ISP8324 */
- if (is_qla8032(ha)) {
- ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, Falling back-to INTx mode\n",
- __func__);
+ /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+			ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, falling back to INTx mode\n",
+ __func__, ha->pdev->device);
goto try_intx;
}
goto try_msi;
@@ -1444,9 +1467,9 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
"MSI-X: Enabled (0x%X).\n", ha->revision_id));
goto irq_attached;
} else {
- if (is_qla8032(ha)) {
- ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: Falling back-to INTx mode. ret = %d\n",
- __func__, ret);
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+			ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: falling back to INTx mode. ret = %d\n",
+ __func__, ha->pdev->device, ret);
goto try_intx;
}
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index a501beab3ffe..22cbd005bdf4 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,10 +1,11 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
+#include <linux/ctype.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
@@ -52,7 +53,7 @@ static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
{
int rval = 1;
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
rval = 0;
@@ -223,7 +224,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0 |
CRB_NIU_XG_PAUSE_CTL_P1);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
__func__);
qla4_83xx_disable_pause(ha);
@@ -1270,16 +1271,28 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
}
/* Save version information. */
- ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
- ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
- ha->patch_number = le16_to_cpu(about_fw->fw_patch);
- ha->build_number = le16_to_cpu(about_fw->fw_build);
- ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
- ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
- ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
- ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
- ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
- ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
+ ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major);
+ ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor);
+ ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch);
+ ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build);
+ memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date,
+ sizeof(about_fw->fw_build_date));
+ memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time,
+ sizeof(about_fw->fw_build_time));
+ strcpy((char *)ha->fw_info.fw_build_user,
+ skip_spaces((char *)about_fw->fw_build_user));
+ ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source);
+ ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+ ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+ ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major);
+ ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+ ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+ ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build);
+ strcpy((char *)ha->fw_info.extended_timestamp,
+ skip_spaces((char *)about_fw->extended_timestamp));
+
+ ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]);
+ ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]);
status = QLA_SUCCESS;
exit_about_fw:
@@ -1517,13 +1530,26 @@ exit_get_chap:
return ret;
}
-static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
- char *password, uint16_t idx, int bidi)
+/**
+ * qla4xxx_set_chap - Make a chap entry at the given index
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to set
+ * @password: CHAP password to set
+ * @idx: CHAP index at which to make the entry
+ * @bidi: type of chap entry (chap_in or chap_out)
+ *
+ * Create chap entry at the given index with the information provided.
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+ uint16_t idx, int bidi)
{
int ret = 0;
int rval = QLA_ERROR;
uint32_t offset = 0;
struct ql4_chap_table *chap_table;
+ uint32_t chap_size = 0;
dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
@@ -1541,7 +1567,20 @@ static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
- offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table));
+
+ if (is_qla40XX(ha)) {
+ chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
+ offset = FLASH_CHAP_OFFSET;
+	} else { /* A single region holds the CHAP info for both ports and
+		  * is split in half, one half per port.
+		  */
+ chap_size = ha->hw.flt_chap_size / 2;
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ if (ha->port_num == 1)
+ offset += chap_size;
+ }
+
+ offset += (idx * sizeof(struct ql4_chap_table));
rval = qla4xxx_set_flash(ha, chap_dma, offset,
sizeof(struct ql4_chap_table),
FLASH_OPT_RMW_COMMIT);
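
The offset arithmetic is the subtle part of this hunk; a standalone sketch of it (hypothetical helper, field names mirror the driver structures):

static uint32_t chap_flash_offset(struct scsi_qla_host *ha, uint16_t idx)
{
	uint32_t offset;

	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET;
	} else {
		/* The FLT CHAP region holds both ports' tables back to
		 * back, so port 1 starts half way into the region.
		 */
		offset = FLASH_RAW_ACCESS_ADDR +
			 (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += ha->hw.flt_chap_size / 2;
	}

	return offset + idx * sizeof(struct ql4_chap_table);
}
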
@@ -1598,7 +1637,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
goto exit_unlock_uni_chap;
}
- if (!(chap_table->flags & BIT_6)) {
+ if (!(chap_table->flags & BIT_7)) {
ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
rval = QLA_ERROR;
goto exit_unlock_uni_chap;
@@ -1723,6 +1762,45 @@ int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
return status;
}
+/**
+ * qla4_84xx_extend_idc_tmo - Extend IDC Timeout.
+ * @ha: Pointer to host adapter structure.
+ * @ext_tmo: idc timeout value
+ *
+ * Requests firmware to extend the idc timeout value.
+ **/
+static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ ext_tmo &= 0xf;
+
+ mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND;
+ mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) |
+ (ext_tmo << 8)); /* new timeout */
+ mbox_cmd[2] = ha->idc_info.info1;
+ mbox_cmd[3] = ha->idc_info.info2;
+ mbox_cmd[4] = ha->idc_info.info3;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: failed status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n",
+ __func__, ext_tmo);
+ }
+
+ return QLA_SUCCESS;
+}
+
int qla4xxx_disable_acb(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
@@ -1739,6 +1817,23 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha)
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
"failed w/ status %04X %04X %04X", __func__,
mbox_sts[0], mbox_sts[1], mbox_sts[2]));
+ } else {
+ if (is_qla8042(ha) &&
+ (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
+ /*
+			 * The Disable ACB mailbox command takes time to
+			 * complete based on the total number of targets
+			 * connected. For 512 targets it took approximately
+			 * 5 secs, so the timeout is set to 8 secs, leaving
+			 * a 3 sec buffer.
+ */
+ qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV);
+ if (!wait_for_completion_timeout(&ha->disable_acb_comp,
+ IDC_EXTEND_TOV * HZ)) {
+ ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n",
+ __func__);
+ }
+ }
}
return status;
}
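
The descriptor manipulation in qla4_84xx_extend_idc_tmo() encodes the new timeout in bits [11:8] of the saved IDC request descriptor; reduced to one line:

/* Mask out the old timeout nibble, shift in the new one. */
static uint32_t idc_desc_with_tmo(uint32_t request_desc, uint32_t ext_tmo)
{
	return (request_desc & 0xfffff0ff) | ((ext_tmo & 0xf) << 8);
}
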
@@ -2145,8 +2240,80 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
mbox_sts[0]);
else
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n",
- __func__));
+ ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__);
return status;
}
+
+int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct addr_ctrl_blk *acb = NULL;
+ uint32_t acb_len = sizeof(struct addr_ctrl_blk);
+ int rval = QLA_SUCCESS;
+ dma_addr_t acb_dma;
+
+ acb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &acb_dma, GFP_KERNEL);
+ if (!acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__);
+ rval = QLA_ERROR;
+ goto exit_config_acb;
+ }
+ memset(acb, 0, acb_len);
+
+ switch (acb_config) {
+ case ACB_CONFIG_DISABLE:
+ rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ if (!ha->saved_acb)
+ ha->saved_acb = kzalloc(acb_len, GFP_KERNEL);
+
+ if (!ha->saved_acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
+ __func__);
+ rval = QLA_ERROR;
+			goto exit_free_acb;
+ }
+ memcpy(ha->saved_acb, acb, acb_len);
+ break;
+ case ACB_CONFIG_SET:
+
+ if (!ha->saved_acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_free_acb;
+ }
+
+ memcpy(acb, ha->saved_acb, acb_len);
+ kfree(ha->saved_acb);
+ ha->saved_acb = NULL;
+
+ rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n",
+ __func__);
+ }
+
+exit_free_acb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
+ acb_dma);
+exit_config_acb:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s %s\n", __func__,
+ rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
+ return rval;
+}
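
A plausible call pattern (hypothetical; the actual callers sit outside this excerpt): park the ACB before a disruptive diagnostic such as a loopback test, and restore it once the IDC-complete AEN arrives:

	if (qla4_84xx_config_acb(ha, ACB_CONFIG_DISABLE) != QLA_SUCCESS)
		return QLA_ERROR;

	/* ... the disruptive operation runs; on IDC complete the AEN
	 * handler above sets DPC_RESTORE_ACB, whose handler would call:
	 */

	if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != QLA_SUCCESS)
		return QLA_ERROR;
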
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 325db1f2c091..3bf418fbd432 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index dba0514d1c70..e97d79ff16f7 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index eaf00c162eb2..d001202d3565 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -1514,11 +1514,11 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
drv_active |= (1 << ha->func_num);
else
drv_active |= (1 << (ha->func_num * 4));
@@ -1536,11 +1536,11 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
drv_active &= ~(1 << (ha->func_num));
else
drv_active &= ~(1 << (ha->func_num * 4));
@@ -1559,11 +1559,11 @@ inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
rval = drv_state & (1 << ha->func_num);
else
rval = drv_state & (1 << (ha->func_num * 4));
@@ -1581,11 +1581,11 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
drv_state |= (1 << ha->func_num);
else
drv_state |= (1 << (ha->func_num * 4));
@@ -1602,11 +1602,11 @@ void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
drv_state &= ~(1 << ha->func_num);
else
drv_state &= ~(1 << (ha->func_num * 4));
@@ -1624,11 +1624,11 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function.
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
qsnt_state |= (1 << ha->func_num);
else
qsnt_state |= (2 << (ha->func_num * 4));
@@ -1737,6 +1737,208 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
*d_ptr = data_ptr;
}
+static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
+
+ /* Read the pex-dma's command-status-and-control register. */
+ rval = ha->isp_ops->rd_reg_indirect(ha,
+ (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+
+ if (rval)
+ return QLA_ERROR;
+
+ /* Check if requested pex-dma engine is available. */
+ if (cmd_sts_and_cntrl & BIT_31)
+ return QLA_SUCCESS;
+ else
+ return QLA_ERROR;
+}
+
+static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha,
+ struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+ int rval = QLA_SUCCESS, wait = 0;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW,
+ m_hdr->desc_card_addr);
+ if (rval)
+ goto error_exit;
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0);
+ if (rval)
+ goto error_exit;
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL,
+ m_hdr->start_dma_cmd);
+ if (rval)
+ goto error_exit;
+
+ /* Wait for dma operation to complete. */
+ for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) {
+ rval = ha->isp_ops->rd_reg_indirect(ha,
+ (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ goto error_exit;
+
+ if ((cmd_sts_and_cntrl & BIT_1) == 0)
+ break;
+ else
+ udelay(10);
+ }
+
+	/* Wait a max of 100 ms, otherwise fall back to the rdmem entry read */
+ if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) {
+ rval = QLA_ERROR;
+ goto error_exit;
+ }
+
+error_exit:
+ return rval;
+}
+
+static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ int rval = QLA_SUCCESS;
+ struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+ uint32_t size, read_size;
+ uint8_t *data_ptr = (uint8_t *)*d_ptr;
+ void *rdmem_buffer = NULL;
+ dma_addr_t rdmem_dma;
+ struct qla4_83xx_pex_dma_descriptor dma_desc;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+
+ rval = qla4_83xx_check_dma_engine_state(ha);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: DMA engine not available. Falling back to rdmem-read.\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
+ m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr;
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ QLA83XX_PEX_DMA_READ_SIZE,
+ &rdmem_dma, GFP_KERNEL);
+ if (!rdmem_buffer) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Unable to allocate rdmem dma buffer\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
+ /* Prepare pex-dma descriptor to be written to MS memory. */
+ /* dma-desc-cmd layout:
+ * 0-3: dma-desc-cmd 0-3
+	 * 4-7: pci function number
+ * 8-15: dma-desc-cmd 8-15
+ */
+ dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+ dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+ dma_desc.dma_bus_addr = rdmem_dma;
+
+ size = 0;
+ read_size = 0;
+ /*
+ * Perform rdmem operation using pex-dma.
+ * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE.
+ */
+ while (read_size < m_hdr->read_data_size) {
+ if (m_hdr->read_data_size - read_size >=
+ QLA83XX_PEX_DMA_READ_SIZE)
+ size = QLA83XX_PEX_DMA_READ_SIZE;
+ else {
+ size = (m_hdr->read_data_size - read_size);
+
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev,
+ QLA83XX_PEX_DMA_READ_SIZE,
+ rdmem_buffer, rdmem_dma);
+
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size,
+ &rdmem_dma,
+ GFP_KERNEL);
+ if (!rdmem_buffer) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Unable to allocate rdmem dma buffer\n",
+ __func__));
+ return QLA_ERROR;
+ }
+ dma_desc.dma_bus_addr = rdmem_dma;
+ }
+
+ dma_desc.src_addr = m_hdr->read_addr + read_size;
+ dma_desc.cmd.read_data_size = size;
+
+ /* Prepare: Write pex-dma descriptor to MS memory. */
+ rval = qla4_83xx_ms_mem_write_128b(ha,
+ (uint64_t)m_hdr->desc_card_addr,
+ (uint32_t *)&dma_desc,
+ (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
+		if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: Error writing rdmem-dma-init to MS !!!\n",
+ __func__);
+ goto error_exit;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n",
+ __func__, size));
+ /* Execute: Start pex-dma operation. */
+ rval = qla4_83xx_start_pex_dma(ha, m_hdr);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): start-pex-dma failed rval=0x%x\n",
+ ha->host_no, rval));
+ goto error_exit;
+ }
+
+ memcpy(data_ptr, rdmem_buffer, size);
+ data_ptr += size;
+ read_size += size;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+
+ *d_ptr = (uint32_t *)data_ptr;
+
+error_exit:
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer,
+ rdmem_dma);
+
+ return rval;
+}
+
static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
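
qla4_83xx_ms_mem_write_128b() moves data in 128-bit (16-byte) beats, which is why the descriptor count above is sizeof(struct qla4_83xx_pex_dma_descriptor)/16: the __packed descriptor is 8 (cmd) + 8 (src_addr) + 8 (dma_bus_addr) + 24 (rsvd) = 48 bytes, exactly three beats. An illustrative compile-time check (assuming the kernel's BUILD_BUG_ON):

static inline void qla4_83xx_pex_dma_desc_check(void)
{
	BUILD_BUG_ON(sizeof(struct qla4_83xx_pex_dma_descriptor) % 16 != 0);
}
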
@@ -2068,7 +2270,7 @@ static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
-static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
@@ -2150,6 +2352,28 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
return QLA_SUCCESS;
}
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t *data_ptr = *d_ptr;
+ int rval = QLA_SUCCESS;
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ }
+ } else {
+ rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ }
+ *d_ptr = data_ptr;
+ return rval;
+}
+
static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
int index)
@@ -2398,13 +2622,13 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
(((uint8_t *)ha->fw_dump_tmplt_hdr) +
tmplt_hdr->first_entry_offset);
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
tmplt_hdr->ocm_window_reg[ha->func_num];
/* Walk through the entry headers - validate/perform required action */
for (i = 0; i < num_entry_hdr; i++) {
- if (data_collected >= ha->fw_dump_size) {
+ if (data_collected > ha->fw_dump_size) {
ql4_printk(KERN_INFO, ha,
"Data collected: [0x%x], Total Dump size: [0x%x]\n",
data_collected, ha->fw_dump_size);
@@ -2455,7 +2679,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
&data_ptr);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
rval = qla4_83xx_minidump_process_rdrom(ha,
entry_hdr,
&data_ptr);
@@ -2496,7 +2720,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
&data_ptr);
break;
case QLA83XX_POLLRD:
- if (!is_qla8032(ha)) {
+ if (is_qla8022(ha)) {
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
}
@@ -2506,7 +2730,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
case QLA83XX_RDMUX2:
- if (!is_qla8032(ha)) {
+ if (is_qla8022(ha)) {
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
}
@@ -2514,7 +2738,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
&data_ptr);
break;
case QLA83XX_POLLRDMWR:
- if (!is_qla8032(ha)) {
+ if (is_qla8022(ha)) {
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
}
@@ -2529,9 +2753,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
break;
}
- data_collected = (uint8_t *)data_ptr -
- ((uint8_t *)((uint8_t *)ha->fw_dump +
- ha->fw_dump_tmplt_size));
+ data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump;
skip_nxt_entry:
/* next entry in the template */
entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
@@ -2539,10 +2761,11 @@ skip_nxt_entry:
entry_hdr->entry_size);
}
- if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
+ if (data_collected != ha->fw_dump_size) {
ql4_printk(KERN_INFO, ha,
"Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
data_collected, ha->fw_dump_size);
+ rval = QLA_ERROR;
goto md_failed;
}
@@ -2642,10 +2865,10 @@ dev_initialize:
QLA8XXX_DEV_INITIALIZING);
/*
- * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
- * device goes to INIT state.
+ * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set,
+ * reset it after device goes to INIT state.
*/
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
if (idc_ctrl & GRACEFUL_RESET_BIT1) {
qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
@@ -2846,7 +3069,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
* If we are the first driver to load and
* ql4xdontresethba is not set, clear IDC_CTRL BIT0.
*/
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
qla4_83xx_clear_idc_dontreset(ha);
@@ -2854,7 +3077,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
qla4_82xx_set_idc_ver(ha);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
rval = qla4_83xx_set_idc_ver(ha);
if (rval == QLA_ERROR)
qla4_8xxx_clear_drv_active(ha);
@@ -2922,11 +3145,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
break;
case QLA8XXX_DEV_NEED_RESET:
/*
- * For ISP8324, if NEED_RESET is set by any driver,
- * it should be honored, irrespective of IDC_CTRL
- * DONTRESET_BIT0
+ * For ISP8324 and ISP8042, if NEED_RESET is set by any
+ * driver, it should be honored, irrespective of
+ * IDC_CTRL DONTRESET_BIT0
*/
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
qla4_83xx_need_reset_handler(ha);
} else if (is_qla8022(ha)) {
if (!ql4xdontresethba) {
@@ -2976,7 +3199,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
int retval;
/* clear the interrupt */
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
writel(0, &ha->qla4_83xx_reg->risc_intr);
readl(&ha->qla4_83xx_reg->risc_intr);
} else if (is_qla8022(ha)) {
@@ -3094,7 +3317,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
if (is_qla8022(ha)) {
qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
flt_addr << 2, OPTROM_BURST_SIZE);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
(uint8_t *)ha->request_ring,
0x400);
@@ -3326,7 +3549,7 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
qla4_82xx_get_fdt_info(ha);
qla4_82xx_get_idc_param(ha);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
qla4_83xx_get_idc_param(ha);
}
@@ -3436,7 +3659,7 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
}
/* Make sure we receive the minimum required data to cache internally */
- if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) <
+ if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) <
offsetof(struct mbx_sys_info, reserved)) {
DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
" error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 9dc0bbfe50d5..14500a0f62cc 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index b246b3c26912..6dc3e99b7f9c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -149,6 +149,8 @@ static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf);
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
+ int len);
/*
* SCSI host template entry points
@@ -252,6 +254,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.send_ping = qla4xxx_send_ping,
.get_chap = qla4xxx_get_chap_list,
.delete_chap = qla4xxx_delete_chap,
+ .set_chap = qla4xxx_set_chap_entry,
.get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
.set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
.new_flashnode = qla4xxx_sysfs_ddb_add,
@@ -378,6 +381,44 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
case ISCSI_PARAM_PASSWORD:
case ISCSI_PARAM_USERNAME_IN:
case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ case ISCSI_PARAM_PORTAL_TYPE:
+ case ISCSI_PARAM_CHAP_AUTH_EN:
+ case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+ case ISCSI_PARAM_BIDI_CHAP_EN:
+ case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_PARAM_DEF_TIME2WAIT:
+ case ISCSI_PARAM_DEF_TIME2RETAIN:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+ case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+ case ISCSI_PARAM_TCP_WSF_DISABLE:
+ case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+ case ISCSI_PARAM_TCP_TIMER_SCALE:
+ case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+ case ISCSI_PARAM_TCP_XMIT_WSF:
+ case ISCSI_PARAM_TCP_RECV_WSF:
+ case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+ case ISCSI_PARAM_IPV4_TOS:
+ case ISCSI_PARAM_IPV6_TC:
+ case ISCSI_PARAM_IPV6_FLOW_LABEL:
+ case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+ case ISCSI_PARAM_KEEPALIVE_TMO:
+ case ISCSI_PARAM_LOCAL_PORT:
+ case ISCSI_PARAM_ISID:
+ case ISCSI_PARAM_TSID:
+ case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_STATSN:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
return S_IRUGO;
default:
return 0;
@@ -470,6 +511,95 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
return 0;
}
+static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
+ int16_t chap_index,
+ struct ql4_chap_table **chap_entry)
+{
+ int rval = QLA_ERROR;
+ int max_chap_entries;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+ rval = QLA_ERROR;
+ goto exit_get_chap;
+ }
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (chap_index > max_chap_entries) {
+ ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+ rval = QLA_ERROR;
+ goto exit_get_chap;
+ }
+
+ *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
+ if ((*chap_entry)->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ rval = QLA_ERROR;
+ *chap_entry = NULL;
+ } else {
+ rval = QLA_SUCCESS;
+ }
+
+exit_get_chap:
+ return rval;
+}
+
+/**
+ * qla4xxx_find_free_chap_index - Find the first free CHAP index
+ * @ha: pointer to adapter structure
+ * @chap_index: CHAP index to be returned
+ *
+ * Find the first free CHAP index available in the CHAP table
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
+ uint16_t *chap_index)
+{
+ int i, rval;
+ int free_index = -1;
+ int max_chap_entries = 0;
+ struct ql4_chap_table *chap_table;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+ rval = QLA_ERROR;
+ goto exit_find_chap;
+ }
+
+ for (i = 0; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+
+ if ((chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+ (i > MAX_RESRV_CHAP_IDX)) {
+ free_index = i;
+ break;
+ }
+ }
+
+ if (free_index != -1) {
+ *chap_index = free_index;
+ rval = QLA_SUCCESS;
+ } else {
+ rval = QLA_ERROR;
+ }
+
+exit_find_chap:
+ return rval;
+}
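/*
 * A minimal standalone sketch (not part of the patch) of the slot-scan logic
 * the two CHAP helpers above share: a flat table of fixed-size entries where
 * a slot is in use only when its cookie carries the valid marker. The names
 * chap_slot, VALID_COOKIE and RESERVED_IDX_MAX are hypothetical stand-ins
 * for struct ql4_chap_table, CHAP_VALID_COOKIE and MAX_RESRV_CHAP_IDX.
 */
#include <stdint.h>
#include <stddef.h>

#define VALID_COOKIE     0x4092u /* assumed marker value */
#define RESERVED_IDX_MAX 1       /* assumed reserved index range */

struct chap_slot {
	uint16_t cookie;
	/* name/secret fields elided for brevity */
};

/*
 * Return the first free, unreserved slot, or -1 if the table is full.
 * Starting the scan at RESERVED_IDX_MAX + 1 is equivalent to the driver's
 * in-loop "i > MAX_RESRV_CHAP_IDX" check.
 */
static int find_free_slot(const struct chap_slot *tbl, size_t nslots)
{
	for (size_t i = RESERVED_IDX_MAX + 1; i < nslots; i++)
		if (tbl[i].cookie != VALID_COOKIE)
			return (int)i;
	return -1;
}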
+
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf)
{
@@ -653,6 +783,111 @@ exit_delete_chap:
return ret;
}
+/**
+ * qla4xxx_set_chap_entry - Make a CHAP entry from the given information
+ * @shost: pointer to host
+ * @data: CHAP info - credentials, index and type for the entry
+ * @len: length of data
+ *
+ * Add or update a CHAP entry with the given information
+ **/
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_chap_rec chap_rec;
+ struct ql4_chap_table *chap_entry = NULL;
+ struct iscsi_param_info *param_info;
+ struct nlattr *attr;
+ int max_chap_entries = 0;
+ int type;
+ int rem = len;
+ int rc = 0;
+
+ memset(&chap_rec, 0, sizeof(chap_rec));
+
+ nla_for_each_attr(attr, data, len, rem) {
+ param_info = nla_data(attr);
+
+ switch (param_info->param) {
+ case ISCSI_CHAP_PARAM_INDEX:
+ chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
+ break;
+ case ISCSI_CHAP_PARAM_CHAP_TYPE:
+ chap_rec.chap_type = param_info->value[0];
+ break;
+ case ISCSI_CHAP_PARAM_USERNAME:
+ memcpy(chap_rec.username, param_info->value,
+ param_info->len);
+ break;
+ case ISCSI_CHAP_PARAM_PASSWORD:
+ memcpy(chap_rec.password, param_info->value,
+ param_info->len);
+ break;
+ case ISCSI_CHAP_PARAM_PASSWORD_LEN:
+ chap_rec.password_length = param_info->value[0];
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha,
+ "%s: No such sysfs attribute\n", __func__);
+ rc = -ENOSYS;
+ goto exit_set_chap;
+ }
+ }
+
+ if (chap_rec.chap_type == CHAP_TYPE_IN)
+ type = BIDI_CHAP;
+ else
+ type = LOCAL_CHAP;
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ mutex_lock(&ha->chap_sem);
+ if (chap_rec.chap_tbl_idx < max_chap_entries) {
+ rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
+ &chap_entry);
+ if (!rc) {
+ if (type != qla4xxx_get_chap_type(chap_entry)) {
+ ql4_printk(KERN_INFO, ha,
+ "Type mismatch for CHAP entry %d\n",
+ chap_rec.chap_tbl_idx);
+ rc = -EINVAL;
+ goto exit_unlock_chap;
+ }
+
+ /* If chap index is in use then don't modify it */
+ rc = qla4xxx_is_chap_active(shost,
+ chap_rec.chap_tbl_idx);
+ if (rc) {
+ ql4_printk(KERN_INFO, ha,
+ "CHAP entry %d is in use\n",
+ chap_rec.chap_tbl_idx);
+ rc = -EBUSY;
+ goto exit_unlock_chap;
+ }
+ }
+ } else {
+ rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
+ if (rc) {
+ ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
+ rc = -EBUSY;
+ goto exit_unlock_chap;
+ }
+ }
+
+ rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
+ chap_rec.chap_tbl_idx, type);
+
+exit_unlock_chap:
+ mutex_unlock(&ha->chap_sem);
+
+exit_set_chap:
+ return rc;
+}
+
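/*
 * A sketch of the attribute-walk pattern qla4xxx_set_chap_entry uses above:
 * iterate typed {param, len, value} records, copy each into the matching
 * field of a record, and reject unknown types. The chap_param/chap_record
 * types here are hypothetical userspace stand-ins for the netlink attribute
 * stream and struct iscsi_chap_rec.
 */
#include <stdint.h>
#include <string.h>

enum { P_INDEX, P_USERNAME, P_PASSWORD };

struct chap_param { int param; int len; const void *value; };
struct chap_record { uint16_t idx; char username[256]; char password[256]; };

static int collect_params(struct chap_record *rec,
			  const struct chap_param *p, int nparams)
{
	memset(rec, 0, sizeof(*rec));
	for (int i = 0; i < nparams; i++) {
		int n = p[i].len;

		switch (p[i].param) {
		case P_INDEX:
			rec->idx = *(const uint16_t *)p[i].value;
			break;
		case P_USERNAME:
			if (n >= (int)sizeof(rec->username))
				return -1;
			memcpy(rec->username, p[i].value, n);
			break;
		case P_PASSWORD:
			if (n >= (int)sizeof(rec->password))
				return -1;
			memcpy(rec->password, p[i].value, n);
			break;
		default:
			return -1; /* unknown attribute, like -ENOSYS above */
		}
	}
	return 0;
}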
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
enum iscsi_param_type param_type,
int param, char *buf)
@@ -1417,9 +1652,12 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
struct iscsi_session *sess = cls_sess->dd_data;
struct ddb_entry *ddb_entry = sess->dd_data;
struct scsi_qla_host *ha = ddb_entry->ha;
+ struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
+ struct ql4_chap_table chap_tbl;
int rval, len;
uint16_t idx;
+ memset(&chap_tbl, 0, sizeof(chap_tbl));
switch (param) {
case ISCSI_PARAM_CHAP_IN_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username_in,
@@ -1431,14 +1669,46 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
len = sprintf(buf, "%hu\n", idx);
break;
case ISCSI_PARAM_CHAP_OUT_IDX:
- rval = qla4xxx_get_chap_index(ha, sess->username,
- sess->password, LOCAL_CHAP,
- &idx);
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+ idx = ddb_entry->chap_tbl_idx;
+ rval = QLA_SUCCESS;
+ } else {
+ rval = QLA_ERROR;
+ }
+ } else {
+ rval = qla4xxx_get_chap_index(ha, sess->username,
+ sess->password,
+ LOCAL_CHAP, &idx);
+ }
if (rval)
len = sprintf(buf, "\n");
else
len = sprintf(buf, "%hu\n", idx);
break;
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ /* First, populate session username and password for FLASH DDB,
+ * if not already done. This happens when session login fails
+ * for a FLASH DDB.
+ */
+ if (ddb_entry->ddb_type == FLASH_DDB &&
+ ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
+ !sess->username && !sess->password) {
+ idx = ddb_entry->chap_tbl_idx;
+ rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+ chap_tbl.secret,
+ idx);
+ if (!rval) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+ (char *)chap_tbl.name,
+ strlen((char *)chap_tbl.name));
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+ (char *)chap_tbl.secret,
+ chap_tbl.secret_len);
+ }
+ }
+ /* allow fall-through */
default:
return iscsi_session_get_param(cls_sess, param, buf);
}
@@ -2218,19 +2488,23 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
- fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
- fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
+ fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
fw_ddb_entry->port = cpu_to_le16(conn->port);
fw_ddb_entry->def_timeout =
cpu_to_le16(sess->default_taskmgmt_timeout);
+ if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
+ else
+ fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
+
if (conn->ipaddress)
memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
sizeof(fw_ddb_entry->ip_addr));
@@ -2257,6 +2531,96 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
return rc;
}
+static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
+ struct iscsi_session *sess,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ unsigned long options = 0;
+ uint16_t ddb_link;
+ uint16_t disc_parent;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
+ sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
+ &options);
+ sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
+ conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
+ sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
+ sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
+ sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
+ &options);
+ sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
+ sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
+ sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
+ &options);
+ sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
+ sess->discovery_auth_optional =
+ test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
+ if (test_bit(ISCSIOPT_ERL1, &options))
+ sess->erl |= BIT_1;
+ if (test_bit(ISCSIOPT_ERL0, &options))
+ sess->erl |= BIT_0;
+
+ options = le16_to_cpu(fw_ddb_entry->tcp_options);
+ conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
+ conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
+ conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
+ if (test_bit(TCPOPT_TIMER_SCALE3, &options))
+ conn->tcp_timer_scale |= BIT_3;
+ if (test_bit(TCPOPT_TIMER_SCALE2, &options))
+ conn->tcp_timer_scale |= BIT_2;
+ if (test_bit(TCPOPT_TIMER_SCALE1, &options))
+ conn->tcp_timer_scale |= BIT_1;
+
+ conn->tcp_timer_scale >>= 1;
+ conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->ip_options);
+ conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
+ conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
+ conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
+ conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
+ conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
+ conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
+ conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
+ conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
+ sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
+ COPY_ISID(sess->isid, fw_ddb_entry->isid);
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link == DDB_ISNS)
+ disc_parent = ISCSI_DISC_PARENT_ISNS;
+ else if (ddb_link == DDB_NO_LINK)
+ disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
+ else if (ddb_link < MAX_DDB_ENTRIES)
+ disc_parent = ISCSI_DISC_PARENT_SENDTGT;
+ else
+ disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
+
+ iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
+ iscsi_get_discovery_parent_name(disc_parent), 0);
+
+ iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
+ (char *)fw_ddb_entry->iscsi_alias, 0);
+}
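/*
 * A worked sketch of the option-word decoding above, assuming ERL lives in
 * two adjacent flag bits and the TCP timer scale in bits 3..1 of their
 * respective little-endian option words. The bit positions here are
 * illustrative only, not taken from the firmware spec.
 */
#include <stdint.h>

#define ERL0_BIT        7  /* hypothetical position of ISCSIOPT_ERL0 */
#define ERL1_BIT        8  /* hypothetical position of ISCSIOPT_ERL1 */
#define TIMER_SCALE_LSB 1  /* scale assumed to occupy bits 3..1 */

static unsigned decode_erl(uint16_t iscsi_options)
{
	unsigned erl = 0;

	if (iscsi_options & (1u << ERL1_BIT))
		erl |= 2; /* BIT_1 */
	if (iscsi_options & (1u << ERL0_BIT))
		erl |= 1; /* BIT_0 */
	return erl;
}

/* Collect bits 3..1, then shift down once - mirroring the ">>= 1" above. */
static unsigned decode_timer_scale(uint16_t tcp_options)
{
	unsigned scale = tcp_options & (0x7u << TIMER_SCALE_LSB);

	return scale >> TIMER_SCALE_LSB;
}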
+
static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
struct iscsi_cls_session *cls_sess,
@@ -2265,6 +2629,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
int buflen = 0;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
+ struct ql4_chap_table chap_tbl;
struct iscsi_conn *conn;
char ip_addr[DDB_IPADDR_LEN];
uint16_t options = 0;
@@ -2272,50 +2637,46 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
conn = cls_conn->dd_data;
+ memset(&chap_tbl, 0, sizeof(chap_tbl));
ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
- conn->max_recv_dlength = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
-
- conn->max_xmit_dlength = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
-
- sess->initial_r2t_en =
- (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
-
- sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
-
- sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
-
- sess->first_burst = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
-
- sess->max_burst = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
-
- sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
-
- sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+ qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
+ sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
- sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
-
+ memset(ip_addr, 0, sizeof(ip_addr));
options = le16_to_cpu(fw_ddb_entry->options);
- if (options & DDB_OPT_IPV6_DEVICE)
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
+
+ memset(ip_addr, 0, sizeof(ip_addr));
sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
- else
+ } else {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+ }
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
(char *)fw_ddb_entry->iscsi_name, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
(char *)ha->name_string, buflen);
- iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
- (char *)ip_addr, buflen);
- iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
- (char *)fw_ddb_entry->iscsi_alias, buflen);
+
+ if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+ if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+ chap_tbl.secret,
+ ddb_entry->chap_tbl_idx)) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+ (char *)chap_tbl.name,
+ strlen((char *)chap_tbl.name));
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+ (char *)chap_tbl.secret,
+ chap_tbl.secret_len);
+ }
+ }
}
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -2403,37 +2764,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
/* Update params */
ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
- conn->max_recv_dlength = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
-
- conn->max_xmit_dlength = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
-
- sess->initial_r2t_en =
- (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
-
- sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
-
- sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
-
- sess->first_burst = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
-
- sess->max_burst = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
-
- sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
-
- sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
-
- sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
- iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
- (char *)fw_ddb_entry->iscsi_alias, 0);
-
exit_session_conn_param:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
@@ -2578,6 +2913,8 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
!test_bit(AF_ONLINE, &ha->flags) ||
!test_bit(AF_LINK_UP, &ha->flags) ||
test_bit(AF_LOOPBACK, &ha->flags) ||
+ test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
+ test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
@@ -2652,7 +2989,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
if (ha->nx_pcibase)
iounmap(
(struct device_reg_82xx __iomem *)ha->nx_pcibase);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
if (ha->nx_pcibase)
iounmap(
(struct device_reg_83xx __iomem *)ha->nx_pcibase);
@@ -2846,7 +3183,7 @@ static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
__func__);
if (halt_status & HALT_STATUS_UNRECOVERABLE)
halt_status_unrecoverable = 1;
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
__func__);
@@ -2901,7 +3238,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
__func__);
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
idc_ctrl = qla4_83xx_rd_reg(ha,
QLA83XX_IDC_DRV_CTRL);
if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
@@ -2912,7 +3249,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
}
}
- if (is_qla8032(ha) ||
+ if ((is_qla8032(ha) || is_qla8042(ha)) ||
(is_qla8022(ha) && !ql4xdontresethba)) {
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
@@ -3296,7 +3633,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
- if (is_qla8032(ha) &&
+ if ((is_qla8032(ha) || is_qla8042(ha)) &&
!test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
__func__);
@@ -3494,7 +3831,9 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
} else {
/* Trigger relogin */
if (ddb_entry->ddb_type == FLASH_DDB) {
- if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
+ test_bit(DF_DISABLE_RELOGIN,
+ &ddb_entry->flags)))
qla4xxx_arm_relogin_timer(ddb_entry);
} else
iscsi_session_failure(cls_session->dd_data,
@@ -3597,6 +3936,9 @@ static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
if (!(ddb_entry->ddb_type == FLASH_DDB))
return;
+ if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
+ return;
+
if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
!iscsi_is_session_online(cls_sess)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -3750,7 +4092,7 @@ static void qla4xxx_do_dpc(struct work_struct *work)
if (is_qla80XX(ha)) {
if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
__func__);
/* disable pause frame for ISP83xx */
@@ -3765,8 +4107,35 @@ static void qla4xxx_do_dpc(struct work_struct *work)
qla4_8xxx_device_state_handler(ha);
}
- if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags))
+ if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
+ if (is_qla8042(ha)) {
+ if (ha->idc_info.info2 &
+ ENABLE_INTERNAL_LOOPBACK) {
+ ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
+ __func__);
+ status = qla4_84xx_config_acb(ha,
+ ACB_CONFIG_DISABLE);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
+ __func__);
+ }
+ }
+ }
qla4_83xx_post_idc_ack(ha);
+ clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
+ }
+
+ if (is_qla8042(ha) &&
+ test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
+ __func__);
+ if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
+ QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
+ __func__);
+ }
+ clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
+ }
if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
qla4_8xxx_need_qsnt_handler(ha);
@@ -3778,7 +4147,8 @@ static void qla4xxx_do_dpc(struct work_struct *work)
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
if ((is_qla8022(ha) && ql4xdontresethba) ||
- (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
clear_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -3870,7 +4240,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
} else if (is_qla8022(ha)) {
writel(0, &ha->qla4_82xx_reg->host_int);
readl(&ha->qla4_82xx_reg->host_int);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
writel(0, &ha->qla4_83xx_reg->risc_intr);
readl(&ha->qla4_83xx_reg->risc_intr);
}
@@ -3945,7 +4315,7 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
(ha->pdev->devfn << 11));
ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
QLA82XX_CAM_RAM_DB2);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
((uint8_t *)ha->nx_pcibase);
}
@@ -4809,7 +5179,8 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
}
static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
- struct dev_db_entry *fw_ddb_entry)
+ struct dev_db_entry *fw_ddb_entry,
+ uint32_t *index)
{
struct ddb_entry *ddb_entry;
struct ql4_tuple_ddb *fw_tddb = NULL;
@@ -4843,6 +5214,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
ret = QLA_SUCCESS; /* found */
+ if (index != NULL)
+ *index = idx;
goto exit_check;
}
}
@@ -5078,6 +5451,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
ddb_entry->ha = ha;
ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+ ddb_entry->chap_tbl_idx = INVALID_ENTRY;
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0);
@@ -5139,6 +5513,87 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
} while (time_after(wtime, jiffies));
}
+static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
+ struct dev_db_entry *flash_ddb_entry)
+{
+ uint16_t options = 0;
+ size_t ip_len = IP_ADDR_LEN;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ ip_len = IPv6_ADDR_LEN;
+
+ if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
+ return QLA_ERROR;
+
+ if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
+ sizeof(fw_ddb_entry->isid)))
+ return QLA_ERROR;
+
+ if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
+ sizeof(fw_ddb_entry->port)))
+ return QLA_ERROR;
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint32_t fw_idx, uint32_t *flash_index)
+{
+ struct dev_db_entry *flash_ddb_entry;
+ dma_addr_t flash_ddb_entry_dma;
+ uint32_t idx = 0;
+ int max_ddbs;
+ int ret = QLA_ERROR, status;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &flash_ddb_entry_dma);
+ if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "Out of memory\n");
+ goto exit_find_st_idx;
+ }
+
+ status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+ flash_ddb_entry_dma, fw_idx);
+ if (status == QLA_SUCCESS) {
+ status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+ if (status == QLA_SUCCESS) {
+ *flash_index = fw_idx;
+ ret = QLA_SUCCESS;
+ goto exit_find_st_idx;
+ }
+ }
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+ flash_ddb_entry_dma, idx);
+ if (status == QLA_ERROR)
+ continue;
+
+ status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+ if (status == QLA_SUCCESS) {
+ *flash_index = idx;
+ ret = QLA_SUCCESS;
+ goto exit_find_st_idx;
+ }
+ }
+
+ if (idx == max_ddbs)
+ ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
+ fw_idx);
+
+exit_find_st_idx:
+ if (flash_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
+ flash_ddb_entry_dma);
+
+ return ret;
+}
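/*
 * The flash lookup above tries the firmware index as a fast path before
 * falling back to a full linear scan. A generic, self-contained sketch of
 * that pattern, with match() standing in for qla4xxx_cmp_fw_stentry:
 */
#include <stddef.h>

typedef int (*match_fn)(size_t idx, const void *key);

/* Return the matching index, or -1; the hint index is checked first. */
static long find_with_hint(size_t hint, size_t n, match_fn match,
			   const void *key)
{
	if (hint < n && match(hint, key))
		return (long)hint;

	for (size_t i = 0; i < n; i++)
		if (match(i, key))
			return (long)i;

	return -1;
}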
+
static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
struct list_head *list_st)
{
@@ -5150,6 +5605,7 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
+ uint32_t flash_index = -1;
uint16_t conn_id = 0;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@@ -5182,6 +5638,19 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
if (!st_ddb_idx)
break;
+ ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
+ &flash_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha,
+ "No flash entry for ST at idx [%d]\n", idx);
+ st_ddb_idx->flash_ddb_idx = idx;
+ } else {
+ ql4_printk(KERN_INFO, ha,
+ "ST at idx [%d] is stored at flash [%d]\n",
+ idx, flash_index);
+ st_ddb_idx->flash_ddb_idx = flash_index;
+ }
+
st_ddb_idx->fw_ddb_idx = idx;
list_add_tail(&st_ddb_idx->list, list_st);
@@ -5226,6 +5695,28 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
}
}
+static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ uint32_t max_ddbs = 0;
+ uint16_t ddb_link = -1;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link < max_ddbs)
+ sess->discovery_parent_idx = ddb_link;
+ else
+ sess->discovery_parent_idx = DDB_NO_LINK;
+}
+
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
int is_reset, uint16_t idx)
@@ -5290,6 +5781,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
/* Update sess/conn params */
qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+ qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
if (is_reset == RESET_ADAPTER) {
iscsi_block_session(cls_sess);
@@ -5306,17 +5798,43 @@ exit_setup:
return ret;
}
+static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
+ struct list_head *list_ddb,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ uint16_t ddb_link;
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ if (ddb_idx->fw_ddb_idx == ddb_link) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Updating NT parent idx from [%d] to [%d]\n",
+ ddb_link, ddb_idx->flash_ddb_idx));
+ fw_ddb_entry->ddb_link =
+ cpu_to_le16(ddb_idx->flash_ddb_idx);
+ return;
+ }
+ }
+}
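/*
 * A sketch of the link remap performed above: if an NT entry's ddb_link
 * points at a sendtargets entry by firmware index, rewrite it to that
 * entry's flash index. Modeled here with a plain array instead of the
 * driver's list_head walk; the pair type is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

struct idx_pair { uint16_t fw_idx; uint16_t flash_idx; };

static uint16_t remap_ddb_link(uint16_t ddb_link,
			       const struct idx_pair *st, size_t nst)
{
	for (size_t i = 0; i < nst; i++)
		if (st[i].fw_idx == ddb_link)
			return st[i].flash_idx;
	return ddb_link; /* no ST match: leave the link unchanged */
}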
+
static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
- struct list_head *list_nt, int is_reset)
+ struct list_head *list_nt,
+ struct list_head *list_st,
+ int is_reset)
{
struct dev_db_entry *fw_ddb_entry;
+ struct ddb_entry *ddb_entry = NULL;
dma_addr_t fw_ddb_dma;
int max_ddbs;
int fw_idx_size;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
+ uint32_t ddb_idx = -1;
uint16_t conn_id = 0;
+ uint16_t ddb_link = -1;
struct qla_ddb_index *nt_ddb_idx;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@@ -5343,12 +5861,18 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
goto continue_next_nt;
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link < max_ddbs)
+ qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
+
if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
- state == DDB_DS_SESSION_FAILED))
+ state == DDB_DS_SESSION_FAILED) &&
+ (is_reset == INIT_ADAPTER))
goto continue_next_nt;
DEBUG2(ql4_printk(KERN_INFO, ha,
"Adding DDB to session = 0x%x\n", idx));
+
if (is_reset == INIT_ADAPTER) {
nt_ddb_idx = vmalloc(fw_idx_size);
if (!nt_ddb_idx)
@@ -5378,9 +5902,17 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
list_add_tail(&nt_ddb_idx->list, list_nt);
} else if (is_reset == RESET_ADAPTER) {
- if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
- QLA_SUCCESS)
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
+ &ddb_idx);
+ if (ret == QLA_SUCCESS) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
+ ddb_idx);
+ if (ddb_entry != NULL)
+ qla4xxx_update_sess_disc_idx(ha,
+ ddb_entry,
+ fw_ddb_entry);
goto continue_next_nt;
+ }
}
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
@@ -5398,7 +5930,8 @@ exit_nt_list:
}
static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
- struct list_head *list_nt)
+ struct list_head *list_nt,
+ uint16_t target_id)
{
struct dev_db_entry *fw_ddb_entry;
dma_addr_t fw_ddb_dma;
@@ -5443,13 +5976,16 @@ static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
nt_ddb_idx->fw_ddb_idx = idx;
- ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
if (ret == QLA_SUCCESS) {
/* free nt_ddb_idx and do not add to list_nt */
vfree(nt_ddb_idx);
goto continue_next_new_nt;
}
+ if (target_id < max_ddbs)
+ fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
+
list_add_tail(&nt_ddb_idx->list, list_nt);
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
@@ -5609,7 +6145,8 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
goto exit_ddb_add;
}
- for (idx = 0; idx < max_ddbs; idx++) {
+ /* Indexes 0 and 1 are reserved for boot target entries */
+ for (idx = 2; idx < max_ddbs; idx++) {
if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
fw_ddb_entry_dma, idx))
break;
@@ -5765,7 +6302,8 @@ exit_ddb_conn_open:
}
static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
- struct dev_db_entry *fw_ddb_entry)
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t target_id)
{
struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
struct list_head list_nt;
@@ -5790,7 +6328,7 @@ static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
if (ret == QLA_ERROR)
goto exit_login_st;
- qla4xxx_build_new_nt_list(ha, &list_nt);
+ qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
list_del_init(&ddb_idx->list);
@@ -5817,7 +6355,7 @@ static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
{
int ret = QLA_ERROR;
- ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
if (ret != QLA_SUCCESS)
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
idx);
@@ -5872,7 +6410,8 @@ static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
fw_ddb_entry->cookie = DDB_VALID_COOKIE;
if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
- ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
+ ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
+ fnode_sess->target_id);
else
ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
fnode_sess->target_id);
@@ -5925,13 +6464,6 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
goto exit_ddb_logout;
}
- options = LOGOUT_OPTION_CLOSE_SESSION;
- if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
- ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
- ret = -EIO;
- goto exit_ddb_logout;
- }
-
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
@@ -5941,6 +6473,38 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
goto exit_ddb_logout;
}
+ if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
+ goto ddb_logout_init;
+
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto ddb_logout_init;
+
+ if (ddb_state == DDB_DS_SESSION_ACTIVE)
+ goto ddb_logout_init;
+
+ /* Wait until the next relogin is triggered via DF_RELOGIN, then
+ * clear DF_RELOGIN to prevent any further relogin attempts.
+ */
+ wtime = jiffies + (HZ * RELOGIN_TOV);
+ do {
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
+ goto ddb_logout_init;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while (time_after(wtime, jiffies));
+
+ddb_logout_init:
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ qla4xxx_session_logout_ddb(ha, ddb_entry, options);
+
+ memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
wtime = jiffies + (HZ * LOGOUT_TOV);
do {
ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
@@ -5970,10 +6534,12 @@ ddb_logout_clr_sess:
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
+ clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
iscsi_session_teardown(ddb_entry->sess);
+ clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
ret = QLA_SUCCESS;
exit_ddb_logout:
@@ -6110,7 +6676,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
struct iscsi_bus_flash_conn *fnode_conn;
struct ql4_chap_table chap_tbl;
struct device *dev;
- int parent_type, parent_index = 0xffff;
+ int parent_type;
int rc = 0;
dev = iscsi_find_flashnode_conn(fnode_sess);
@@ -6276,10 +6842,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
rc = sprintf(buf, "\n");
break;
case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
- if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
- parent_index = fnode_sess->discovery_parent_idx;
-
- rc = sprintf(buf, "%u\n", parent_index);
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
break;
case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
if (fnode_sess->discovery_parent_type == DDB_ISNS)
@@ -6369,10 +6932,13 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
struct scsi_qla_host *ha = to_qla_host(shost);
struct iscsi_flashnode_param_info *fnode_param;
+ struct ql4_chap_table chap_tbl;
struct nlattr *attr;
+ uint16_t chap_out_idx = INVALID_ENTRY;
int rc = QLA_ERROR;
uint32_t rem = len;
+ memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
nla_for_each_attr(attr, data, len, rem) {
fnode_param = nla_data(attr);
@@ -6414,6 +6980,10 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
break;
case ISCSI_FLASHNODE_CHAP_AUTH_EN:
fnode_sess->chap_auth_en = fnode_param->value[0];
+ /* Invalidate chap index if chap auth is disabled */
+ if (!fnode_sess->chap_auth_en)
+ fnode_sess->chap_out_idx = INVALID_ENTRY;
+
break;
case ISCSI_FLASHNODE_SNACK_REQ_EN:
fnode_conn->snack_req_en = fnode_param->value[0];
@@ -6533,8 +7103,8 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
memcpy(fnode_conn->link_local_ipv6_addr,
fnode_param->value, IPv6_ADDR_LEN);
break;
- case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
- fnode_sess->discovery_parent_type =
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+ fnode_sess->discovery_parent_idx =
*(uint16_t *)fnode_param->value;
break;
case ISCSI_FLASHNODE_TCP_XMIT_WSF:
@@ -6552,6 +7122,17 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
fnode_conn->exp_statsn =
*(uint32_t *)fnode_param->value;
break;
+ case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+ chap_out_idx = *(uint16_t *)fnode_param->value;
+ if (!qla4xxx_get_uni_chap_at_index(ha,
+ chap_tbl.name,
+ chap_tbl.secret,
+ chap_out_idx)) {
+ fnode_sess->chap_out_idx = chap_out_idx;
+ /* Enable chap auth if chap index is valid */
+ fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
+ }
+ break;
default:
ql4_printk(KERN_ERR, ha,
"%s: No such sysfs attribute\n", __func__);
@@ -6773,11 +7354,10 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
schedule_timeout_uninterruptible(HZ / 10);
} while (time_after(wtime, jiffies));
- /* Free up the sendtargets list */
- qla4xxx_free_ddb_list(&list_st);
- qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+ qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
+ qla4xxx_free_ddb_list(&list_st);
qla4xxx_free_ddb_list(&list_nt);
qla4xxx_free_ddb_index(ha);
@@ -6910,7 +7490,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
nx_legacy_intr->tgt_status_reg;
ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
ha->isp_ops = &qla4_83xx_isp_ops;
ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
} else {
@@ -6981,7 +7561,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
if (is_qla80XX(ha))
qla4_8xxx_get_flash_info(ha);
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
qla4_83xx_read_reset_template(ha);
/*
* NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
@@ -7036,7 +7616,8 @@ skip_retry_init:
ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
if ((is_qla8022(ha) && ql4xdontresethba) ||
- (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
/* Put the device in failed state. */
DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
ha->isp_ops->idc_lock(ha);
@@ -7097,8 +7678,8 @@ skip_retry_init:
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
- ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
- ha->patch_number, ha->build_number);
+ ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
/* Set the driver version */
if (is_qla80XX(ha))
@@ -7645,16 +8226,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
ha = to_qla_host(cmd->device->host);
- if (is_qla8032(ha) && ql4xdontresethba)
+ if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
qla4_83xx_set_idc_dontreset(ha);
/*
- * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other
- * protocol drivers, we should not set device_state to
- * NEED_RESET
+ * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
+ * protocol drivers, we should not set device_state to NEED_RESET
*/
if (ql4xdontresethba ||
- (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
@@ -7779,9 +8360,10 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
}
recover_adapter:
- /* For ISP83XX set graceful reset bit in IDC_DRV_CTRL if
+ /* For ISP8324 and ISP8042, set graceful reset bit in IDC_DRV_CTRL if
* reset is issued by application */
- if (is_qla8032(ha) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ if ((is_qla8032(ha) || is_qla8042(ha)) &&
+ test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
(idc_ctrl | GRACEFUL_RESET_BIT1));
@@ -8078,6 +8660,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
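/*
 * A sketch of how the PCI core matches the id table above: an entry hits
 * when each field is either a wildcard (PCI_ANY_ID) or equal to the
 * device's value. Constants here are illustrative; the real matching lives
 * in the PCI core, not in this driver.
 */
#include <stdint.h>

#define ANY_ID 0xffffffffu /* stand-in for PCI_ANY_ID */

struct id { uint32_t vendor, device, subvendor, subdevice; };

static int id_matches(const struct id *e, const struct id *dev)
{
	return (e->vendor == ANY_ID || e->vendor == dev->vendor) &&
	       (e->device == ANY_ID || e->device == dev->device) &&
	       (e->subvendor == ANY_ID || e->subvendor == dev->subvendor) &&
	       (e->subdevice == ANY_ID || e->subdevice == dev->subdevice);
}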
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index fe873cf7570d..f4fef72c9bcd 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,8 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"
+#define QLA4XXX_DRIVER_VERSION "5.04.00-k1"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index eaa808e6ba91..fe0bcb18fb26 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -78,11 +78,6 @@ static void scsi_done(struct scsi_cmnd *cmd);
* Definitions and constants.
*/
-#define MIN_RESET_DELAY (2*HZ)
-
-/* Do not call reset on error if we just did a reset within 15 sec. */
-#define MIN_RESET_PERIOD (15*HZ)
-
/*
* Note - the initial logging level can be set here to log events at boot time.
* After the system is up, you may enable logging via the /proc interface.
@@ -658,7 +653,6 @@ EXPORT_SYMBOL(scsi_cmd_get_serial);
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
- unsigned long timeout;
int rtn = 0;
atomic_inc(&cmd->device->iorequest_cnt);
@@ -704,28 +698,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
(cmd->device->lun << 5 & 0xe0);
}
- /*
- * We will wait MIN_RESET_DELAY clock ticks after the last reset so
- * we can avoid the drive not being ready.
- */
- timeout = host->last_reset + MIN_RESET_DELAY;
-
- if (host->resetting && time_before(jiffies, timeout)) {
- int ticks_remaining = timeout - jiffies;
- /*
- * NOTE: This may be executed from within an interrupt
- * handler! This is bad, but for now, it'll do. The irq
- * level of the interrupt handler has been masked out by the
- * platform dependent interrupt handling code already, so the
- * sti() here will not cause another call to the SCSI host's
- * interrupt handler (assuming there is one irq-level per
- * host).
- */
- while (--ticks_remaining >= 0)
- mdelay(1 + 999 / HZ);
- host->resetting = 0;
- }
-
scsi_log_send(cmd);
/*
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index cb4fefa1bfba..80b8b10edf41 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -169,7 +169,7 @@ static int scsi_debug_dix = DEF_DIX;
static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_fake_rw = DEF_FAKE_RW;
-static int scsi_debug_guard = DEF_GUARD;
+static unsigned int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
@@ -293,6 +293,20 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
0, 0, 0x0, 0x0};
+static void *fake_store(unsigned long long lba)
+{
+ lba = do_div(lba, sdebug_store_sectors);
+
+ return fake_storep + lba * scsi_debug_sector_size;
+}
+
+static struct sd_dif_tuple *dif_store(sector_t sector)
+{
+ sector = do_div(sector, sdebug_store_sectors);
+
+ return dif_storep + sector;
+}
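/*
 * Note on the two helpers above: do_div(x, d) divides x in place and
 * returns the remainder, so both functions reduce the incoming sector
 * modulo the store size - the ram disk wraps rather than overruns. do_div
 * is used instead of the % operator because 64-bit modulo is not natively
 * available on all 32-bit targets. A userspace equivalent:
 */
#include <stdint.h>
#include <stddef.h>

static size_t wrap_sector(uint64_t lba, size_t store_sectors)
{
	return (size_t)(lba % store_sectors); /* same result as do_div() */
}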
+
static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);
@@ -1731,25 +1745,22 @@ static int do_device_access(struct scsi_cmnd *scmd,
return ret;
}
-static u16 dif_compute_csum(const void *buf, int len)
+static __be16 dif_compute_csum(const void *buf, int len)
{
- u16 csum;
+ __be16 csum;
- switch (scsi_debug_guard) {
- case 1:
- csum = ip_compute_csum(buf, len);
- break;
- case 0:
+ if (scsi_debug_guard)
+ csum = (__force __be16)ip_compute_csum(buf, len);
+ else
csum = cpu_to_be16(crc_t10dif(buf, len));
- break;
- }
+
return csum;
}
static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
sector_t sector, u32 ei_lba)
{
- u16 csum = dif_compute_csum(data, scsi_debug_sector_size);
+ __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
if (sdt->guard_tag != csum) {
pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
@@ -1775,59 +1786,71 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
return 0;
}
-static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
- unsigned int sectors, u32 ei_lba)
+static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+ unsigned int sectors, bool read)
{
unsigned int i, resid;
struct scatterlist *psgl;
- struct sd_dif_tuple *sdt;
- sector_t sector;
- sector_t tmp_sec = start_sec;
void *paddr;
+ const void *dif_store_end = dif_storep + sdebug_store_sectors;
- start_sec = do_div(tmp_sec, sdebug_store_sectors);
+ /* Bytes of protection data to copy into sgl */
+ resid = sectors * sizeof(*dif_storep);
- sdt = dif_storep + start_sec;
+ scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
+ int len = min(psgl->length, resid);
+ void *start = dif_store(sector);
+ int rest = 0;
- for (i = 0 ; i < sectors ; i++) {
- int ret;
+ if (dif_store_end < start + len)
+ rest = start + len - dif_store_end;
- if (sdt[i].app_tag == 0xffff)
- continue;
+ paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
- sector = start_sec + i;
+ if (read)
+ memcpy(paddr, start, len - rest);
+ else
+ memcpy(start, paddr, len - rest);
- ret = dif_verify(&sdt[i],
- fake_storep + sector * scsi_debug_sector_size,
- sector, ei_lba);
- if (ret) {
- dif_errors++;
- return ret;
+ if (rest) {
+ if (read)
+ memcpy(paddr + len - rest, dif_storep, rest);
+ else
+ memcpy(dif_storep, paddr + len - rest, rest);
}
- ei_lba++;
+ sector += len / sizeof(*dif_storep);
+ resid -= len;
+ kunmap_atomic(paddr);
}
+}
- /* Bytes of protection data to copy into sgl */
- resid = sectors * sizeof(*dif_storep);
- sector = start_sec;
+static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+ unsigned int sectors, u32 ei_lba)
+{
+ unsigned int i;
+ struct sd_dif_tuple *sdt;
+ sector_t sector;
- scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
- int len = min(psgl->length, resid);
+ for (i = 0; i < sectors; i++) {
+ int ret;
- paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
- memcpy(paddr, dif_storep + sector, len);
+ sector = start_sec + i;
+ sdt = dif_store(sector);
- sector += len / sizeof(*dif_storep);
- if (sector >= sdebug_store_sectors) {
- /* Force wrap */
- tmp_sec = sector;
- sector = do_div(tmp_sec, sdebug_store_sectors);
+ if (sdt->app_tag == cpu_to_be16(0xffff))
+ continue;
+
+ ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+ if (ret) {
+ dif_errors++;
+ return ret;
}
- resid -= len;
- kunmap_atomic(paddr);
+
+ ei_lba++;
}
+ dif_copy_prot(SCpnt, start_sec, sectors, true);
dix_reads++;
return 0;
@@ -1910,15 +1933,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
{
int i, j, ret;
struct sd_dif_tuple *sdt;
- struct scatterlist *dsgl = scsi_sglist(SCpnt);
+ struct scatterlist *dsgl;
struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
void *daddr, *paddr;
- sector_t tmp_sec = start_sec;
- sector_t sector;
+ sector_t sector = start_sec;
int ppage_offset;
- sector = do_div(tmp_sec, sdebug_store_sectors);
-
BUG_ON(scsi_sg_count(SCpnt) == 0);
BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
@@ -1946,25 +1966,13 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
sdt = paddr + ppage_offset;
- ret = dif_verify(sdt, daddr + j, start_sec, ei_lba);
+ ret = dif_verify(sdt, daddr + j, sector, ei_lba);
if (ret) {
dump_sector(daddr + j, scsi_debug_sector_size);
goto out;
}
- /* Would be great to copy this in bigger
- * chunks. However, for the sake of
- * correctness we need to verify each sector
- * before writing it to "stable" storage
- */
- memcpy(dif_storep + sector, sdt, sizeof(*sdt));
-
sector++;
-
- if (sector == sdebug_store_sectors)
- sector = 0; /* Force wrap */
-
- start_sec++;
ei_lba++;
ppage_offset += sizeof(struct sd_dif_tuple);
}
@@ -1973,6 +1981,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
kunmap_atomic(daddr);
}
+ dif_copy_prot(SCpnt, start_sec, sectors, false);
dix_writes++;
return 0;
@@ -1997,8 +2006,14 @@ static unsigned long lba_to_map_index(sector_t lba)
static sector_t map_index_to_lba(unsigned long index)
{
- return index * scsi_debug_unmap_granularity -
- scsi_debug_unmap_alignment;
+ sector_t lba = index * scsi_debug_unmap_granularity;
+
+ if (scsi_debug_unmap_alignment) {
+ lba -= scsi_debug_unmap_granularity -
+ scsi_debug_unmap_alignment;
+ }
+
+ return lba;
}
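/*
 * A worked example of the fix above. With granularity G and alignment A
 * (0 < A < G), the forward map used elsewhere in this file is assumed to be
 * index = (lba + G - A) / G, so the inverse must subtract G - A again, not
 * A. For G = 4, A = 1: map block 1 starts at lba 1, where the old
 * "index*G - A" formula gave lba 3.
 */
#include <stdio.h>

#define G 4u /* unmap granularity, example value */
#define A 1u /* unmap alignment, example value */

static unsigned long lba_to_index(unsigned long lba)
{
	return (lba + G - A) / G;
}

static unsigned long index_to_lba(unsigned long index)
{
	return index * G - (G - A); /* fixed form, valid for index >= 1 */
}

int main(void)
{
	/* round trip: lba 1 -> index 1 -> lba 1 */
	printf("%lu %lu\n", lba_to_index(1), index_to_lba(1));
	return 0;
}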
static unsigned int map_state(sector_t lba, unsigned int *num)
@@ -2659,8 +2674,8 @@ static void __init sdebug_build_parts(unsigned char *ramp,
/ sdebug_sectors_per;
pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
- pp->start_sect = start_sec;
- pp->nr_sects = end_sec - start_sec + 1;
+ pp->start_sect = cpu_to_le32(start_sec);
+ pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
pp->sys_ind = 0x83; /* plain Linux partition */
}
}
@@ -2736,7 +2751,7 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
-module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
+module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
@@ -3166,7 +3181,7 @@ DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
}
DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 21505962f539..e8bee9f0ad0f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -87,6 +87,18 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(scsi_schedule_eh);
+static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
+{
+ if (!shost->last_reset || !shost->eh_deadline)
+ return 0;
+
+ if (time_before(jiffies,
+ shost->last_reset + shost->eh_deadline))
+ return 0;
+
+ return 1;
+}
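/*
 * A sketch of the predicate above with the jiffies wrap handling made
 * explicit: time_before(a, b) is (long)(a - b) < 0, so the deadline has
 * passed once (long)(now - (last_reset + eh_deadline)) >= 0. Zero values
 * disable the check, matching the early returns above.
 */
static int past_deadline(unsigned long now, unsigned long last_reset,
			 unsigned long eh_deadline)
{
	if (!last_reset || !eh_deadline)
		return 0;
	return (long)(now - (last_reset + eh_deadline)) >= 0;
}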
+
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
@@ -109,6 +121,9 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
goto out_unlock;
+ if (shost->eh_deadline && !shost->last_reset)
+ shost->last_reset = jiffies;
+
ret = 1;
scmd->eh_eflags |= eh_flag;
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
@@ -138,6 +153,9 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
+ if (host->eh_deadline && !host->last_reset)
+ host->last_reset = jiffies;
+
if (host->transportt->eh_timed_out)
rtn = host->transportt->eh_timed_out(scmd);
else if (host->hostt->eh_timed_out)
@@ -223,12 +241,80 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
}
#endif
+/**
+ * scsi_report_lun_change - Set flag on all *other* devices on the same target
+ * to indicate that a UNIT ATTENTION is expected.
+ * @sdev: Device reporting the UNIT ATTENTION
+ */
+static void scsi_report_lun_change(struct scsi_device *sdev)
+{
+ sdev->sdev_target->expecting_lun_change = 1;
+}
+
+/**
+ * scsi_report_sense - Examine scsi sense information and log messages for
+ * certain conditions; also issue uevents for some of them.
+ * @sdev: Device reporting the sense code
+ * @sshdr: sshdr to be examined
+ */
+static void scsi_report_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sshdr)
+{
+ enum scsi_device_event evt_type = SDEV_EVT_MAXBITS; /* i.e. none */
+
+ if (sshdr->sense_key == UNIT_ATTENTION) {
+ if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
+ evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Inquiry data has changed");
+ } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
+ evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
+ scsi_report_lun_change(sdev);
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "LUN assignments on this target have "
+ "changed. The Linux SCSI layer does not "
+ "automatically remap LUN assignments.\n");
+ } else if (sshdr->asc == 0x3f)
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "operating parameters on this target have "
+ "changed. The Linux SCSI layer does not "
+ "automatically adjust these parameters.\n");
+
+ if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
+ evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "LUN reached a thin provisioning soft "
+ "threshold.\n");
+ }
+
+ if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
+ evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Mode parameters changed");
+ } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
+ evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Capacity data has changed");
+ } else if (sshdr->asc == 0x2a)
+ sdev_printk(KERN_WARNING, sdev,
+ "Parameters changed");
+ }
+
+ if (evt_type != SDEV_EVT_MAXBITS) {
+ set_bit(evt_type, sdev->pending_events);
+ schedule_work(&sdev->event_work);
+ }
+}
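/*
 * The sense decoding above is a chain of asc/ascq comparisons; the same
 * mapping can also be expressed as a table scan. The event names here are
 * abbreviated stand-ins for the SDEV_EVT_* values used above.
 */
#include <stdint.h>
#include <stddef.h>

enum ua_event { EV_NONE, EV_INQUIRY_CHANGE, EV_LUN_CHANGE,
		EV_SOFT_THRESHOLD, EV_MODE_CHANGE, EV_CAPACITY_CHANGE };

static const struct { uint8_t asc, ascq; enum ua_event ev; } ua_map[] = {
	{ 0x3f, 0x03, EV_INQUIRY_CHANGE },
	{ 0x3f, 0x0e, EV_LUN_CHANGE },
	{ 0x38, 0x07, EV_SOFT_THRESHOLD },
	{ 0x2a, 0x01, EV_MODE_CHANGE },
	{ 0x2a, 0x09, EV_CAPACITY_CHANGE },
};

static enum ua_event classify_ua(uint8_t asc, uint8_t ascq)
{
	for (size_t i = 0; i < sizeof(ua_map) / sizeof(ua_map[0]); i++)
		if (ua_map[i].asc == asc && ua_map[i].ascq == ascq)
			return ua_map[i].ev;
	return EV_NONE;
}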
+
/**
* scsi_check_sense - Examine scsi cmd sense
* @scmd: Cmd to have sense checked.
*
* Return value:
- * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR
+ * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
*
* Notes:
* When a deferred error is detected the current command has
@@ -250,6 +336,8 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
*/
return SUCCESS;
+ scsi_report_sense(sdev, &sshdr);
+
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
@@ -315,6 +403,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
}
}
/*
+ * we might also expect a cc/ua if another LUN on the target
+ * reported a UA with an ASC/ASCQ of 3F 0E -
+ * REPORTED LUNS DATA HAS CHANGED.
+ */
+ if (scmd->device->sdev_target->expecting_lun_change &&
+ sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
+ return NEEDS_RETRY;
+ /*
* if the device is in the process of becoming ready, we
* should retry.
*/
@@ -327,26 +423,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->allow_restart &&
(sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
return FAILED;
-
- if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
- scmd_printk(KERN_WARNING, scmd,
- "Warning! Received an indication that the "
- "LUN assignments on this target have "
- "changed. The Linux SCSI layer does not "
- "automatically remap LUN assignments.\n");
- else if (sshdr.asc == 0x3f)
- scmd_printk(KERN_WARNING, scmd,
- "Warning! Received an indication that the "
- "operating parameters on this target have "
- "changed. The Linux SCSI layer does not "
- "automatically adjust these parameters.\n");
-
- if (sshdr.asc == 0x38 && sshdr.ascq == 0x07)
- scmd_printk(KERN_WARNING, scmd,
- "Warning! Received an indication that the "
- "LUN reached a thin provisioning soft "
- "threshold.\n");
-
/*
* Pass the UA upwards for a determination in the completion
* functions.
@@ -354,18 +430,25 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
return SUCCESS;
/* these are not supported */
+ case DATA_PROTECT:
+ if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
+ /* Thin provisioning hard threshold reached */
+ set_host_byte(scmd, DID_ALLOC_FAILURE);
+ return SUCCESS;
+ }
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
case BLANK_CHECK:
- case DATA_PROTECT:
- return TARGET_ERROR;
+ set_host_byte(scmd, DID_TARGET_FAILURE);
+ return SUCCESS;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- return TARGET_ERROR;
+ set_host_byte(scmd, DID_MEDIUM_ERROR);
+ return SUCCESS;
}
return NEEDS_RETRY;
@@ -373,14 +456,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- return TARGET_ERROR;
+ set_host_byte(scmd, DID_TARGET_FAILURE);
case ILLEGAL_REQUEST:
if (sshdr.asc == 0x20 || /* Invalid command operation code */
sshdr.asc == 0x21 || /* Logical block address out of range */
sshdr.asc == 0x24 || /* Invalid field in cdb */
sshdr.asc == 0x26) { /* Parameter value invalid */
- return TARGET_ERROR;
+ set_host_byte(scmd, DID_TARGET_FAILURE);
}
return SUCCESS;
@@ -843,7 +926,6 @@ retry:
case SUCCESS:
case NEEDS_RETRY:
case FAILED:
- case TARGET_ERROR:
break;
case ADD_TO_MLQUEUE:
rtn = NEEDS_RETRY;
@@ -926,13 +1008,26 @@ int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ struct Scsi_Host *shost;
int rtn;
+ unsigned long flags;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
SCSI_SENSE_VALID(scmd))
continue;
+ shost = scmd->device->host;
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
"%s: requesting sense\n",
current->comm));
@@ -1018,11 +1113,28 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,
struct scsi_cmnd *scmd, *next;
struct scsi_device *sdev;
int finish_cmds;
+ unsigned long flags;
while (!list_empty(cmd_list)) {
scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
sdev = scmd->device;
+ if (!try_stu) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ if (scsi_host_eh_past_deadline(sdev->host)) {
+ /* Push items back onto work_q */
+ list_splice_init(cmd_list, work_q);
+ spin_unlock_irqrestore(sdev->host->host_lock,
+ flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, sdev->host,
+ "skip %s, past eh deadline",
+ __func__));
+ break;
+ }
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ }
+
finish_cmds = !scsi_device_online(scmd->device) ||
(try_stu && !scsi_eh_try_stu(scmd) &&
!scsi_eh_tur(scmd)) ||
@@ -1058,26 +1170,42 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
struct scsi_cmnd *scmd, *next;
LIST_HEAD(check_list);
int rtn;
+ struct Scsi_Host *shost;
+ unsigned long flags;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
continue;
+ shost = scmd->device->host;
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ list_splice_init(&check_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ return list_empty(work_q);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
"0x%p\n", current->comm,
scmd));
- rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
- if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
- scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
- if (rtn == FAST_IO_FAIL)
- scsi_eh_finish_cmd(scmd, done_q);
- else
- list_move_tail(&scmd->eh_entry, &check_list);
- } else
+ rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
+ if (rtn == FAILED) {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
" cmd failed:"
"0x%p\n",
current->comm,
scmd));
+ list_splice_init(&check_list, work_q);
+ return list_empty(work_q);
+ }
+ scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
+ if (rtn == FAST_IO_FAIL)
+ scsi_eh_finish_cmd(scmd, done_q);
+ else
+ list_move_tail(&scmd->eh_entry, &check_list);
}
return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
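All of the deadline checks added above share one predicate, scsi_host_eh_past_deadline(), taken with host_lock held. The helper itself is introduced elsewhere in this series; the sketch below captures the contract these call sites assume, based on the last_reset and eh_deadline fields visible in this patch. Treat the body as an assumption, not a quote of the patch.

/*
 * Assumed contract of the predicate used above (sketch, not the patch's
 * verbatim helper): recovery steps are skipped once eh_deadline jiffies
 * have elapsed since last_reset. Callers hold shost->host_lock.
 */
static inline int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
{
	if (!shost->last_reset || !shost->eh_deadline)
		return 0;	/* no deadline configured or no reset recorded */

	return time_before(jiffies,
			   shost->last_reset + shost->eh_deadline) ? 0 : 1;
}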
@@ -1123,8 +1251,19 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *stu_scmd, *next;
struct scsi_device *sdev;
+ unsigned long flags;
shost_for_each_device(sdev, shost) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
stu_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry)
if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
@@ -1177,9 +1316,20 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
{
struct scsi_cmnd *scmd, *bdr_scmd, *next;
struct scsi_device *sdev;
+ unsigned long flags;
int rtn;
shost_for_each_device(sdev, shost) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
bdr_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry)
if (scmd->device == sdev) {
@@ -1239,6 +1389,21 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct scsi_cmnd *next, *scmd;
int rtn;
unsigned int id;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ /* push back on work queue for further processing */
+ list_splice_init(&check_list, work_q);
+ list_splice_init(&tmp_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ return list_empty(work_q);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
id = scmd_id(scmd);
@@ -1283,6 +1448,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
LIST_HEAD(check_list);
unsigned int channel;
int rtn;
+ unsigned long flags;
/*
* we really want to loop over the various channels, and do this on
@@ -1292,6 +1458,18 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
*/
for (channel = 0; channel <= shost->max_channel; channel++) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_eh_past_deadline(shost)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ list_splice_init(&check_list, work_q);
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "skip %s, past eh deadline\n",
+ __func__));
+ return list_empty(work_q);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
chan_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) {
if (channel == scmd_channel(scmd)) {
@@ -1568,6 +1746,8 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
*/
return ADD_TO_MLQUEUE;
case GOOD:
+ if (scmd->cmnd[0] == REPORT_LUNS)
+ scmd->device->sdev_target->expecting_lun_change = 0;
scsi_handle_queue_ramp_up(scmd->device);
case COMMAND_TERMINATED:
return SUCCESS;
@@ -1577,14 +1757,6 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
- else if (rtn == TARGET_ERROR) {
- /*
- * Need to modify host byte to signal a
- * permanent target failure
- */
- set_host_byte(scmd, DID_TARGET_FAILURE);
- rtn = SUCCESS;
- }
/* if rtn == FAILED, we have no sense information;
* returning FAILED will wake the error handler thread
* to collect the sense and redo the decide
@@ -1697,8 +1869,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* will be requests for character device operations, and also for
* ioctls to queued block devices.
*/
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
- __func__));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ printk("scsi_eh_%d waking up host to restart\n",
+ shost->host_no));
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_RUNNING))
@@ -1825,6 +1998,10 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->eh_deadline)
+ shost->last_reset = 0;
+ spin_unlock_irqrestore(shost->host_lock, flags);
scsi_eh_flush_done_q(&eh_done_q);
}
@@ -1851,7 +2028,7 @@ int scsi_error_handler(void *data)
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
shost->host_failed != shost->host_busy) {
SCSI_LOG_ERROR_RECOVERY(1,
- printk("Error handler scsi_eh_%d sleeping\n",
+ printk("scsi_eh_%d: sleeping\n",
shost->host_no));
schedule();
continue;
@@ -1859,8 +2036,9 @@ int scsi_error_handler(void *data)
__set_current_state(TASK_RUNNING);
SCSI_LOG_ERROR_RECOVERY(1,
- printk("Error handler scsi_eh_%d waking up\n",
- shost->host_no));
+ printk("scsi_eh_%d: waking up %d/%d/%d\n",
+ shost->host_no, shost->host_eh_scheduled,
+ shost->host_failed, shost->host_busy));
/*
* We have a host that is failing for some reason. Figure out
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 124392f3091e..d1549b74e2d1 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -68,28 +68,6 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
struct kmem_cache *scsi_sdb_cache;
-#ifdef CONFIG_ACPI
-#include <acpi/acpi_bus.h>
-
-static bool acpi_scsi_bus_match(struct device *dev)
-{
- return dev->bus == &scsi_bus_type;
-}
-
-int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
-{
- bus->match = acpi_scsi_bus_match;
- return register_acpi_bus_type(bus);
-}
-EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
-
-void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus)
-{
- unregister_acpi_bus_type(bus);
-}
-EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type);
-#endif
-
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
* not change behaviour from the previous unplug mechanism, experimentation
@@ -716,6 +694,20 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
}
EXPORT_SYMBOL(scsi_release_buffers);
+/**
+ * __scsi_error_from_host_byte - translate SCSI error code into errno
+ * @cmd: SCSI command (unused)
+ * @result: SCSI error code
+ *
+ * Translate SCSI error code into standard UNIX errno.
+ * Return values:
+ * -ENOLINK temporary transport failure
+ * -EREMOTEIO permanent target failure, do not retry
+ * -EBADE permanent nexus failure, retry on other path
+ * -ENOSPC no write space available
+ * -ENODATA medium error
+ * -EIO unspecified I/O error
+ */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
int error = 0;
@@ -732,6 +724,14 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
set_host_byte(cmd, DID_OK);
error = -EBADE;
break;
+ case DID_ALLOC_FAILURE:
+ set_host_byte(cmd, DID_OK);
+ error = -ENOSPC;
+ break;
+ case DID_MEDIUM_ERROR:
+ set_host_byte(cmd, DID_OK);
+ error = -ENODATA;
+ break;
default:
error = -EIO;
break;
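With the two new host bytes, the errno mapping documented above covers six outcomes. Below is a standalone mirror of the switch for illustration; the DID_* values here are symbolic stand-ins for the kernel-internal constants, and the function name is hypothetical.

#include <errno.h>
#include <stdio.h>

/* Standalone mirror of __scsi_error_from_host_byte()'s mapping; the enum
 * values are stand-ins, not the kernel's DID_* constants. */
enum host_byte {
	DID_OK, DID_TRANSPORT_FAILFAST, DID_TARGET_FAILURE,
	DID_NEXUS_FAILURE, DID_ALLOC_FAILURE, DID_MEDIUM_ERROR,
};

static int scsi_error_from_host_byte(enum host_byte hb)
{
	switch (hb) {
	case DID_TRANSPORT_FAILFAST:
		return -ENOLINK;	/* temporary transport failure */
	case DID_TARGET_FAILURE:
		return -EREMOTEIO;	/* permanent target failure */
	case DID_NEXUS_FAILURE:
		return -EBADE;		/* permanent nexus failure */
	case DID_ALLOC_FAILURE:
		return -ENOSPC;		/* no write space available */
	case DID_MEDIUM_ERROR:
		return -ENODATA;	/* medium error */
	default:
		return -EIO;		/* unspecified I/O error */
	}
}

int main(void)
{
	printf("%d\n", scsi_error_from_host_byte(DID_MEDIUM_ERROR) == -ENODATA);
	return 0;
}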
@@ -2231,7 +2231,21 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
case SDEV_EVT_MEDIA_CHANGE:
envp[idx++] = "SDEV_MEDIA_CHANGE=1";
break;
-
+ case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
+ break;
+ case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
+ break;
+ case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
+ envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
+ break;
+ case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
+ break;
+ case SDEV_EVT_LUN_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
+ break;
default:
/* do nothing */
break;
@@ -2252,10 +2266,15 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
void scsi_evt_thread(struct work_struct *work)
{
struct scsi_device *sdev;
+ enum scsi_device_event evt_type;
LIST_HEAD(event_list);
sdev = container_of(work, struct scsi_device, event_work);
+ for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
+ if (test_and_clear_bit(evt_type, sdev->pending_events))
+ sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
+
while (1) {
struct scsi_event *evt;
struct list_head *this, *tmp;
@@ -2325,6 +2344,11 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
/* evt_type-specific initialization, if any */
switch (evt_type) {
case SDEV_EVT_MEDIA_CHANGE:
+ case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+ case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+ case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
+ case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
+ case SDEV_EVT_LUN_CHANGE_REPORTED:
default:
/* do nothing */
break;
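The five new SDEV_UA= environment strings reach userspace as kobject uevents on the netlink multicast group. A minimal listener is sketched below; it is a generic uevent reader (run as root), not part of this patch, and only the SDEV_UA key it filters on comes from the hunk above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		ssize_t off = 0;

		if (len <= 0)
			break;
		buf[len] = '\0';
		/* payload: "action@devpath" then NUL-separated KEY=value */
		while (off < len) {
			if (!strncmp(buf + off, "SDEV_UA=", 8))
				printf("%s\n", buf + off);
			off += strlen(buf + off) + 1;
		}
	}
	close(fd);
	return 0;
}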
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 4c5aabe21755..af4c050ce6e4 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -54,7 +54,8 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
/*
* All the high-level SCSI drivers that implement runtime
* PM treat runtime suspend, system suspend, and system
- * hibernate identically.
+ * hibernate nearly identically. In all cases the requirements for
+ * runtime suspension are the stricter ones, so nothing more is needed here.
*/
if (pm_runtime_suspended(dev))
return 0;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 7e50061e9ef6..8ff62c26a41c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -281,6 +281,42 @@ exit_store_host_reset:
static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
+static ssize_t
+show_shost_eh_deadline(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ return sprintf(buf, "%d\n", shost->eh_deadline / HZ);
+}
+
+static ssize_t
+store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ int ret = -EINVAL;
+ int deadline;
+ unsigned long flags;
+
+ if (shost->transportt && shost->transportt->eh_strategy_handler)
+ return ret;
+
+ if (sscanf(buf, "%d\n", &deadline) == 1) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_in_recovery(shost))
+ ret = -EBUSY;
+ else {
+ shost->eh_deadline = deadline * HZ;
+ ret = count;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
+
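From the store routine above: the value is taken in seconds (multiplied by HZ internally), rejected with -EINVAL when the transport supplies its own eh_strategy_handler, and with -EBUSY while the host is in recovery. A minimal sketch of setting it from userspace; the host number is illustrative.

#include <stdio.h>

int main(void)
{
	/* host0 is illustrative; pick the right /sys/class/scsi_host/hostN */
	FILE *f = fopen("/sys/class/scsi_host/host0/eh_deadline", "w");

	if (!f)
		return 1;	/* no such host or insufficient rights */
	fprintf(f, "30\n");	/* deadline in seconds */
	return fclose(f) ? 1 : 0;
}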
shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(host_busy, "%hu\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
@@ -308,6 +344,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_prot_capabilities.attr,
&dev_attr_prot_guard_type.attr,
&dev_attr_host_reset.attr,
+ &dev_attr_eh_deadline.attr,
NULL
};
@@ -529,6 +566,7 @@ static int scsi_sdev_check_buf_bit(const char *buf)
*/
sdev_rd_attr (device_blocked, "%d\n");
sdev_rd_attr (queue_depth, "%d\n");
+sdev_rd_attr (device_busy, "%d\n");
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n");
@@ -739,12 +777,18 @@ sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
#define REF_EVT(name) &dev_attr_evt_##name.attr
DECLARE_EVT(media_change, MEDIA_CHANGE)
+DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED)
+DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED)
+DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
+DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
+DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
/* Default template for device attributes. May NOT be modified */
static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_device_blocked.attr,
&dev_attr_type.attr,
&dev_attr_scsi_level.attr,
+ &dev_attr_device_busy.attr,
&dev_attr_vendor.attr,
&dev_attr_model.attr,
&dev_attr_rev.attr,
@@ -759,6 +803,11 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_ioerr_cnt.attr,
&dev_attr_modalias.attr,
REF_EVT(media_change),
+ REF_EVT(inquiry_change_reported),
+ REF_EVT(capacity_change_reported),
+ REF_EVT(soft_threshold_reached),
+ REF_EVT(mode_parameter_change_reported),
+ REF_EVT(lun_change_reported),
NULL
};
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index abf7c402e1a5..63a6ca49d4e5 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/bsg-lib.h>
#include <linux/idr.h>
-#include <linux/list.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -2745,6 +2744,28 @@ exit_get_chap:
return err;
}
+static int iscsi_set_chap(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int err = 0;
+
+ if (!transport->set_chap)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_path.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.set_path.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->set_chap(shost, data, len);
+ scsi_host_put(shost);
+ return err;
+}
+
static int iscsi_delete_chap(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
@@ -3235,6 +3256,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
err = iscsi_logout_flashnode_sid(transport, ev);
break;
+ case ISCSI_UEVENT_SET_CHAP:
+ err = iscsi_set_chap(transport, ev,
+ nlmsg_attrlen(nlh, sizeof(*ev)));
+ break;
default:
err = -ENOSYS;
break;
@@ -3327,6 +3352,23 @@ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT);
+iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN);
+iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO);
+iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE);
+iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT);
+iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE);
+iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE);
+iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE);
+iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN);
+iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE);
+iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS);
+iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC);
+iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL);
+iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6);
+iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
+iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
+
#define iscsi_conn_ep_attr_show(param) \
static ssize_t show_conn_ep_param_##param(struct device *dev, \
@@ -3379,6 +3421,22 @@ static struct attribute *iscsi_conn_attrs[] = {
&dev_attr_conn_persistent_port.attr,
&dev_attr_conn_ping_tmo.attr,
&dev_attr_conn_recv_tmo.attr,
+ &dev_attr_conn_local_port.attr,
+ &dev_attr_conn_statsn.attr,
+ &dev_attr_conn_keepalive_tmo.attr,
+ &dev_attr_conn_max_segment_size.attr,
+ &dev_attr_conn_tcp_timestamp_stat.attr,
+ &dev_attr_conn_tcp_wsf_disable.attr,
+ &dev_attr_conn_tcp_nagle_disable.attr,
+ &dev_attr_conn_tcp_timer_scale.attr,
+ &dev_attr_conn_tcp_timestamp_enable.attr,
+ &dev_attr_conn_fragment_disable.attr,
+ &dev_attr_conn_ipv4_tos.attr,
+ &dev_attr_conn_ipv6_traffic_class.attr,
+ &dev_attr_conn_ipv6_flow_label.attr,
+ &dev_attr_conn_is_fw_assigned_ipv6.attr,
+ &dev_attr_conn_tcp_xmit_wsf.attr,
+ &dev_attr_conn_tcp_recv_wsf.attr,
NULL,
};
@@ -3416,6 +3474,38 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_PING_TMO;
else if (attr == &dev_attr_conn_recv_tmo.attr)
param = ISCSI_PARAM_RECV_TMO;
+ else if (attr == &dev_attr_conn_local_port.attr)
+ param = ISCSI_PARAM_LOCAL_PORT;
+ else if (attr == &dev_attr_conn_statsn.attr)
+ param = ISCSI_PARAM_STATSN;
+ else if (attr == &dev_attr_conn_keepalive_tmo.attr)
+ param = ISCSI_PARAM_KEEPALIVE_TMO;
+ else if (attr == &dev_attr_conn_max_segment_size.attr)
+ param = ISCSI_PARAM_MAX_SEGMENT_SIZE;
+ else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr)
+ param = ISCSI_PARAM_TCP_TIMESTAMP_STAT;
+ else if (attr == &dev_attr_conn_tcp_wsf_disable.attr)
+ param = ISCSI_PARAM_TCP_WSF_DISABLE;
+ else if (attr == &dev_attr_conn_tcp_nagle_disable.attr)
+ param = ISCSI_PARAM_TCP_NAGLE_DISABLE;
+ else if (attr == &dev_attr_conn_tcp_timer_scale.attr)
+ param = ISCSI_PARAM_TCP_TIMER_SCALE;
+ else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr)
+ param = ISCSI_PARAM_TCP_TIMESTAMP_EN;
+ else if (attr == &dev_attr_conn_fragment_disable.attr)
+ param = ISCSI_PARAM_IP_FRAGMENT_DISABLE;
+ else if (attr == &dev_attr_conn_ipv4_tos.attr)
+ param = ISCSI_PARAM_IPV4_TOS;
+ else if (attr == &dev_attr_conn_ipv6_traffic_class.attr)
+ param = ISCSI_PARAM_IPV6_TC;
+ else if (attr == &dev_attr_conn_ipv6_flow_label.attr)
+ param = ISCSI_PARAM_IPV6_FLOW_LABEL;
+ else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr)
+ param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6;
+ else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr)
+ param = ISCSI_PARAM_TCP_XMIT_WSF;
+ else if (attr == &dev_attr_conn_tcp_recv_wsf.attr)
+ param = ISCSI_PARAM_TCP_RECV_WSF;
else {
WARN_ONCE(1, "Invalid conn attr");
return 0;
@@ -3476,6 +3566,21 @@ iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0);
iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0);
iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0);
iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0);
+iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0);
+iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0);
+iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0);
+iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0);
+iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0);
+iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0);
+iscsi_session_attr(discovery_auth_optional,
+ ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0);
+iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0);
+iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0);
+iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0);
+iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0);
+iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
+iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
+iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
static ssize_t
show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -3580,6 +3685,20 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_sess_chap_out_idx.attr,
&dev_attr_sess_chap_in_idx.attr,
&dev_attr_priv_sess_target_id.attr,
+ &dev_attr_sess_auto_snd_tgt_disable.attr,
+ &dev_attr_sess_discovery_session.attr,
+ &dev_attr_sess_portal_type.attr,
+ &dev_attr_sess_chap_auth.attr,
+ &dev_attr_sess_discovery_logout.attr,
+ &dev_attr_sess_bidi_chap.attr,
+ &dev_attr_sess_discovery_auth_optional.attr,
+ &dev_attr_sess_def_time2wait.attr,
+ &dev_attr_sess_def_time2retain.attr,
+ &dev_attr_sess_isid.attr,
+ &dev_attr_sess_tsid.attr,
+ &dev_attr_sess_def_taskmgmt_tmo.attr,
+ &dev_attr_sess_discovery_parent_idx.attr,
+ &dev_attr_sess_discovery_parent_type.attr,
NULL,
};
@@ -3643,6 +3762,34 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_BOOT_NIC;
else if (attr == &dev_attr_sess_boot_target.attr)
param = ISCSI_PARAM_BOOT_TARGET;
+ else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr)
+ param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE;
+ else if (attr == &dev_attr_sess_discovery_session.attr)
+ param = ISCSI_PARAM_DISCOVERY_SESS;
+ else if (attr == &dev_attr_sess_portal_type.attr)
+ param = ISCSI_PARAM_PORTAL_TYPE;
+ else if (attr == &dev_attr_sess_chap_auth.attr)
+ param = ISCSI_PARAM_CHAP_AUTH_EN;
+ else if (attr == &dev_attr_sess_discovery_logout.attr)
+ param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN;
+ else if (attr == &dev_attr_sess_bidi_chap.attr)
+ param = ISCSI_PARAM_BIDI_CHAP_EN;
+ else if (attr == &dev_attr_sess_discovery_auth_optional.attr)
+ param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL;
+ else if (attr == &dev_attr_sess_def_time2wait.attr)
+ param = ISCSI_PARAM_DEF_TIME2WAIT;
+ else if (attr == &dev_attr_sess_def_time2retain.attr)
+ param = ISCSI_PARAM_DEF_TIME2RETAIN;
+ else if (attr == &dev_attr_sess_isid.attr)
+ param = ISCSI_PARAM_ISID;
+ else if (attr == &dev_attr_sess_tsid.attr)
+ param = ISCSI_PARAM_TSID;
+ else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr)
+ param = ISCSI_PARAM_DEF_TASKMGMT_TMO;
+ else if (attr == &dev_attr_sess_discovery_parent_idx.attr)
+ param = ISCSI_PARAM_DISCOVERY_PARENT_IDX;
+ else if (attr == &dev_attr_sess_discovery_parent_type.attr)
+ param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE;
else if (attr == &dev_attr_priv_sess_recovery_tmo.attr)
return S_IRUGO | S_IWUSR;
else if (attr == &dev_attr_priv_sess_state.attr)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 86fcf2c313ad..fd874b846dc1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -105,7 +105,8 @@ static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
-static int sd_suspend(struct device *);
+static int sd_suspend_system(struct device *);
+static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *);
@@ -132,8 +133,8 @@ static const char *sd_cache_types[] = {
};
static ssize_t
-sd_store_cache_type(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+cache_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int i, ct = -1, rcd, wce, sp;
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -199,8 +200,18 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+manage_start_stop_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
+}
+
+static ssize_t
+manage_start_stop_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -212,10 +223,19 @@ sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(manage_start_stop);
+
+static ssize_t
+allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
+}
static ssize_t
-sd_store_allow_restart(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+allow_restart_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -230,47 +250,30 @@ sd_store_allow_restart(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(allow_restart);
static ssize_t
-sd_show_cache_type(struct device *dev, struct device_attribute *attr,
- char *buf)
+cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
int ct = sdkp->RCD + 2*sdkp->WCE;
return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
}
+static DEVICE_ATTR_RW(cache_type);
static ssize_t
-sd_show_fua(struct device *dev, struct device_attribute *attr, char *buf)
+FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
}
+static DEVICE_ATTR_RO(FUA);
static ssize_t
-sd_show_manage_start_stop(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct scsi_disk *sdkp = to_scsi_disk(dev);
- struct scsi_device *sdp = sdkp->device;
-
- return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
-}
-
-static ssize_t
-sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct scsi_disk *sdkp = to_scsi_disk(dev);
-
- return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
-}
-
-static ssize_t
-sd_show_protection_type(struct device *dev, struct device_attribute *attr,
- char *buf)
+protection_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -278,8 +281,8 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-sd_store_protection_type(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+protection_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
unsigned int val;
@@ -298,10 +301,11 @@ sd_store_protection_type(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(protection_type);
static ssize_t
-sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
- char *buf)
+protection_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -320,24 +324,26 @@ sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
}
+static DEVICE_ATTR_RO(protection_mode);
static ssize_t
-sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
- char *buf)
+app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
return snprintf(buf, 20, "%u\n", sdkp->ATO);
}
+static DEVICE_ATTR_RO(app_tag_own);
static ssize_t
-sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
- char *buf)
+thin_provisioning_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
return snprintf(buf, 20, "%u\n", sdkp->lbpme);
}
+static DEVICE_ATTR_RO(thin_provisioning);
static const char *lbp_mode[] = {
[SD_LBP_FULL] = "full",
@@ -349,8 +355,8 @@ static const char *lbp_mode[] = {
};
static ssize_t
-sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr,
- char *buf)
+provisioning_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -358,8 +364,8 @@ sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+provisioning_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -385,10 +391,11 @@ sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(provisioning_mode);
static ssize_t
-sd_show_max_medium_access_timeouts(struct device *dev,
- struct device_attribute *attr, char *buf)
+max_medium_access_timeouts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -396,9 +403,9 @@ sd_show_max_medium_access_timeouts(struct device *dev,
}
static ssize_t
-sd_store_max_medium_access_timeouts(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+max_medium_access_timeouts_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
int err;
@@ -410,10 +417,11 @@ sd_store_max_medium_access_timeouts(struct device *dev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(max_medium_access_timeouts);
static ssize_t
-sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
- char *buf)
+max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -421,8 +429,8 @@ sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -451,43 +459,37 @@ sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
return count;
}
-
-static struct device_attribute sd_disk_attrs[] = {
- __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
- sd_store_cache_type),
- __ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
- __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
- sd_store_allow_restart),
- __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
- sd_store_manage_start_stop),
- __ATTR(protection_type, S_IRUGO|S_IWUSR, sd_show_protection_type,
- sd_store_protection_type),
- __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
- __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
- __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
- __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
- sd_store_provisioning_mode),
- __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR,
- sd_show_write_same_blocks, sd_store_write_same_blocks),
- __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
- sd_show_max_medium_access_timeouts,
- sd_store_max_medium_access_timeouts),
- __ATTR_NULL,
+static DEVICE_ATTR_RW(max_write_same_blocks);
+
+static struct attribute *sd_disk_attrs[] = {
+ &dev_attr_cache_type.attr,
+ &dev_attr_FUA.attr,
+ &dev_attr_allow_restart.attr,
+ &dev_attr_manage_start_stop.attr,
+ &dev_attr_protection_type.attr,
+ &dev_attr_protection_mode.attr,
+ &dev_attr_app_tag_own.attr,
+ &dev_attr_thin_provisioning.attr,
+ &dev_attr_provisioning_mode.attr,
+ &dev_attr_max_write_same_blocks.attr,
+ &dev_attr_max_medium_access_timeouts.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(sd_disk);
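The dev_attrs to dev_groups conversions here (and in st.c below) lean on the ATTRIBUTE_GROUPS() helper. Roughly, it expands as sketched below; this is a simplified rendering of the <linux/sysfs.h> macro, not a verbatim copy.

/* Approximate expansion of ATTRIBUTE_GROUPS(sd_disk): wrap the attribute
 * array in a group and expose the NULL-terminated list .dev_groups wants. */
static const struct attribute_group sd_disk_group = {
	.attrs = sd_disk_attrs,
};

static const struct attribute_group *sd_disk_groups[] = {
	&sd_disk_group,
	NULL,
};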
static struct class sd_disk_class = {
.name = "scsi_disk",
.owner = THIS_MODULE,
.dev_release = scsi_disk_release,
- .dev_attrs = sd_disk_attrs,
+ .dev_groups = sd_disk_groups,
};
static const struct dev_pm_ops sd_pm_ops = {
- .suspend = sd_suspend,
+ .suspend = sd_suspend_system,
.resume = sd_resume,
- .poweroff = sd_suspend,
+ .poweroff = sd_suspend_system,
.restore = sd_resume,
- .runtime_suspend = sd_suspend,
+ .runtime_suspend = sd_suspend_runtime,
.runtime_resume = sd_resume,
};
@@ -828,7 +830,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
{
- rq->timeout = SD_FLUSH_TIMEOUT;
+ rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;
rq->retries = SD_MAX_RETRIES;
rq->cmd[0] = SYNCHRONIZE_CACHE;
rq->cmd_len = 10;
@@ -1432,12 +1434,13 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
{
int retries, res;
struct scsi_device *sdp = sdkp->device;
+ const int timeout = sdp->request_queue->rq_timeout
+ * SD_FLUSH_TIMEOUT_MULTIPLIER;
struct scsi_sense_hdr sshdr;
if (!scsi_device_online(sdp))
return -ENODEV;
-
for (retries = 3; retries > 0; --retries) {
unsigned char cmd[10] = { 0 };
@@ -1447,20 +1450,39 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
* flush everything.
*/
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
- &sshdr, SD_FLUSH_TIMEOUT,
- SD_MAX_RETRIES, NULL, REQ_PM);
+ &sshdr, timeout, SD_MAX_RETRIES,
+ NULL, REQ_PM);
if (res == 0)
break;
}
if (res) {
sd_print_result(sdkp, res);
+
if (driver_byte(res) & DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr);
+ /* we need to evaluate the error return */
+ if (scsi_sense_valid(&sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr.asc == 0x3a)
+ /* not an error in this context */
+ return 0;
+
+ switch (host_byte(res)) {
+ /* ignore errors due to racing a disconnection */
+ case DID_BAD_TARGET:
+ case DID_NO_CONNECT:
+ return 0;
+ /* signal the upper layer it might try again */
+ case DID_BUS_BUSY:
+ case DID_IMM_RETRY:
+ case DID_REQUEUE:
+ case DID_SOFT_ERROR:
+ return -EBUSY;
+ default:
+ return -EIO;
+ }
}
-
- if (res)
- return -EIO;
return 0;
}
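The timeout computed at the top of sd_sync_cache() replaces the fixed SD_FLUSH_TIMEOUT: with the stock SD_TIMEOUT of 30 seconds and the multiplier of 2 defined in the sd.h hunk below, the default flush timeout remains 60 seconds, while a queue whose rq_timeout was raised via sysfs now gets a proportionally longer flush window. A trivial standalone check of the arithmetic:

#include <stdio.h>

#define SD_FLUSH_TIMEOUT_MULTIPLIER 2	/* from the sd.h hunk below */

int main(void)
{
	int rq_timeout_sec = 30;	/* default queue timeout: SD_TIMEOUT/HZ */

	printf("flush timeout: %d s\n",
	       rq_timeout_sec * SD_FLUSH_TIMEOUT_MULTIPLIER);	/* 60 */
	return 0;
}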
@@ -2419,14 +2441,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
}
- if (modepage == 0x3F) {
- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
- "present\n");
- goto defaults;
- } else if ((buffer[offset] & 0x3f) != modepage) {
- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
- goto defaults;
- }
+ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+ goto defaults;
+
Page_found:
if (modepage == 8) {
sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
@@ -2643,13 +2660,16 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_device *sdev = sdkp->device;
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
+ /* too large values might cause issues with arcmsr */
+ int vpd_buf_len = 64;
+
sdev->no_report_opcodes = 1;
/* Disable WRITE SAME if REPORT SUPPORTED OPERATION
* CODES is unsupported and the device has an ATA
* Information VPD page (SAT).
*/
- if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
+ if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
sdev->no_write_same = 1;
}
@@ -2858,6 +2878,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
gd->events |= DISK_EVENT_MEDIA_CHANGE;
}
+ blk_pm_runtime_init(sdp->request_queue, dev);
add_disk(gd);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
@@ -2866,7 +2887,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
- blk_pm_runtime_init(sdp->request_queue, dev);
scsi_autopm_put_device(sdp);
put_device(&sdkp->dev);
}
@@ -3062,9 +3082,17 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
sd_print_result(sdkp, res);
if (driver_byte(res) & DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr);
+ if (scsi_sense_valid(&sshdr) &&
+ /* 0x3a is medium not present */
+ sshdr.asc == 0x3a)
+ res = 0;
}
- return res;
+ /* SCSI error codes must not go to the generic layer */
+ if (res)
+ return -EIO;
+
+ return 0;
}
/*
@@ -3082,7 +3110,7 @@ static void sd_shutdown(struct device *dev)
if (pm_runtime_suspended(dev))
goto exit;
- if (sdkp->WCE) {
+ if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
sd_sync_cache(sdkp);
}
@@ -3096,7 +3124,7 @@ exit:
scsi_disk_put(sdkp);
}
-static int sd_suspend(struct device *dev)
+static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
int ret = 0;
@@ -3104,16 +3132,23 @@ static int sd_suspend(struct device *dev)
if (!sdkp)
return 0; /* this can happen */
- if (sdkp->WCE) {
+ if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
ret = sd_sync_cache(sdkp);
- if (ret)
+ if (ret) {
+ /* ignore OFFLINE device */
+ if (ret == -ENODEV)
+ ret = 0;
goto done;
+ }
}
if (sdkp->device->manage_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ /* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
+ if (ignore_stop_errors)
+ ret = 0;
}
done:
@@ -3121,6 +3156,16 @@ done:
return ret;
}
+static int sd_suspend_system(struct device *dev)
+{
+ return sd_suspend_common(dev, true);
+}
+
+static int sd_suspend_runtime(struct device *dev)
+{
+ return sd_suspend_common(dev, false);
+}
+
static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 7a049de22051..26895ff247c5 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -13,7 +13,11 @@
*/
#define SD_TIMEOUT (30 * HZ)
#define SD_MOD_TIMEOUT (75 * HZ)
-#define SD_FLUSH_TIMEOUT (60 * HZ)
+/*
+ * The flush timeout is a multiple of the standard device timeout, which
+ * is user-modifiable via sysfs and initially set to SD_TIMEOUT.
+ */
+#define SD_FLUSH_TIMEOUT_MULTIPLIER 2
#define SD_WRITE_SAME_TIMEOUT (120 * HZ)
/*
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961484e1..5cbc4bb1b395 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -105,11 +105,8 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *);
-static DEFINE_SPINLOCK(sg_open_exclusive_lock);
-
static DEFINE_IDR(sg_index_idr);
-static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
- file descriptor list for device */
+static DEFINE_RWLOCK(sg_index_lock);
static struct class_interface sg_interface = {
.add_dev = sg_add,
@@ -146,8 +143,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
- /* sfd_siblings is protected by sg_index_lock */
- struct list_head sfd_siblings;
+ struct list_head sfd_siblings; /* protected by the owning device's sfd_lock */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -170,13 +166,12 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
- wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
- /* sfds is protected by sg_index_lock */
+ spinlock_t sfd_lock; /* protect file descriptor list for device */
struct list_head sfds;
+ struct rw_semaphore o_sem; /* held for writing by exclusive opens, for reading otherwise */
volatile char detached; /* 0->attached, 1->detached pending removal */
- /* exclude protected by sg_open_exclusive_lock */
char exclude; /* opened for exclusive access */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
@@ -225,35 +220,14 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
-static int get_exclude(Sg_device *sdp)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&sg_open_exclusive_lock, flags);
- ret = sdp->exclude;
- spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
- return ret;
-}
-
-static int set_exclude(Sg_device *sdp, char val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sg_open_exclusive_lock, flags);
- sdp->exclude = val;
- spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
- return val;
-}
-
static int sfds_list_empty(Sg_device *sdp)
{
unsigned long flags;
int ret;
- read_lock_irqsave(&sg_index_lock, flags);
+ spin_lock_irqsave(&sdp->sfd_lock, flags);
ret = list_empty(&sdp->sfds);
- read_unlock_irqrestore(&sg_index_lock, flags);
+ spin_unlock_irqrestore(&sdp->sfd_lock, flags);
return ret;
}
@@ -265,7 +239,6 @@ sg_open(struct inode *inode, struct file *filp)
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
- int res;
int retval;
nonseekable_open(inode, filp);
@@ -294,54 +267,52 @@ sg_open(struct inode *inode, struct file *filp)
goto error_out;
}
- if (flags & O_EXCL) {
- if (O_RDONLY == (flags & O_ACCMODE)) {
- retval = -EPERM; /* Can't lock it with read only access */
- goto error_out;
- }
- if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
- retval = -EBUSY;
- goto error_out;
- }
- res = wait_event_interruptible(sdp->o_excl_wait,
- ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
- if (res) {
- retval = res; /* -ERESTARTSYS because signal hit process */
- goto error_out;
- }
- } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
- if (flags & O_NONBLOCK) {
- retval = -EBUSY;
- goto error_out;
- }
- res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
- if (res) {
- retval = res; /* -ERESTARTSYS because signal hit process */
- goto error_out;
- }
- }
- if (sdp->detached) {
- retval = -ENODEV;
+ if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) {
+ retval = -EPERM; /* Can't lock it with read only access */
goto error_out;
}
+ if (flags & O_NONBLOCK) {
+ if (flags & O_EXCL) {
+ if (!down_write_trylock(&sdp->o_sem)) {
+ retval = -EBUSY;
+ goto error_out;
+ }
+ } else {
+ if (!down_read_trylock(&sdp->o_sem)) {
+ retval = -EBUSY;
+ goto error_out;
+ }
+ }
+ } else {
+ if (flags & O_EXCL)
+ down_write(&sdp->o_sem);
+ else
+ down_read(&sdp->o_sem);
+ }
+ /* Since write lock is held, no need to check sfd_list */
+ if (flags & O_EXCL)
+ sdp->exclude = 1; /* used by release lock */
+
if (sfds_list_empty(sdp)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
- if ((sfp = sg_add_sfp(sdp, dev)))
+ sfp = sg_add_sfp(sdp, dev);
+ if (!IS_ERR(sfp))
filp->private_data = sfp;
+ /* retval is already provably zero at this point because of the
+ * check after retval = scsi_autopm_get_device(sdp->device)
+ */
else {
+ retval = PTR_ERR(sfp);
+
if (flags & O_EXCL) {
- set_exclude(sdp, 0); /* undo if error */
- wake_up_interruptible(&sdp->o_excl_wait);
- }
- retval = -ENOMEM;
- goto error_out;
- }
- retval = 0;
+ sdp->exclude = 0; /* undo if error */
+ up_write(&sdp->o_sem);
+ } else
+ up_read(&sdp->o_sem);
error_out:
- if (retval) {
scsi_autopm_put_device(sdp->device);
sdp_put:
scsi_device_put(sdp->device);
@@ -358,13 +329,18 @@ sg_release(struct inode *inode, struct file *filp)
{
Sg_device *sdp;
Sg_fd *sfp;
+ int excl;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
- set_exclude(sdp, 0);
- wake_up_interruptible(&sdp->o_excl_wait);
+ excl = sdp->exclude;
+ sdp->exclude = 0;
+ if (excl)
+ up_write(&sdp->o_sem);
+ else
+ up_read(&sdp->o_sem);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
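The o_sem scheme above maps sg's open semantics onto reader/writer locking: O_EXCL opens are writers, all other opens are readers, and the trylock variants implement O_NONBLOCK; sg_release() samples sdp->exclude to pick the matching unlock. The same invariant can be modeled in userspace with a pthread rwlock; a sketch, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t o_sem = PTHREAD_RWLOCK_INITIALIZER;

/* returns 0 on success, -1 where sg_open() would return -EBUSY */
static int sg_open_model(bool excl, bool nonblock)
{
	if (nonblock)
		return (excl ? pthread_rwlock_trywrlock(&o_sem)
			     : pthread_rwlock_tryrdlock(&o_sem)) ? -1 : 0;
	return excl ? pthread_rwlock_wrlock(&o_sem)
		    : pthread_rwlock_rdlock(&o_sem);
}

static void sg_release_model(void)
{
	/* rwlocks need no reader/writer distinction on unlock, unlike rwsems,
	 * which is why sg_release() must remember sdp->exclude */
	pthread_rwlock_unlock(&o_sem);
}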
@@ -1415,8 +1391,9 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
+ spin_lock_init(&sdp->sfd_lock);
INIT_LIST_HEAD(&sdp->sfds);
- init_waitqueue_head(&sdp->o_excl_wait);
+ init_rwsem(&sdp->o_sem);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
@@ -1549,11 +1526,13 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
/* Need a write lock to set sdp->detached. */
write_lock_irqsave(&sg_index_lock, iflags);
+ spin_lock(&sdp->sfd_lock);
sdp->detached = 1;
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
+ spin_unlock(&sdp->sfd_lock);
write_unlock_irqrestore(&sg_index_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2064,7 +2043,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
@@ -2078,9 +2057,13 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
- write_lock_irqsave(&sg_index_lock, iflags);
+ spin_lock_irqsave(&sdp->sfd_lock, iflags);
+ if (sdp->detached) {
+ spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ return ERR_PTR(-ENODEV);
+ }
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
- write_unlock_irqrestore(&sg_index_lock, iflags);
+ spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
@@ -2130,10 +2113,9 @@ static void sg_remove_sfp(struct kref *kref)
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
- write_lock_irqsave(&sg_index_lock, iflags);
+ spin_lock_irqsave(&sdp->sfd_lock, iflags);
list_del(&sfp->sfd_siblings);
- write_unlock_irqrestore(&sg_index_lock, iflags);
- wake_up_interruptible(&sdp->o_excl_wait);
+ spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
@@ -2520,7 +2502,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
return 0;
}
-/* must be called while holding sg_index_lock */
+/* must be called while holding sg_index_lock and sfd_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, m, new_interface, blen, usg;
@@ -2605,22 +2587,26 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp && !list_empty(&sdp->sfds)) {
- struct scsi_device *scsidp = sdp->device;
+ if (sdp) {
+ spin_lock(&sdp->sfd_lock);
+ if (!list_empty(&sdp->sfds)) {
+ struct scsi_device *scsidp = sdp->device;
- seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
- if (sdp->detached)
- seq_printf(s, "detached pending close ");
- else
- seq_printf
- (s, "scsi%d chan=%d id=%d lun=%d em=%d",
- scsidp->host->host_no,
- scsidp->channel, scsidp->id,
- scsidp->lun,
- scsidp->host->hostt->emulated);
- seq_printf(s, " sg_tablesize=%d excl=%d\n",
- sdp->sg_tablesize, get_exclude(sdp));
- sg_proc_debug_helper(s, sdp);
+ seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+ if (sdp->detached)
+ seq_printf(s, "detached pending close ");
+ else
+ seq_printf
+ (s, "scsi%d chan=%d id=%d lun=%d em=%d",
+ scsidp->host->host_no,
+ scsidp->channel, scsidp->id,
+ scsidp->lun,
+ scsidp->host->hostt->emulated);
+ seq_printf(s, " sg_tablesize=%d excl=%d\n",
+ sdp->sg_tablesize, sdp->exclude);
+ sg_proc_debug_helper(s, sdp);
+ }
+ spin_unlock(&sdp->sfd_lock);
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 2a32036a9404..ff44b3c2cff2 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -82,7 +82,7 @@ static int try_rdio = 1;
static int try_wdio = 1;
static struct class st_sysfs_class;
-static struct device_attribute st_dev_attrs[];
+static const struct attribute_group *st_dev_groups[];
MODULE_AUTHOR("Kai Makisara");
MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -4274,7 +4274,7 @@ static void scsi_tape_release(struct kref *kref)
static struct class st_sysfs_class = {
.name = "scsi_tape",
- .dev_attrs = st_dev_attrs,
+ .dev_groups = st_dev_groups,
};
static int __init init_st(void)
@@ -4408,6 +4408,7 @@ defined_show(struct device *dev, struct device_attribute *attr, char *buf)
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
return l;
}
+static DEVICE_ATTR_RO(defined);
static ssize_t
default_blksize_show(struct device *dev, struct device_attribute *attr,
@@ -4419,7 +4420,7 @@ default_blksize_show(struct device *dev, struct device_attribute *attr,
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
return l;
}
-
+static DEVICE_ATTR_RO(default_blksize);
static ssize_t
default_density_show(struct device *dev, struct device_attribute *attr,
@@ -4433,6 +4434,7 @@ default_density_show(struct device *dev, struct device_attribute *attr,
l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density);
return l;
}
+static DEVICE_ATTR_RO(default_density);
static ssize_t
default_compression_show(struct device *dev, struct device_attribute *attr,
@@ -4444,6 +4446,7 @@ default_compression_show(struct device *dev, struct device_attribute *attr,
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
return l;
}
+static DEVICE_ATTR_RO(default_compression);
static ssize_t
options_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -4472,15 +4475,17 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
return l;
}
-
-static struct device_attribute st_dev_attrs[] = {
- __ATTR_RO(defined),
- __ATTR_RO(default_blksize),
- __ATTR_RO(default_density),
- __ATTR_RO(default_compression),
- __ATTR_RO(options),
- __ATTR_NULL,
+static DEVICE_ATTR_RO(options);
+
+static struct attribute *st_dev_attrs[] = {
+ &dev_attr_defined.attr,
+ &dev_attr_default_blksize.attr,
+ &dev_attr_default_density.attr,
+ &dev_attr_default_compression.attr,
+ &dev_attr_options.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(st_dev);
/* The following functions may be useful for a larger audience. */
static int sgl_map_user_pages(struct st_buffer *STbp,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 83ec1aa85964..1a28f5632797 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1879,7 +1879,6 @@ static void __exit storvsc_drv_exit(void)
}
MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 9327f5fcec4e..11423615c2ea 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -521,7 +521,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
pACB->SelConn++;
return 1;
}
- if (time_before (jiffies, pACB->pScsiHost->last_reset))
+ if (time_before (jiffies, pACB->last_reset))
{
DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));
return 1;
@@ -1863,7 +1863,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB )
/* delay half a second */
udelay (1000);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
- pACB->pScsiHost->last_reset = jiffies + 5*HZ/2
+ pACB->last_reset = jiffies + 5*HZ/2
+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
pACB->Connected = 0;
@@ -2048,9 +2048,9 @@ static int DC390_bus_reset (struct scsi_cmnd *cmd)
dc390_ResetDevParam(pACB);
mdelay(1);
- pACB->pScsiHost->last_reset = jiffies + 3*HZ/2
+ pACB->last_reset = jiffies + 3*HZ/2
+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
-
+
DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
DC390_read8(INT_Status); /* Reset Pending INT */
@@ -2383,7 +2383,7 @@ static void dc390_init_hw(struct dc390_acb *pACB, u8 index)
if (pACB->Gmode2 & RST_SCSI_BUS) {
dc390_ResetSCSIBus(pACB);
udelay(1000);
- shost->last_reset = jiffies + HZ/2 +
+ pACB->last_reset = jiffies + HZ/2 +
HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
}
@@ -2455,8 +2455,8 @@ static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->irq = pdev->irq;
shost->base = io_port;
shost->unique_id = io_port;
- shost->last_reset = jiffies;
-
+
+ pACB->last_reset = jiffies;
pACB->pScsiHost = shost;
pACB->IOPortBase = (u16) io_port;
pACB->IRQLevel = pdev->irq;
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
index 77adc54dbd16..3d1bb4ad1826 100644
--- a/drivers/scsi/tmscsim.h
+++ b/drivers/scsi/tmscsim.h
@@ -143,6 +143,7 @@ u8 Ignore_IRQ; /* Not used */
struct pci_dev *pdev;
+unsigned long last_reset;
unsigned long Cmds;
u32 SelLost;
u32 SelConn;
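The tmscsim change moves last_reset out of struct Scsi_Host and into the driver-private ACB; the jiffies-based guard logic itself is unchanged. A standalone sketch of that settle-time check (names are illustrative, not from tmscsim):

#include <linux/jiffies.h>

struct my_acb {
	unsigned long last_reset;	/* jiffies value set at reset time */
};

/* refuse new commands until the post-reset settle time has elapsed */
static int my_ready_for_commands(struct my_acb *acb)
{
	return !time_before(jiffies, acb->last_reset);
}

static void my_note_reset(struct my_acb *acb, unsigned int settle_secs)
{
	acb->last_reset = jiffies + settle_secs * HZ;
}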
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 139bc0647b41..721050090520 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -36,10 +36,17 @@
#ifndef _UFS_H
#define _UFS_H
+#include <linux/mutex.h>
+#include <linux/types.h>
+
#define MAX_CDB_SIZE 16
+#define GENERAL_UPIU_REQUEST_SIZE 32
+#define QUERY_DESC_MAX_SIZE 256
+#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
+ (sizeof(struct utp_upiu_header)))
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- ((byte3 << 24) | (byte2 << 16) |\
+ cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
(byte1 << 8) | (byte0))
/*
@@ -62,7 +69,7 @@ enum {
UPIU_TRANSACTION_COMMAND = 0x01,
UPIU_TRANSACTION_DATA_OUT = 0x02,
UPIU_TRANSACTION_TASK_REQ = 0x04,
- UPIU_TRANSACTION_QUERY_REQ = 0x26,
+ UPIU_TRANSACTION_QUERY_REQ = 0x16,
};
/* UTP UPIU Transaction Codes Target to Initiator */
@@ -73,6 +80,7 @@ enum {
UPIU_TRANSACTION_TASK_RSP = 0x24,
UPIU_TRANSACTION_READY_XFER = 0x31,
UPIU_TRANSACTION_QUERY_RSP = 0x36,
+ UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
};
/* UPIU Read/Write flags */
@@ -90,8 +98,41 @@ enum {
UPIU_TASK_ATTR_ACA = 0x03,
};
-/* UTP QUERY Transaction Specific Fields OpCode */
+/* UPIU Query request function */
enum {
+ UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+/* Flag idn for Query Requests */
+enum flag_idn {
+ QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+ QUERY_FLAG_IDN_BKOPS_EN = 0x04,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+ QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
+ QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
+ QUERY_ATTR_IDN_EE_STATUS = 0x0E,
+};
+
+/* Exception event mask values */
+enum {
+ MASK_EE_STATUS = 0xFFFF,
+ MASK_EE_URGENT_BKOPS = (1 << 2),
+};
+
+/* Background operation status */
+enum {
+ BKOPS_STATUS_NO_OP = 0x0,
+ BKOPS_STATUS_NON_CRITICAL = 0x1,
+ BKOPS_STATUS_PERF_IMPACT = 0x2,
+ BKOPS_STATUS_CRITICAL = 0x3,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
UPIU_QUERY_OPCODE_NOP = 0x0,
UPIU_QUERY_OPCODE_READ_DESC = 0x1,
UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
@@ -103,6 +144,21 @@ enum {
UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
};
+/* Query response result code */
+enum {
+ QUERY_RESULT_SUCCESS = 0x00,
+ QUERY_RESULT_NOT_READABLE = 0xF6,
+ QUERY_RESULT_NOT_WRITEABLE = 0xF7,
+ QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
+ QUERY_RESULT_INVALID_LENGTH = 0xF9,
+ QUERY_RESULT_INVALID_VALUE = 0xFA,
+ QUERY_RESULT_INVALID_SELECTOR = 0xFB,
+ QUERY_RESULT_INVALID_INDEX = 0xFC,
+ QUERY_RESULT_INVALID_IDN = 0xFD,
+ QUERY_RESULT_INVALID_OPCODE = 0xFE,
+ QUERY_RESULT_GENERAL_FAILURE = 0xFF,
+};
+
/* UTP Transfer Request Command Type (CT) */
enum {
UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
@@ -110,10 +166,19 @@ enum {
UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
};
+/* UTP Transfer Request Command Offset */
+#define UPIU_COMMAND_TYPE_OFFSET 28
+
+/* Offset of the response code in the UPIU header */
+#define UPIU_RSP_CODE_OFFSET 8
+
enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
+ MASK_SCSI_STATUS = 0xFF,
+ MASK_TASK_RESPONSE = 0xFF00,
+ MASK_RSP_UPIU_RESULT = 0xFFFF,
+ MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
+ MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
+ MASK_RSP_EXCEPTION_EVENT = 0x10000,
};
/* Task management service response */
@@ -138,26 +203,59 @@ struct utp_upiu_header {
/**
* struct utp_upiu_cmd - Command UPIU structure
- * @header: UPIU header structure DW-0 to DW-2
* @data_transfer_len: Data Transfer Length DW-3
* @cdb: Command Descriptor Block CDB DW-4 to DW-7
*/
struct utp_upiu_cmd {
- struct utp_upiu_header header;
u32 exp_data_transfer_len;
u8 cdb[MAX_CDB_SIZE];
};
/**
- * struct utp_upiu_rsp - Response UPIU structure
- * @header: UPIU header DW-0 to DW-2
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+ u8 opcode;
+ u8 idn;
+ u8 index;
+ u8 selector;
+ u16 reserved_osf;
+ u16 length;
+ u32 value;
+ u32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header:UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_req {
+ struct utp_upiu_header header;
+ union {
+ struct utp_upiu_cmd sc;
+ struct utp_upiu_query qr;
+ };
+};
+
+/**
+ * struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
* @reserved: Reserved double words DW-4 to DW-7
* @sense_data_len: Sense data length DW-8 U16
* @sense_data: Sense data field DW-8 to DW-12
*/
-struct utp_upiu_rsp {
- struct utp_upiu_header header;
+struct utp_cmd_rsp {
u32 residual_transfer_count;
u32 reserved[4];
u16 sense_data_len;
@@ -165,6 +263,20 @@ struct utp_upiu_rsp {
};
/**
+ * struct utp_upiu_rsp - general upiu response structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sr: fields structure for scsi command DW-3 to DW-12
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_rsp {
+ struct utp_upiu_header header;
+ union {
+ struct utp_cmd_rsp sr;
+ struct utp_upiu_query qr;
+ };
+};
+
+/**
* struct utp_upiu_task_req - Task request UPIU structure
* @header - UPIU header structure DW0 to DW-2
* @input_param1: Input parameter 1 DW-3
@@ -194,4 +306,24 @@ struct utp_upiu_task_rsp {
u32 reserved[3];
};
+/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_resp - UPIU QUERY
+ * @response: device response code
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ u8 response;
+ struct utp_upiu_query upiu_res;
+};
+
#endif /* End of Header */
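With UPIU_HEADER_DWORD() now folding in the cpu_to_be32() conversion, header words can be assigned directly. A sketch of filling a read-flag query request using only the structures and constants defined above (the tag value 5 is arbitrary):

struct utp_upiu_req req = { };

req.header.dword_0 = UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
				       0 /* flags */, 0 /* lun */, 5 /* tag */);
req.header.dword_1 = UPIU_HEADER_DWORD(0,
				       UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
				       0, 0);
req.qr.opcode = UPIU_QUERY_OPCODE_READ_FLAG;
req.qr.idn = QUERY_FLAG_IDN_FDEVICEINIT;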
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 48be39a6f6d7..a823cf44e949 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -35,6 +35,7 @@
#include "ufshcd.h"
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#ifdef CONFIG_PM
/**
@@ -44,7 +45,7 @@
*
* Returns -ENOSYS
*/
-static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int ufshcd_pci_suspend(struct device *dev)
{
/*
* TODO:
@@ -61,7 +62,7 @@ static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
*
* Returns -ENOSYS
*/
-static int ufshcd_pci_resume(struct pci_dev *pdev)
+static int ufshcd_pci_resume(struct device *dev)
{
/*
* TODO:
@@ -71,8 +72,45 @@ static int ufshcd_pci_resume(struct pci_dev *pdev)
return -ENOSYS;
}
+#else
+#define ufshcd_pci_suspend NULL
+#define ufshcd_pci_resume NULL
#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_RUNTIME
+static int ufshcd_pci_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_suspend(hba);
+}
+static int ufshcd_pci_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_resume(hba);
+}
+static int ufshcd_pci_runtime_idle(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_idle(hba);
+}
+#else /* !CONFIG_PM_RUNTIME */
+#define ufshcd_pci_runtime_suspend NULL
+#define ufshcd_pci_runtime_resume NULL
+#define ufshcd_pci_runtime_idle NULL
+#endif /* CONFIG_PM_RUNTIME */
+
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
* @pdev: pointer to PCI device handle
@@ -91,12 +129,10 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
{
struct ufs_hba *hba = pci_get_drvdata(pdev);
- disable_irq(pdev->irq);
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
- pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
}
/**
@@ -133,55 +169,49 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem *mmio_base;
int err;
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "pci_enable_device failed\n");
- goto out_error;
+ dev_err(&pdev->dev, "pcim_enable_device failed\n");
+ return err;
}
pci_set_master(pdev);
-
- err = pci_request_regions(pdev, UFSHCD);
+ err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
if (err < 0) {
- dev_err(&pdev->dev, "request regions failed\n");
- goto out_disable;
+ dev_err(&pdev->dev, "request and iomap failed\n");
+ return err;
}
- mmio_base = pci_ioremap_bar(pdev, 0);
- if (!mmio_base) {
- dev_err(&pdev->dev, "memory map failed\n");
- err = -ENOMEM;
- goto out_release_regions;
- }
+ mmio_base = pcim_iomap_table(pdev)[0];
err = ufshcd_set_dma_mask(pdev);
if (err) {
dev_err(&pdev->dev, "set dma mask failed\n");
- goto out_iounmap;
+ return err;
}
err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
- goto out_iounmap;
+ return err;
}
pci_set_drvdata(pdev, hba);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
return 0;
-
-out_iounmap:
- iounmap(mmio_base);
-out_release_regions:
- pci_release_regions(pdev);
-out_disable:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
-out_error:
- return err;
}
+static const struct dev_pm_ops ufshcd_pci_pm_ops = {
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+ .runtime_suspend = ufshcd_pci_runtime_suspend,
+ .runtime_resume = ufshcd_pci_runtime_resume,
+ .runtime_idle = ufshcd_pci_runtime_idle,
+};
+
static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ } /* terminate list */
@@ -195,10 +225,9 @@ static struct pci_driver ufshcd_pci_driver = {
.probe = ufshcd_pci_probe,
.remove = ufshcd_pci_remove,
.shutdown = ufshcd_pci_shutdown,
-#ifdef CONFIG_PM
- .suspend = ufshcd_pci_suspend,
- .resume = ufshcd_pci_resume,
-#endif
+ .driver = {
+ .pm = &ufshcd_pci_pm_ops
+ },
};
module_pci_driver(ufshcd_pci_driver);
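The probe conversion above relies on managed (devres) PCI helpers, which is what lets all the explicit unwind labels disappear. A reduced sketch of the same shape ("my-driver" and BAR 0 are placeholders):

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mmio;
	int err;

	err = pcim_enable_device(pdev);	/* auto-disabled on failure/remove */
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << 0, "my-driver");
	if (err)
		return err;

	mmio = pcim_iomap_table(pdev)[0];	/* BAR 0, auto-unmapped */

	/* ... device init; a plain 'return err' suffices on any failure ... */
	return 0;
}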
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index c42db40d4e51..5e4623225422 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -34,6 +34,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "ufshcd.h"
@@ -87,6 +88,40 @@ static int ufshcd_pltfrm_resume(struct device *dev)
#define ufshcd_pltfrm_resume NULL
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_suspend(hba);
+}
+static int ufshcd_pltfrm_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_resume(hba);
+}
+static int ufshcd_pltfrm_runtime_idle(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_idle(hba);
+}
+#else /* !CONFIG_PM_RUNTIME */
+#define ufshcd_pltfrm_runtime_suspend NULL
+#define ufshcd_pltfrm_runtime_resume NULL
+#define ufshcd_pltfrm_runtime_idle NULL
+#endif /* CONFIG_PM_RUNTIME */
+
/**
* ufshcd_pltfrm_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
@@ -102,15 +137,8 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(dev, "Memory resource not available\n");
- err = -ENODEV;
- goto out;
- }
-
mmio_base = devm_ioremap_resource(dev, mem_res);
if (IS_ERR(mmio_base)) {
- dev_err(dev, "memory map failed\n");
err = PTR_ERR(mmio_base);
goto out;
}
@@ -122,14 +150,22 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
goto out;
}
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
err = ufshcd_init(dev, &hba, mmio_base, irq);
if (err) {
dev_err(dev, "Intialization failed\n");
- goto out;
+ goto out_disable_rpm;
}
platform_set_drvdata(pdev, hba);
+ return 0;
+
+out_disable_rpm:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
out:
return err;
}
@@ -144,7 +180,7 @@ static int ufshcd_pltfrm_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
- disable_irq(hba->irq);
+ pm_runtime_get_sync(&pdev->dev);
ufshcd_remove(hba);
return 0;
}
@@ -157,6 +193,9 @@ static const struct of_device_id ufs_of_match[] = {
static const struct dev_pm_ops ufshcd_dev_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
};
static struct platform_driver ufshcd_pltfrm_driver = {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b743bd6fce6b..04884d663e4e 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -36,13 +36,31 @@
#include <linux/async.h>
#include "ufshcd.h"
+#include "unipro.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
+ UIC_POWER_MODE |\
UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
+/* NOP OUT retries waiting for NOP IN response */
+#define NOP_OUT_RETRIES 10
+/* Timeout after 30 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 30 /* msecs */
+
+/* Query request retries */
+#define QUERY_REQ_RETRIES 10
+/* Query request timeout */
+#define QUERY_REQ_TIMEOUT 30 /* msec */
+
+/* Expose the flag value from utp_upiu_query.value */
+#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+
+/* Interrupt aggregation default timeout, unit: 40us */
+#define INT_AGGR_DEF_TO 0x02
+
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
@@ -65,11 +83,39 @@ enum {
UFSHCD_INT_CLEAR,
};
-/* Interrupt aggregation options */
-enum {
- INT_AGGR_RESET,
- INT_AGGR_CONFIG,
-};
+/*
+ * ufshcd_wait_for_register - wait for register value to change
+ * @hba - per-adapter interface
+ * @reg - mmio register offset
+ * @mask - mask to apply to read register value
+ * @val - wait condition
+ * @interval_us - polling interval in microsecs
+ * @timeout_ms - timeout in millisecs
+ *
+ * Returns -ETIMEDOUT on error, zero on success
+ */
+static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us, unsigned long timeout_ms)
+{
+ int err = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ /* ignore bits that we don't intend to wait on */
+ val = val & mask;
+
+ while ((ufshcd_readl(hba, reg) & mask) != val) {
+ /* wakeup within 50us of expiry */
+ usleep_range(interval_us, interval_us + 50);
+
+ if (time_after(jiffies, timeout)) {
+ if ((ufshcd_readl(hba, reg) & mask) != val)
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ return err;
+}
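For reference, the call pattern used by ufshcd_clear_cmd() further below: poll the transfer-request doorbell until the tag's bit clears, sampling every 1000 us for at most 1000 ms.

	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
				       1 << tag, ~(1 << tag), 1000, 1000);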
/**
* ufshcd_get_intr_mask - Get the interrupt bit mask
@@ -191,18 +237,25 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
}
/**
- * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
- * @ucd_rsp_ptr: pointer to response UPIU
+ * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
+ * @hba: Pointer to adapter instance
*
- * This function checks the response UPIU for valid transaction type in
- * response field
- * Returns 0 on success, non-zero on failure
+ * This function gets UIC command argument3
+ * Returns 0 on success, non zero value on error
+ */
+static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
+{
+ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
+}
+
+/**
+ * ufshcd_get_req_rsp - returns the TR response transaction type
+ * @ucd_rsp_ptr: pointer to response UPIU
*/
static inline int
-ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
- return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
- UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
/**
@@ -218,31 +271,60 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
+/*
+ * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
+ * from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * Return the data segment length.
+ */
+static inline unsigned int
+ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_UPIU_DATA_SEG_LEN;
+}
+
/**
- * ufshcd_config_int_aggr - Configure interrupt aggregation values.
- * Currently there is no use case where we want to configure
- * interrupt aggregation dynamically. So to configure interrupt
- * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
- * INT_AGGR_TIMEOUT_VALUE are used.
+ * ufshcd_is_exception_event - Check if the device raised an exception event
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * The function checks if the device raised an exception event indicated in
+ * the Device Information field of response UPIU.
+ *
+ * Returns true if exception is raised, false otherwise.
+ */
+static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_EXCEPTION_EVENT ? true : false;
+}
+
+/**
+ * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
* @hba: per adapter instance
- * @option: Interrupt aggregation option
*/
static inline void
-ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
- switch (option) {
- case INT_AGGR_RESET:
- ufshcd_writel(hba, INT_AGGR_ENABLE |
- INT_AGGR_COUNTER_AND_TIMER_RESET,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
- break;
- case INT_AGGR_CONFIG:
- ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
- INT_AGGR_COUNTER_THRESHOLD_VALUE |
- INT_AGGR_TIMEOUT_VALUE,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
- break;
- }
+ ufshcd_writel(hba, INT_AGGR_ENABLE |
+ INT_AGGR_COUNTER_AND_TIMER_RESET,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
+}
+
+/**
+ * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
+ * @hba: per adapter instance
+ * @cnt: Interrupt aggregation counter threshold
+ * @tmout: Interrupt aggregation timeout value
+ */
+static inline void
+ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
+{
+ ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
+ INT_AGGR_COUNTER_THLD_VAL(cnt) |
+ INT_AGGR_TIMEOUT_VAL(tmout),
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
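This matches the single caller added in ufshcd_make_hba_operational() below: raise the aggregated interrupt after nutrs - 1 completions or after 2 x 40 us, whichever comes first.

	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);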
/**
@@ -298,15 +380,70 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
int len;
- if (lrbp->sense_buffer) {
- len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
+ if (lrbp->sense_buffer &&
+ ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
memcpy(lrbp->sense_buffer,
- lrbp->ucd_rsp_ptr->sense_data,
+ lrbp->ucd_rsp_ptr->sr.sense_data,
min_t(int, len, SCSI_SENSE_BUFFERSIZE));
}
}
/**
+ * ufshcd_query_to_cpu() - formats the buffer to native cpu endian
+ * @response: upiu query response to convert
+ */
+static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response)
+{
+ response->length = be16_to_cpu(response->length);
+ response->value = be32_to_cpu(response->value);
+}
+
+/**
+ * ufshcd_query_to_be() - formats the buffer to big endian
+ * @request: upiu query request to convert
+ */
+static inline void ufshcd_query_to_be(struct utp_upiu_query *request)
+{
+ request->length = cpu_to_be16(request->length);
+ request->value = cpu_to_be32(request->value);
+}
+
+/**
+ * ufshcd_copy_query_response() - Copy the Query Response and the data
+ * descriptor
+ * @hba: per adapter instance
+ * @lrb - pointer to local reference block
+ */
+static
+void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ /* Get the UPIU response */
+ query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
+ UPIU_RSP_CODE_OFFSET;
+
+ memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
+ ufshcd_query_to_cpu(&query_res->upiu_res);
+
+ /* Get the descriptor */
+ if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr +
+ GENERAL_UPIU_REQUEST_SIZE;
+ u16 len;
+
+ /* data segment length */
+ len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+
+ memcpy(hba->dev_cmd.query.descriptor, descp,
+ min_t(u16, len, QUERY_DESC_MAX_SIZE));
+ }
+}
+
+/**
* ufshcd_hba_capabilities - Read controller capabilities
* @hba: per adapter instance
*/
@@ -335,6 +472,18 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
}
/**
+ * ufshcd_get_upmcrs - Get the power mode change request status
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the UPMCRS field of HCS register
+ * Returns value of UPMCRS field
+ */
+static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
+{
+ return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
+}
+
+/**
* ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
* @hba: per adapter instance
* @uic_cmd: UIC command
@@ -519,76 +668,170 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
}
/**
+ * ufshcd_prepare_req_desc_hdr() - Fills the requests header
+ * descriptor according to request
+ * @lrbp: pointer to local reference block
+ * @upiu_flags: flags required in the header
+ * @cmd_dir: requests data direction
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
+ u32 *upiu_flags, enum dma_data_direction cmd_dir)
+{
+ struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+ u32 data_direction;
+ u32 dword_0;
+
+ if (cmd_dir == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ *upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (cmd_dir == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ *upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ *upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ dword_0 = data_direction | (lrbp->command_type
+ << UPIU_COMMAND_TYPE_OFFSET);
+ if (lrbp->intr_cmd)
+ dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 = cpu_to_le32(dword_0);
+
+ /*
+ * assigning invalid value for command status. Controller
+ * updates OCS on command completion, with the command
+ * status
+ */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+}
+
+/**
+ * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
+ * for scsi commands
+ * @lrbp - local reference block pointer
+ * @upiu_flags - flags
+ */
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_COMMAND, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_req_ptr->header.dword_2 = 0;
+
+ ucd_req_ptr->sc.exp_data_transfer_len =
+ cpu_to_be32(lrbp->cmd->sdb.length);
+
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
+ (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
+}
+
+/**
+ * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
+ * for query requests
+ * @hba: UFS hba
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ struct ufs_query *query = &hba->dev_cmd.query;
+ u16 len = query->request.upiu_req.length;
+ u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
+
+ /* Query request header */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ 0, query->request.query_func, 0, 0);
+
+ /* Data segment length */
+ ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
+ 0, 0, len >> 8, (u8)len);
+
+ /* Copy the Query Request buffer as is */
+ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
+ QUERY_OSF_SIZE);
+ ufshcd_query_to_be(&ucd_req_ptr->qr);
+
+ /* Copy the Descriptor */
+ if ((len > 0) && (query->request.upiu_req.opcode ==
+ UPIU_QUERY_OPCODE_WRITE_DESC)) {
+ memcpy(descp, query->descriptor,
+ min_t(u16, len, QUERY_DESC_MAX_SIZE));
+ }
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+}
+
+/**
* ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
+ * @hba - per adapter instance
* @lrb - pointer to local reference block
*/
-static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
+static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
- struct utp_transfer_req_desc *req_desc;
- struct utp_upiu_cmd *ucd_cmd_ptr;
- u32 data_direction;
u32 upiu_flags;
-
- ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
- req_desc = lrbp->utr_descriptor_ptr;
+ int ret = 0;
switch (lrbp->command_type) {
case UTP_CMD_TYPE_SCSI:
- if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
- data_direction = UTP_DEVICE_TO_HOST;
- upiu_flags = UPIU_CMD_FLAGS_READ;
- } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
- data_direction = UTP_HOST_TO_DEVICE;
- upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ if (likely(lrbp->cmd)) {
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+ lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
- data_direction = UTP_NO_DATA_TRANSFER;
- upiu_flags = UPIU_CMD_FLAGS_NONE;
+ ret = -EINVAL;
}
-
- /* Transfer request descriptor header fields */
- req_desc->header.dword_0 =
- cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
-
- /*
- * assigning invalid value for command status. Controller
- * updates OCS on command completion, with the command
- * status
- */
- req_desc->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- /* command descriptor fields */
- ucd_cmd_ptr->header.dword_0 =
- cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
- upiu_flags,
- lrbp->lun,
- lrbp->task_tag));
- ucd_cmd_ptr->header.dword_1 =
- cpu_to_be32(
- UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
- 0,
- 0,
- 0));
-
- /* Total EHS length and Data segment length will be zero */
- ucd_cmd_ptr->header.dword_2 = 0;
-
- ucd_cmd_ptr->exp_data_transfer_len =
- cpu_to_be32(lrbp->cmd->sdb.length);
-
- memcpy(ucd_cmd_ptr->cdb,
- lrbp->cmd->cmnd,
- (min_t(unsigned short,
- lrbp->cmd->cmd_len,
- MAX_CDB_SIZE)));
break;
case UTP_CMD_TYPE_DEV_MANAGE:
- /* For query function implementation */
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+ ufshcd_prepare_utp_query_req_upiu(
+ hba, lrbp, upiu_flags);
+ else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+ ufshcd_prepare_utp_nop_upiu(lrbp);
+ else
+ ret = -EINVAL;
break;
case UTP_CMD_TYPE_UFS:
/* For UFS native command implementation */
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "%s: UFS native command are not supported\n",
+ __func__);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
+ __func__, lrbp->command_type);
break;
} /* end of switch */
+
+ return ret;
}
/**
@@ -615,21 +858,37 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
+ /* acquire the tag to make sure device cmds don't use it */
+ if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
+ /*
+ * Dev manage command in progress, requeue the command.
+ * Requeuing the command helps in cases where the request *may*
+ * find different tag instead of waiting for dev manage command
+ * completion.
+ */
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = cmd->device->lun;
-
+ lrbp->intr_cmd = false;
lrbp->command_type = UTP_CMD_TYPE_SCSI;
/* form UPIU before issuing the command */
- ufshcd_compose_upiu(lrbp);
+ ufshcd_compose_upiu(hba, lrbp);
err = ufshcd_map_sg(lrbp);
- if (err)
+ if (err) {
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
+ }
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -639,6 +898,338 @@ out:
return err;
}
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ lrbp->intr_cmd = true; /* No interrupt aggregation */
+ hba->dev_cmd.type = cmd_type;
+
+ return ufshcd_compose_upiu(hba, lrbp);
+}
+
+static int
+ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ unsigned long flags;
+ u32 mask = 1 << tag;
+
+ /* clear outstanding transaction before retry */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * wait for h/w to clear the corresponding bit in the doorbell.
+ * max. wait is 1 sec.
+ */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL,
+ mask, ~mask, 1000, 1000);
+
+ return err;
+}
+
+/**
+ * ufshcd_dev_cmd_completion() - handles device management command responses
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int
+ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int resp;
+ int err = 0;
+
+ resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+
+ switch (resp) {
+ case UPIU_TRANSACTION_NOP_IN:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n",
+ __func__, resp);
+ }
+ break;
+ case UPIU_TRANSACTION_QUERY_RSP:
+ ufshcd_copy_query_response(hba, lrbp);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ err = -EPERM;
+ dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
+ __func__);
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
+ __func__, resp);
+ break;
+ }
+
+ return err;
+}
+
+static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, int max_timeout)
+{
+ int err = 0;
+ unsigned long time_left;
+ unsigned long flags;
+
+ time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ msecs_to_jiffies(max_timeout));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->dev_cmd.complete = NULL;
+ if (likely(time_left)) {
+ err = ufshcd_get_tr_ocs(lrbp);
+ if (!err)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (!time_left) {
+ err = -ETIMEDOUT;
+ if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+ /* successfully cleared the command, retry if needed */
+ err = -EAGAIN;
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_dev_cmd_tag - Get device management command tag
+ * @hba: per-adapter instance
+ * @tag_out: pointer to variable with available slot value
+ *
+ * Get a free slot and lock it until device management command
+ * completes.
+ *
+ * Returns false if a free slot is unavailable for locking, else
+ * returns true with the tag value in @tag_out.
+ */
+static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
+{
+ int tag;
+ bool ret = false;
+ unsigned long tmp;
+
+ if (!tag_out)
+ goto out;
+
+ do {
+ tmp = ~hba->lrb_in_use;
+ tag = find_last_bit(&tmp, hba->nutrs);
+ if (tag >= hba->nutrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
+
+ *tag_out = tag;
+ ret = true;
+out:
+ return ret;
+}
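The tag allocator above combines find_last_bit() on the inverted in-use mask with test_and_set_bit_lock(), retrying if another context wins the race for the bit. The same idiom on a standalone bitmap (names are illustrative):

static bool grab_slot(unsigned long *in_use, int nbits, int *slot)
{
	unsigned long free;
	int bit;

	do {
		free = ~(*in_use);
		bit = find_last_bit(&free, nbits);
		if (bit >= nbits)
			return false;	/* everything busy */
	} while (test_and_set_bit_lock(bit, in_use));	/* lost race: retry */

	*slot = bit;
	return true;
}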
+
+static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
+{
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+}
+
+/**
+ * ufshcd_exec_dev_cmd - API for sending device management requests
+ * @hba - UFS hba
+ * @cmd_type - specifies the type (NOP, Query...)
+ * @timeout - time in seconds
+ *
+ * NOTE: Since there is only one available tag for device management commands,
+ * it is expected you hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+ enum dev_cmd_type cmd_type, int timeout)
+{
+ struct ufshcd_lrb *lrbp;
+ int err;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by SCSI request timeout.
+ */
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (unlikely(err))
+ goto out_put_tag;
+
+ hba->dev_cmd.complete = &wait;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+out_put_tag:
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ return err;
+}
+
+/**
+ * ufshcd_query_flag() - API function for sending flag query requests
+ * @hba: per-adapter instance
+ * @opcode: flag query to perform
+ * @idn: flag idn to access
+ * @flag_res: the flag value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res)
+{
+ struct ufs_query_req *request;
+ struct ufs_query_res *response;
+ int err;
+
+ BUG_ON(!hba);
+
+ mutex_lock(&hba->dev_cmd.lock);
+ request = &hba->dev_cmd.query.request;
+ response = &hba->dev_cmd.query.response;
+ memset(request, 0, sizeof(struct ufs_query_req));
+ memset(response, 0, sizeof(struct ufs_query_res));
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_SET_FLAG:
+ case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ if (!flag_res) {
+ /* No dummy reads */
+ dev_err(hba->dev, "%s: Invalid argument for read request\n",
+ __func__);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query flag opcode but got = %d\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ request->upiu_req.opcode = opcode;
+ request->upiu_req.idn = idn;
+
+ /* Send query request */
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
+ QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Sending flag query for idn %d failed, err = %d\n",
+ __func__, idn, err);
+ goto out_unlock;
+ }
+
+ if (flag_res)
+ *flag_res = (response->upiu_res.value &
+ MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+ return err;
+}
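Typical calls, as made by ufshcd_complete_dev_init() below: a set needs no result pointer, while a read returns the flag through flag_res.

	bool flag_res;
	int err;

	/* set: no result wanted, pass NULL */
	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				QUERY_FLAG_IDN_FDEVICEINIT, NULL);

	/* read: the flag value comes back through flag_res */
	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);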
+
+/**
+ * ufshcd_query_attr - API function for sending attribute requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+ struct ufs_query_req *request;
+ struct ufs_query_res *response;
+ int err;
+
+ BUG_ON(!hba);
+
+ if (!attr_val) {
+ dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ request = &hba->dev_cmd.query.request;
+ response = &hba->dev_cmd.query.response;
+ memset(request, 0, sizeof(struct ufs_query_req));
+ memset(response, 0, sizeof(struct ufs_query_res));
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ request->upiu_req.value = *attr_val;
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ request->upiu_req.opcode = opcode;
+ request->upiu_req.idn = idn;
+ request->upiu_req.index = index;
+ request->upiu_req.selector = selector;
+
+ /* Send query request */
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
+ QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
+ __func__, opcode, idn, err);
+ goto out_unlock;
+ }
+
+ *attr_val = response->upiu_res.value;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ return err;
+}
+
/**
* ufshcd_memory_alloc - allocate memory for host memory space data structures
* @hba: per adapter instance
@@ -774,8 +1365,8 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
- hba->lrb[i].ucd_cmd_ptr =
- (struct utp_upiu_cmd *)(cmd_descp + i);
+ hba->lrb[i].ucd_req_ptr =
+ (struct utp_upiu_req *)(cmd_descp + i);
hba->lrb[i].ucd_rsp_ptr =
(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
hba->lrb[i].ucd_prdt_ptr =
@@ -809,6 +1400,253 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
}
/**
+ * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @attr_set: attribute set type as uic command argument2
+ * @mib_val: setting value as uic command argument3
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-set",
+ "dme-peer-set"
+ };
+ const char *set = action[!!peer];
+ int ret;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+ uic_cmd.argument1 = attr_sel;
+ uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+ uic_cmd.argument3 = mib_val;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer)
+{
+ struct uic_command uic_cmd = {0};
+ static const char *const action[] = {
+ "dme-get",
+ "dme-peer-get"
+ };
+ const char *get = action[!!peer];
+ int ret;
+
+ uic_cmd.command = peer ?
+ UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+ uic_cmd.argument1 = attr_sel;
+
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret) {
+ dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ goto out;
+ }
+
+ if (mib_val)
+ *mib_val = uic_cmd.argument3;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
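The ufshcd_dme_set()/ufshcd_dme_get()/ufshcd_dme_peer_get() calls used further below are presumably thin wrappers over the two exported helpers above. A plausible sketch, assuming 0 selects a normal local set and a non-zero peer flag as the signatures suggest (the actual wrapper definitions live in ufshcd.h, not in this hunk):

static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, 0 /* normal set, assumed */,
				   mib_val, 0 /* local */);
}

static inline int ufshcd_dme_get(struct ufs_hba *hba, u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, 0 /* local */);
}

static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, u32 attr_sel,
				      u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, 1 /* peer */);
}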
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+ struct uic_command uic_cmd = {0};
+ struct completion pwr_done;
+ unsigned long flags;
+ u8 status;
+ int ret;
+
+ uic_cmd.command = UIC_CMD_DME_SET;
+ uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+ uic_cmd.argument3 = mode;
+ init_completion(&pwr_done);
+
+ mutex_lock(&hba->uic_cmd_mutex);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pwr_done = &pwr_done;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret) {
+ dev_err(hba->dev,
+ "pwr mode change with mode 0x%x uic error %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(hba->pwr_done,
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ dev_err(hba->dev,
+ "pwr mode change with mode 0x%x completion timeout\n",
+ mode);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = ufshcd_get_upmcrs(hba);
+ if (status != PWR_LOCAL) {
+ dev_err(hba->dev,
+ "pwr mode change failed, host umpcrs:0x%x\n",
+ status);
+ ret = (status != PWR_OK) ? status : -1;
+ }
+out:
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pwr_done = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ mutex_unlock(&hba->uic_cmd_mutex);
+ return ret;
+}
+
+/**
+ * ufshcd_config_max_pwr_mode - Set & Change power mode with
+ * maximum capability attribute information.
+ * @hba: per adapter instance
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+{
+ enum {RX = 0, TX = 1};
+ u32 lanes[] = {1, 1};
+ u32 gear[] = {1, 1};
+ u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+ int ret;
+
+ /* Get the connected lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+
+ /*
+ * First, get the maximum gears of HS speed.
+ * If a zero value, it means there is no HSGEAR capability.
+ * Then, get the maximum gears of PWM speed.
+ */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
+ if (!gear[RX]) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
+ pwr[RX] = SLOWAUTO_MODE;
+ }
+
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
+ if (!gear[TX]) {
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &gear[TX]);
+ pwr[TX] = SLOWAUTO_MODE;
+ }
+
+ /*
+ * Configure attributes for power mode change with below.
+ * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+ * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+ * - PA_HSSERIES
+ */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
+ if (pwr[RX] == FASTAUTO_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
+ if (pwr[TX] == FASTAUTO_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+
+ if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
+ if (ret)
+ dev_err(hba->dev,
+ "pwr_mode: power mode change failed %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ * @hba: per-adapter instance
+ *
+ * Set fDeviceInit flag and poll until device toggles it.
+ */
+static int ufshcd_complete_dev_init(struct ufs_hba *hba)
+{
+ int i, retries, err = 0;
+ bool flag_res = 1;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Set the fDeviceInit flag */
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ if (!err || err == -ETIMEDOUT)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ if (err) {
+ dev_err(hba->dev,
+ "%s setting fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* poll for max. 100 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 100 && !err && flag_res; i++) {
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = ufshcd_query_flag(hba,
+ UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+ if (!err || err == -ETIMEDOUT)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
+ err);
+ }
+ }
+ if (err)
+ dev_err(hba->dev,
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ else if (flag_res)
+ dev_err(hba->dev,
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+
+out:
+ return err;
+}
+
+/**
* ufshcd_make_hba_operational - Make UFS controller operational
* @hba: per adapter instance
*
@@ -838,7 +1676,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
/* Configure interrupt aggregation */
- ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+ ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
/* Configure UTRL and UTMRL base address registers */
ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
@@ -961,6 +1799,38 @@ out:
}
/**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ * @hba: per-adapter instance
+ *
+ * Send NOP OUT UPIU and wait for NOP IN response to check whether the
+ * device Transport Protocol (UTP) layer is ready after a reset.
+ * If the UTP layer at the device side is not initialized, it may
+ * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
+ * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+ int err = 0;
+ int retries;
+
+ mutex_lock(&hba->dev_cmd.lock);
+ for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+ NOP_OUT_TIMEOUT);
+
+ if (!err || err == -ETIMEDOUT)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ mutex_unlock(&hba->dev_cmd.lock);
+
+ if (err)
+ dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+ return err;
+}
+
+/**
* ufshcd_do_reset - reset the host controller
* @hba: per adapter instance
*
@@ -986,13 +1856,20 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
for (tag = 0; tag < hba->nutrs; tag++) {
if (test_bit(tag, &hba->outstanding_reqs)) {
lrbp = &hba->lrb[tag];
- scsi_dma_unmap(lrbp->cmd);
- lrbp->cmd->result = DID_RESET << 16;
- lrbp->cmd->scsi_done(lrbp->cmd);
- lrbp->cmd = NULL;
+ if (lrbp->cmd) {
+ scsi_dma_unmap(lrbp->cmd);
+ lrbp->cmd->result = DID_RESET << 16;
+ lrbp->cmd->scsi_done(lrbp->cmd);
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ }
}
}
+ /* complete device management command */
+ if (hba->dev_cmd.complete)
+ complete(hba->dev_cmd.complete);
+
/* clear outstanding request/task bit maps */
hba->outstanding_reqs = 0;
hba->outstanding_tasks = 0;
@@ -1145,32 +2022,24 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
int result = 0;
switch (scsi_status) {
- case SAM_STAT_GOOD:
- result |= DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
- break;
case SAM_STAT_CHECK_CONDITION:
+ ufshcd_copy_sense_data(lrbp);
+ case SAM_STAT_GOOD:
result |= DID_OK << 16 |
COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
- ufshcd_copy_sense_data(lrbp);
- break;
- case SAM_STAT_BUSY:
- result |= SAM_STAT_BUSY;
+ scsi_status;
break;
case SAM_STAT_TASK_SET_FULL:
-
/*
* If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
* depth needs to be adjusted to the exact number of
* outstanding commands the LUN can handle at any given time.
*/
ufshcd_adjust_lun_qdepth(lrbp->cmd);
- result |= SAM_STAT_TASK_SET_FULL;
- break;
+ case SAM_STAT_BUSY:
case SAM_STAT_TASK_ABORTED:
- result |= SAM_STAT_TASK_ABORTED;
+ ufshcd_copy_sense_data(lrbp);
+ result |= scsi_status;
break;
default:
result |= DID_ERROR << 16;
@@ -1199,27 +2068,39 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
switch (ocs) {
case OCS_SUCCESS:
+ result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
- /* check if the returned transfer response is valid */
- result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
- if (result) {
+ switch (result) {
+ case UPIU_TRANSACTION_RESPONSE:
+ /*
+ * get the response UPIU result to extract
+ * the SCSI command status
+ */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ /*
+ * get the result based on SCSI status response
+ * to notify the SCSI midlayer of the command status
+ */
+ scsi_status = result & MASK_SCSI_STATUS;
+ result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+
+ if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ schedule_work(&hba->eeh_work);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ result = DID_ERROR << 16;
+ dev_err(hba->dev,
+ "Reject UPIU not fully implemented\n");
+ break;
+ default:
+ result = DID_ERROR << 16;
dev_err(hba->dev,
- "Invalid response = %x\n", result);
+ "Unexpected request response code = %x\n",
+ result);
break;
}
-
- /*
- * get the response UPIU result to extract
- * the SCSI command status
- */
- result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
-
- /*
- * get the result based on SCSI status response
- * to notify the SCSI midlayer of the command status
- */
- scsi_status = result & MASK_SCSI_STATUS;
- result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
break;
case OCS_ABORTED:
result |= DID_ABORT << 16;
@@ -1243,14 +2124,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/**
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
+ * @intr_status: interrupt status generated by the controller
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
+static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
- if (hba->active_uic_cmd) {
+ if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
+ hba->active_uic_cmd->argument3 =
+ ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
}
+
+ if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
+ complete(hba->pwr_done);
}
/**
@@ -1259,28 +2146,40 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
*/
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
- struct ufshcd_lrb *lrb;
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
unsigned long completed_reqs;
u32 tr_doorbell;
int result;
int index;
+ bool int_aggr_reset = false;
- lrb = hba->lrb;
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
for (index = 0; index < hba->nutrs; index++) {
if (test_bit(index, &completed_reqs)) {
+ lrbp = &hba->lrb[index];
+ cmd = lrbp->cmd;
+ /*
+ * Don't skip resetting interrupt aggregation counters
+ * if a regular command is present.
+ */
+ int_aggr_reset |= !lrbp->intr_cmd;
- result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
-
- if (lrb[index].cmd) {
- scsi_dma_unmap(lrb[index].cmd);
- lrb[index].cmd->result = result;
- lrb[index].cmd->scsi_done(lrb[index].cmd);
-
+ if (cmd) {
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
/* Mark completed command as NULL in LRB */
- lrb[index].cmd = NULL;
+ lrbp->cmd = NULL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ /* Do not touch lrbp after scsi done */
+ cmd->scsi_done(cmd);
+ } else if (lrbp->command_type ==
+ UTP_CMD_TYPE_DEV_MANAGE) {
+ if (hba->dev_cmd.complete)
+ complete(hba->dev_cmd.complete);
}
} /* end of if */
} /* end of for */
@@ -1288,8 +2187,238 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;
+ /* we might have free'd some tags above */
+ wake_up(&hba->dev_cmd.tag_wq);
+
/* Reset interrupt aggregation counters */
- ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+ if (int_aggr_reset)
+ ufshcd_reset_intr_aggr(hba);
+}
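The completion scan above derives the set of finished slots by XOR-ing a doorbell snapshot with the driver's outstanding mask. A worked example with invented values:

/*
 * outstanding_reqs = 0b1011   (tags 0, 1 and 3 were issued)
 * tr_doorbell      = 0b1000   (tag 3 is still executing)
 * completed_reqs   = tr_doorbell ^ outstanding_reqs = 0b0011
 *                    -> tags 0 and 1 have completed
 */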
+
+/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (!(hba->ee_ctrl_mask & mask))
+ goto out;
+
+ val = hba->ee_ctrl_mask & ~mask;
+ val &= 0xFFFF; /* 2 bytes */
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask &= ~mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enable corresponding exception event in the device to allow
+ * device to alert host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (hba->ee_ctrl_mask & mask)
+ goto out;
+
+ val = hba->ee_ctrl_mask | mask;
+ val &= 0xFFFF; /* 2 bytes */
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask |= mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allow device to manage background operations on its own. Enabling
+ * this might lead to inconsistent latencies during normal data transfers
+ * as the device is allowed to manage its own way of handling background
+ * operations.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->auto_bkops_enabled)
+ goto out;
+
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = true;
+
+ /* No need of URGENT_BKOPS exception from the device */
+ err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err)
+ dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+ __func__, err);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - block device in doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has drawback of device moving into critical state where the device is
+ * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
+ * host is idle so that BKOPS are managed effectively without any negative
+ * impacts.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * If host assisted BKOPs is to be enabled, make sure
+ * urgent bkops exception is allowed.
+ */
+ err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+ __func__, err);
+ ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = false;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force enable of auto bkops
+ * @hba: per adapter instance
+ *
+ * After a device reset the device may toggle the BKOPS_EN flag
+ * to default value. The s/w tracking variables should be updated
+ * as well. Do this by forcing enable of auto bkops.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+ hba->auto_bkops_enabled = false;
+ hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+ ufshcd_enable_auto_bkops(hba);
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Read the bBackgroundOpStatus attribute and, if the reported status
+ * indicates a performance impact or worse, enable the fBackgroundOpsEn
+ * flag in the device to permit background operations.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+ int err;
+ u32 status = 0;
+
+ err = ufshcd_get_bkops_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ }
+
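+ /*
+ * bBackgroundOpStatus is defined (per JEDEC UFS) in the low
+ * nibble: 0 - not required, 1 - required but not critical,
+ * 2 - required, performance impacted, 3 - critical.
+ */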
+ status = status & 0xF;
+
+ /* handle only if status indicates performance impact or critical */
+ if (status >= BKOPS_STATUS_PERF_IMPACT)
+ err = ufshcd_enable_auto_bkops(hba);
+out:
+ return err;
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by the device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int err;
+ u32 status = 0;
+ hba = container_of(work, struct ufs_hba, eeh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ err = ufshcd_get_ee_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get exception status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ status &= hba->ee_ctrl_mask;
+ if (status & MASK_EE_URGENT_BKOPS) {
+ err = ufshcd_urgent_bkops(hba);
+ if (err)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+ }
+out:
+ pm_runtime_put_sync(hba->dev);
+ return;
}
/**
@@ -1301,9 +2430,11 @@ static void ufshcd_fatal_err_handler(struct work_struct *work)
struct ufs_hba *hba;
hba = container_of(work, struct ufs_hba, feh_workq);
+ pm_runtime_get_sync(hba->dev);
/* check if reset is already in progress */
if (hba->ufshcd_state != UFSHCD_STATE_RESET)
ufshcd_do_reset(hba);
+ pm_runtime_put_sync(hba->dev);
}
/**
@@ -1352,8 +2483,8 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (hba->errors)
ufshcd_err_handler(hba);
- if (intr_status & UIC_COMMAND_COMPL)
- ufshcd_uic_cmd_compl(hba);
+ if (intr_status & UFSHCD_UIC_MASK)
+ ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
ufshcd_tmc_handler(hba);
@@ -1432,10 +2563,10 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
task_req_upiup =
(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
task_req_upiup->header.dword_0 =
- cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
- lrbp->lun, lrbp->task_tag));
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+ lrbp->lun, lrbp->task_tag);
task_req_upiup->header.dword_1 =
- cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
+ UPIU_HEADER_DWORD(0, tm_function, 0, 0);
task_req_upiup->input_param1 = lrbp->lun;
task_req_upiup->input_param1 =
@@ -1502,9 +2633,11 @@ static int ufshcd_device_reset(struct scsi_cmnd *cmd)
if (hba->lrb[pos].cmd) {
scsi_dma_unmap(hba->lrb[pos].cmd);
hba->lrb[pos].cmd->result =
- DID_ABORT << 16;
+ DID_ABORT << 16;
hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
hba->lrb[pos].cmd = NULL;
+ clear_bit_unlock(pos, &hba->lrb_in_use);
+ wake_up(&hba->dev_cmd.tag_wq);
}
}
} /* end of for */
@@ -1572,6 +2705,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
__clear_bit(tag, &hba->outstanding_reqs);
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
+
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ wake_up(&hba->dev_cmd.tag_wq);
out:
return err;
}
@@ -1587,8 +2723,24 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
int ret;
ret = ufshcd_link_startup(hba);
- if (!ret)
- scsi_scan_host(hba->host);
+ if (ret)
+ goto out;
+
+ ufshcd_config_max_pwr_mode(hba);
+
+ ret = ufshcd_verify_dev_init(hba);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_complete_dev_init(hba);
+ if (ret)
+ goto out;
+
+ ufshcd_force_reset_auto_bkops(hba);
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+out:
+ return;
}
static struct scsi_host_template ufshcd_driver_template = {
@@ -1650,6 +2802,34 @@ int ufshcd_resume(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_resume);
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+ if (!hba)
+ return 0;
+
+ /*
+ * The device is idle with no requests in the queue,
+ * allow background operations.
+ */
+ return ufshcd_enable_auto_bkops(hba);
+}
+EXPORT_SYMBOL(ufshcd_runtime_suspend);
+
+int ufshcd_runtime_resume(struct ufs_hba *hba)
+{
+ if (!hba)
+ return 0;
+
+ return ufshcd_disable_auto_bkops(hba);
+}
+EXPORT_SYMBOL(ufshcd_runtime_resume);
+
+int ufshcd_runtime_idle(struct ufs_hba *hba)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_runtime_idle);
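+
+/*
+ * Illustrative sketch (not part of this patch) of how a bus glue driver
+ * might route its runtime PM callbacks to the hooks above; the function
+ * name is hypothetical:
+ *
+ *	static int ufshcd_glue_runtime_suspend(struct device *dev)
+ *	{
+ *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
+ *	}
+ *
+ * wired up via SET_RUNTIME_PM_OPS() in the glue driver's dev_pm_ops.
+ */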
+
/**
* ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
@@ -1657,11 +2837,11 @@ EXPORT_SYMBOL_GPL(ufshcd_resume);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
+ scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
- scsi_remove_host(hba->host);
scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -1740,10 +2920,17 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
/* Initialize work queues */
INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+ INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
+ /* Initialize mutex for device management commands */
+ mutex_init(&hba->dev_cmd.lock);
+
+ /* Initialize device management tag acquire wait queue */
+ init_waitqueue_head(&hba->dev_cmd.tag_wq);
+
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
@@ -1773,6 +2960,9 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
*hba_handle = hba;
+ /* Hold auto suspend until async scan completes */
+ pm_runtime_get_sync(dev);
+
async_schedule(ufshcd_async_scan, hba);
return 0;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 49590ee07acc..577679a2d189 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -68,6 +68,11 @@
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"
+enum dev_cmd_type {
+ DEV_CMD_TYPE_NOP = 0x0,
+ DEV_CMD_TYPE_QUERY = 0x1,
+};
+
/**
* struct uic_command - UIC command structure
* @command: UIC command
@@ -91,7 +96,7 @@ struct uic_command {
/**
* struct ufshcd_lrb - local reference block
* @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_req_ptr: UCD address of the command
* @ucd_rsp_ptr: Response UPIU address for this command
* @ucd_prdt_ptr: PRDT address of the command
* @cmd: pointer to SCSI command
@@ -101,10 +106,11 @@ struct uic_command {
* @command_type: SCSI, UFS, Query.
* @task_tag: Task tag of the command
* @lun: LUN of the command
+ * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_cmd *ucd_cmd_ptr;
+ struct utp_upiu_req *ucd_req_ptr;
struct utp_upiu_rsp *ucd_rsp_ptr;
struct ufshcd_sg_entry *ucd_prdt_ptr;
@@ -116,8 +122,35 @@ struct ufshcd_lrb {
int command_type;
int task_tag;
unsigned int lun;
+ bool intr_cmd;
};
+/**
+ * struct ufs_query - holds relevant data structures for query request
+ * @request: request upiu and function
+ * @descriptor: buffer for sending/receiving descriptor
+ * @response: response upiu and response
+ */
+struct ufs_query {
+ struct ufs_query_req request;
+ u8 *descriptor;
+ struct ufs_query_res response;
+};
+
+/**
+ * struct ufs_dev_cmd - all fields associated with device management commands
+ * @type: device management command type - Query, NOP OUT
+ * @lock: lock to allow one command at a time
+ * @complete: internal command completion
+ * @tag_wq: wait queue until a free command slot is available
+ * @query: query request and response data
+ */
+struct ufs_dev_cmd {
+ enum dev_cmd_type type;
+ struct mutex lock;
+ struct completion *complete;
+ wait_queue_head_t tag_wq;
+ struct ufs_query query;
+};
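+
+/*
+ * Only one device management command (NOP OUT or Query) is outstanding
+ * at a time: dev_cmd.lock serializes issuers, while tag_wq parks waiters
+ * until a free LRB tag (tracked in hba->lrb_in_use) becomes available.
+ */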
/**
* struct ufs_hba - per adapter private structure
@@ -131,6 +164,7 @@ struct ufshcd_lrb {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
+ * @lrb_in_use: bitmap of tags (LRBs) currently in use
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
@@ -141,11 +175,16 @@ struct ufshcd_lrb {
* @active_uic_cmd: handle of active UIC command
* @uic_cmd_mutex: mutex for uic command
* @ufshcd_tm_wait_queue: wait queue for task management
+ * @pwr_done: completion for power mode change
* @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states
* @intr_mask: Interrupt Mask Bits
+ * @ee_ctrl_mask: Exception event control mask
* @feh_workq: Work queue for fatal controller error handling
+ * @eeh_work: Worker to handle exception events
* @errors: HBA errors
+ * @dev_cmd: ufs device management command information
+ * @auto_bkops_enabled: tracks whether auto BKOPS is enabled in the device
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -164,6 +203,7 @@ struct ufs_hba {
struct device *dev;
struct ufshcd_lrb *lrb;
+ unsigned long lrb_in_use;
unsigned long outstanding_tasks;
unsigned long outstanding_reqs;
@@ -180,14 +220,23 @@ struct ufs_hba {
wait_queue_head_t ufshcd_tm_wait_queue;
unsigned long tm_condition;
+ struct completion *pwr_done;
+
u32 ufshcd_state;
u32 intr_mask;
+ u16 ee_ctrl_mask;
/* Work Queues */
struct work_struct feh_workq;
+ struct work_struct eeh_work;
/* HBA Errors */
u32 errors;
+
+ /* Device management request data */
+ struct ufs_dev_cmd dev_cmd;
+
+ bool auto_bkops_enabled;
};
#define ufshcd_writel(hba, val, reg) \
@@ -208,4 +257,64 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
}
+static inline void check_upiu_size(void)
+{
+ BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
+ GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
+}
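+
+/*
+ * Compile-time guarantee that one command UPIU slot can hold the largest
+ * request this driver builds: a general request UPIU plus a maximum-size
+ * query descriptor.
+ */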
+
+extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
+extern int ufshcd_runtime_resume(struct ufs_hba *hba);
+extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
+ u8 attr_set, u32 mib_val, u8 peer);
+extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+ u32 *mib_val, u8 peer);
+
+/* UIC command interfaces for DME primitives */
+#define DME_LOCAL 0
+#define DME_PEER 1
+#define ATTR_SET_NOR 0 /* NORMAL */
+#define ATTR_SET_ST 1 /* STATIC */
+
+static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
+ u32 mib_val)
+{
+ return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
+ mib_val, DME_PEER);
+}
+
+static inline int ufshcd_dme_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
+}
+
+static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
+ u32 attr_sel, u32 *mib_val)
+{
+ return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
+}
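+
+/*
+ * Illustrative use of the DME helpers above with UniPro attributes from
+ * unipro.h (attribute values are examples only):
+ *
+ *	u32 gear;
+ *
+ *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), 1);
+ *	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_RXGEAR), &gear);
+ */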
+
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d5c5f1482d7d..0475c6619a68 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,7 +39,7 @@
enum {
TASK_REQ_UPIU_SIZE_DWORDS = 8,
TASK_RSP_UPIU_SIZE_DWORDS = 8,
- ALIGNED_UPIU_SIZE = 128,
+ ALIGNED_UPIU_SIZE = 512,
};
/* UFSHCI Registers */
@@ -124,6 +124,9 @@ enum {
#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
+#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\
+ UIC_POWER_MODE)
+
#define UFSHCD_ERROR_MASK (UIC_ERROR |\
DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
@@ -142,6 +145,15 @@ enum {
#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
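+
+/* UPMCRS field values (HCS register) - result of a UIC power mode change */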
+enum {
+ PWR_OK = 0x0,
+ PWR_LOCAL = 0x01,
+ PWR_REMOTE = 0x02,
+ PWR_BUSY = 0x03,
+ PWR_ERROR_CAP = 0x04,
+ PWR_FATAL_ERROR = 0x05,
+};
+
/* HCE - Host Controller Enable 34h */
#define CONTROLLER_ENABLE UFS_BIT(0)
#define CONTROLLER_DISABLE 0x0
@@ -191,6 +203,12 @@ enum {
#define CONFIG_RESULT_CODE_MASK 0xFF
#define GENERIC_ERROR_CODE_MASK 0xFF
+#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
+ ((sel) & 0xFFFF))
+#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
+#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
+#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
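+
+/*
+ * These compose UICCMDARG1 for DME_{GET,SET,PEER_GET,PEER_SET}: the MIB
+ * attribute ID goes in bits 31:16 and the GenSelectorIndex in bits 15:0,
+ * e.g. UIC_ARG_MIB_SEL(PA_PWRMODEUSERDATA0, 0).
+ */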
+
/* UIC Commands */
enum {
UIC_CMD_DME_GET = 0x01,
@@ -226,8 +244,8 @@ enum {
#define MASK_UIC_COMMAND_RESULT 0xFF
-#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8)
-#define INT_AGGR_TIMEOUT_VALUE (0x02)
+#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8)
+#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0)
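+
+/*
+ * Illustrative composition (values are examples only): a 31-request
+ * counter threshold combined with a timeout of two 40us units:
+ *
+ *	val = INT_AGGR_COUNTER_THLD_VAL(0x1F) | INT_AGGR_TIMEOUT_VAL(0x02);
+ */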
/* Interrupt disable masks */
enum {
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
new file mode 100644
index 000000000000..0bb8041c047a
--- /dev/null
+++ b/drivers/scsi/ufs/unipro.h
@@ -0,0 +1,151 @@
+/*
+ * drivers/scsi/ufs/unipro.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+
+/*
+ * PHY Adapter attributes
+ */
+#define PA_ACTIVETXDATALANES 0x1560
+#define PA_ACTIVERXDATALANES 0x1580
+#define PA_TXTRAILINGCLOCKS 0x1564
+#define PA_PHY_TYPE 0x1500
+#define PA_AVAILTXDATALANES 0x1520
+#define PA_AVAILRXDATALANES 0x1540
+#define PA_MINRXTRAILINGCLOCKS 0x1543
+#define PA_TXPWRSTATUS 0x1567
+#define PA_RXPWRSTATUS 0x1582
+#define PA_TXFORCECLOCK 0x1562
+#define PA_TXPWRMODE 0x1563
+#define PA_LEGACYDPHYESCDL 0x1570
+#define PA_MAXTXSPEEDFAST 0x1521
+#define PA_MAXTXSPEEDSLOW 0x1522
+#define PA_MAXRXSPEEDFAST 0x1541
+#define PA_MAXRXSPEEDSLOW 0x1542
+#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_TXSPEEDFAST 0x1565
+#define PA_TXSPEEDSLOW 0x1566
+#define PA_REMOTEVERINFO 0x15A0
+#define PA_TXGEAR 0x1568
+#define PA_TXTERMINATION 0x1569
+#define PA_HSSERIES 0x156A
+#define PA_PWRMODE 0x1571
+#define PA_RXGEAR 0x1583
+#define PA_RXTERMINATION 0x1584
+#define PA_MAXRXPWMGEAR 0x1586
+#define PA_MAXRXHSGEAR 0x1587
+#define PA_RXHSUNTERMCAP 0x15A5
+#define PA_RXLSTERMCAP 0x15A6
+#define PA_PACPREQTIMEOUT 0x1590
+#define PA_PACPREQEOBTIMEOUT 0x1591
+#define PA_HIBERN8TIME 0x15A7
+#define PA_LOCALVERINFO 0x15A9
+#define PA_TACTIVATE 0x15A8
+#define PA_PACPFRAMECOUNT 0x15C0
+#define PA_PACPERRORCOUNT 0x15C1
+#define PA_PHYTESTCONTROL 0x15C2
+#define PA_PWRMODEUSERDATA0 0x15B0
+#define PA_PWRMODEUSERDATA1 0x15B1
+#define PA_PWRMODEUSERDATA2 0x15B2
+#define PA_PWRMODEUSERDATA3 0x15B3
+#define PA_PWRMODEUSERDATA4 0x15B4
+#define PA_PWRMODEUSERDATA5 0x15B5
+#define PA_PWRMODEUSERDATA6 0x15B6
+#define PA_PWRMODEUSERDATA7 0x15B7
+#define PA_PWRMODEUSERDATA8 0x15B8
+#define PA_PWRMODEUSERDATA9 0x15B9
+#define PA_PWRMODEUSERDATA10 0x15BA
+#define PA_PWRMODEUSERDATA11 0x15BB
+#define PA_CONNECTEDTXDATALANES 0x1561
+#define PA_CONNECTEDRXDATALANES 0x1581
+#define PA_LOGICALLANEMAP 0x15A1
+#define PA_SLEEPNOCONFIGTIME 0x15A2
+#define PA_STALLNOCONFIGTIME 0x15A3
+#define PA_SAVECONFIGTIME 0x15A4
+
+/* PA power modes */
+enum {
+ FAST_MODE = 1,
+ SLOW_MODE = 2,
+ FASTAUTO_MODE = 4,
+ SLOWAUTO_MODE = 5,
+ UNCHANGED = 7,
+};
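+
+/*
+ * PA_PWRMODE encodes the RX mode in the upper nibble and the TX mode in
+ * the lower nibble, e.g. (FASTAUTO_MODE << 4) | FASTAUTO_MODE requests
+ * FAST_AUTO in both directions.
+ */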
+
+/* PA TX/RX Frequency Series */
+enum {
+ PA_HS_MODE_A = 1,
+ PA_HS_MODE_B = 2,
+};
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TC0TXFCTHRESHOLD 0x2040
+#define DL_FC0PROTTIMEOUTVAL 0x2041
+#define DL_TC0REPLAYTIMEOUTVAL 0x2042
+#define DL_AFC0REQTIMEOUTVAL 0x2043
+#define DL_AFC0CREDITTHRESHOLD 0x2044
+#define DL_TC0OUTACKTHRESHOLD 0x2045
+#define DL_TC1TXFCTHRESHOLD 0x2060
+#define DL_FC1PROTTIMEOUTVAL 0x2061
+#define DL_TC1REPLAYTIMEOUTVAL 0x2062
+#define DL_AFC1REQTIMEOUTVAL 0x2063
+#define DL_AFC1CREDITTHRESHOLD 0x2064
+#define DL_TC1OUTACKTHRESHOLD 0x2065
+#define DL_TXPREEMPTIONCAP 0x2000
+#define DL_TC0TXMAXSDUSIZE 0x2001
+#define DL_TC0RXINITCREDITVAL 0x2002
+#define DL_TC0TXBUFFERSIZE 0x2005
+#define DL_PEERTC0PRESENT 0x2046
+#define DL_PEERTC0RXINITCREVAL 0x2047
+#define DL_TC1TXMAXSDUSIZE 0x2003
+#define DL_TC1RXINITCREDITVAL 0x2004
+#define DL_TC1TXBUFFERSIZE 0x2006
+#define DL_PEERTC1PRESENT 0x2066
+#define DL_PEERTC1RXINITCREVAL 0x2067
+
+/*
+ * Network Layer Attributes
+ */
+#define N_DEVICEID 0x3000
+#define N_DEVICEID_VALID 0x3001
+#define N_TC0TXMAXSDUSIZE 0x3020
+#define N_TC1TXMAXSDUSIZE 0x3021
+
+/*
+ * Transport Layer Attributes
+ */
+#define T_NUMCPORTS 0x4000
+#define T_NUMTESTFEATURES 0x4001
+#define T_CONNECTIONSTATE 0x4020
+#define T_PEERDEVICEID 0x4021
+#define T_PEERCPORTID 0x4022
+#define T_TRAFFICCLASS 0x4023
+#define T_PROTOCOLID 0x4024
+#define T_CPORTFLAGS 0x4025
+#define T_TXTOKENVALUE 0x4026
+#define T_RXTOKENVALUE 0x4027
+#define T_LOCALBUFFERSPACE 0x4028
+#define T_PEERBUFFERSPACE 0x4029
+#define T_CREDITSTOSEND 0x402A
+#define T_CPORTMODE 0x402B
+#define T_TC0TXMAXSDUSIZE 0x4060
+#define T_TC1TXMAXSDUSIZE 0x4061
+
+/* Boolean attribute values */
+enum {
+ FALSE = 0,
+ TRUE,
+};
+
+#endif /* _UNIPRO_H_ */