author      Dmitry Torokhov <dmitry.torokhov@gmail.com>   2012-05-24 12:13:01 +0400
committer   Dmitry Torokhov <dmitry.torokhov@gmail.com>   2012-05-24 12:13:01 +0400
commit      e644dae645e167d154c0526358940986682a72b0 (patch)
tree        972993c6568085b8d407fc7e13de10f4b93c651d /drivers/scsi
parent      899c612d74d4a242158a4db20367388d6299c028 (diff)
parent      86809173ce32ef03bd4d0389dfc72df0c805e9c4 (diff)
download    linux-e644dae645e167d154c0526358940986682a72b0.tar.xz
Merge branch 'next' into for-linus
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/53c700.c | 1
-rw-r--r--  drivers/scsi/BusLogic.c | 1
-rw-r--r--  drivers/scsi/Kconfig | 19
-rw-r--r--  drivers/scsi/Makefile | 5
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 4
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 27
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 21
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 26
-rw-r--r--  drivers/scsi/aacraid/linit.c | 28
-rw-r--r--  drivers/scsi/aacraid/rx.c | 1
-rw-r--r--  drivers/scsi/aacraid/sa.c | 1
-rw-r--r--  drivers/scsi/aacraid/src.c | 293
-rw-r--r--  drivers/scsi/advansys.c | 1
-rw-r--r--  drivers/scsi/aha152x.c | 1
-rw-r--r--  drivers/scsi/aha1542.c | 1
-rw-r--r--  drivers/scsi/aha1740.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 8
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dev.c | 38
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 6
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 11
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 9
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 1
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 1
-rw-r--r--  drivers/scsi/arm/fas216.c | 4
-rw-r--r--  drivers/scsi/arm/fas216.h | 4
-rw-r--r--  drivers/scsi/arm/oak.c | 1
-rw-r--r--  drivers/scsi/atp870u.c | 5
-rw-r--r--  drivers/scsi/bfa/bfa.h | 9
-rw-r--r--  drivers/scsi/bfa/bfa_core.c | 693
-rw-r--r--  drivers/scsi/bfa/bfa_defs_svc.h | 2
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 2
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_rport.c | 5
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 188
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h | 17
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_ct.c | 151
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c | 69
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h | 4
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 47
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 66
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.h | 2
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 2
-rw-r--r--  drivers/scsi/bfa/bfi_ms.h | 17
-rw-r--r--  drivers/scsi/bfa/bfi_reg.h | 6
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 8
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_constants.h | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 29
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 12
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 4
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_constants.h | 1
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 12
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 9
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 18
-rw-r--r--  drivers/scsi/dtc.c | 1
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 256
-rw-r--r--  drivers/scsi/fcoe/fcoe.h | 3
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 38
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 9
-rw-r--r--  drivers/scsi/fd_mcs.c | 1
-rw-r--r--  drivers/scsi/fdomain.c | 1
-rw-r--r--  drivers/scsi/g_NCR5380.c | 1
-rw-r--r--  drivers/scsi/gdth.c | 5
-rw-r--r--  drivers/scsi/hpsa.c | 344
-rw-r--r--  drivers/scsi/hpsa.h | 3
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 9
-rw-r--r--  drivers/scsi/ibmmca.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi/Makefile | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 7
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 19
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 5
-rw-r--r--  drivers/scsi/ibmvscsi/iseries_vscsi.c | 173
-rw-r--r--  drivers/scsi/in2000.c | 1
-rw-r--r--  drivers/scsi/ipr.c | 87
-rw-r--r--  drivers/scsi/ipr.h | 20
-rw-r--r--  drivers/scsi/ips.c | 6
-rw-r--r--  drivers/scsi/isci/host.c | 18
-rw-r--r--  drivers/scsi/isci/host.h | 19
-rw-r--r--  drivers/scsi/isci/init.c | 24
-rw-r--r--  drivers/scsi/isci/phy.c | 171
-rw-r--r--  drivers/scsi/isci/phy.h | 155
-rw-r--r--  drivers/scsi/isci/port.c | 263
-rw-r--r--  drivers/scsi/isci/port.h | 114
-rw-r--r--  drivers/scsi/isci/registers.h | 27
-rw-r--r--  drivers/scsi/isci/remote_device.c | 82
-rw-r--r--  drivers/scsi/isci/remote_device.h | 212
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 19
-rw-r--r--  drivers/scsi/isci/remote_node_context.h | 97
-rw-r--r--  drivers/scsi/isci/request.c | 386
-rw-r--r--  drivers/scsi/isci/request.h | 228
-rw-r--r--  drivers/scsi/isci/scu_task_context.h | 55
-rw-r--r--  drivers/scsi/isci/task.c | 158
-rw-r--r--  drivers/scsi/isci/task.h | 40
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 13
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 7
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c | 3
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 21
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 13
-rw-r--r--  drivers/scsi/libfc/fc_libfc.c | 8
-rw-r--r--  drivers/scsi/libfc/fc_libfc.h | 2
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 239
-rw-r--r--  drivers/scsi/libiscsi.c | 28
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 22
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 828
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 246
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 96
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 342
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 19
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 214
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 97
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 12
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 32
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 364
-rw-r--r--  drivers/scsi/lpfc/Makefile | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 106
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 36
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 57
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 168
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 87
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 1282
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 298
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 4
-rw-r--r--  drivers/scsi/mac53c94.c | 1
-rw-r--r--  drivers/scsi/mac_scsi.c | 1
-rw-r--r--  drivers/scsi/megaraid.c | 4
-rw-r--r--  drivers/scsi/mesh.c | 1
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 13
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_config.c | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 4
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 6
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 2
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 15
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 1
-rw-r--r--  drivers/scsi/nsp32.c | 1
-rw-r--r--  drivers/scsi/osst.c | 1
-rw-r--r--  drivers/scsi/pas16.c | 1
-rw-r--r--  drivers/scsi/pm8001/pm8001_chips.h | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 422
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 127
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 6
-rw-r--r--  drivers/scsi/pmcraid.h | 6
-rw-r--r--  drivers/scsi/qla1280.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 177
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 122
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 630
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 63
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 117
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 86
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 540
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 51
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 167
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 445
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 410
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 90
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 435
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 148
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 45
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 24
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 9
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 92
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 78
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 23
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 17
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 573
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 1
-rw-r--r--  drivers/scsi/scsi.c | 6
-rw-r--r--  drivers/scsi/scsi_debug.c | 57
-rw-r--r--  drivers/scsi/scsi_error.c | 24
-rw-r--r--  drivers/scsi/scsi_lib.c | 9
-rw-r--r--  drivers/scsi/scsi_scan.c | 4
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 30
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 268
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 60
-rw-r--r--  drivers/scsi/sd.c | 103
-rw-r--r--  drivers/scsi/sd.h | 35
-rw-r--r--  drivers/scsi/sd_dif.c | 14
-rw-r--r--  drivers/scsi/st.c | 33
-rw-r--r--  drivers/scsi/st.h | 1
-rw-r--r--  drivers/scsi/storvsc_drv.c | 1548
-rw-r--r--  drivers/scsi/sun3_scsi.c | 1
-rw-r--r--  drivers/scsi/sun3_scsi_vme.c | 1
-rw-r--r--  drivers/scsi/sym53c416.c | 1
-rw-r--r--  drivers/scsi/t128.c | 1
-rw-r--r--  drivers/scsi/u14-34f.c | 1
-rw-r--r--  drivers/scsi/ufs/Kconfig | 49
-rw-r--r--  drivers/scsi/ufs/Makefile | 2
-rw-r--r--  drivers/scsi/ufs/ufs.h | 207
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 1978
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 376
-rw-r--r--  drivers/scsi/ultrastor.c | 1
-rw-r--r--  drivers/scsi/virtio_scsi.c | 594
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 65
-rw-r--r--  drivers/scsi/vmw_pvscsi.h | 109
-rw-r--r--  drivers/scsi/wd7000.c | 1
218 files changed, 14977 insertions, 5343 deletions
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index f672491774eb..a3adfb4357f5 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -129,7 +129,6 @@
#include <linux/interrupt.h>
#include <linux/device.h>
#include <asm/dma.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/byteorder.h>
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index f66c33b9ab41..d4da3708763b 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -47,7 +47,6 @@
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 16570aa84aac..29684c8142b0 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -619,6 +619,7 @@ config SCSI_ARCMSR
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig"
+source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
@@ -662,6 +663,13 @@ config VMWARE_PVSCSI
To compile this driver as a module, choose M here: the
module will be called vmw_pvscsi.
+config HYPERV_STORAGE
+ tristate "Microsoft Hyper-V virtual storage driver"
+ depends on SCSI && HYPERV
+ default HYPERV
+ help
+ Select this option to enable the Hyper-V virtual storage driver.
+
config LIBFC
tristate "LibFC module"
select SCSI_FC_ATTRS
@@ -967,9 +975,8 @@ config SCSI_IPS
config SCSI_IBMVSCSI
tristate "IBM Virtual SCSI support"
- depends on PPC_PSERIES || PPC_ISERIES
+ depends on PPC_PSERIES
select SCSI_SRP_ATTRS
- select VIOPATH if PPC_ISERIES
help
This is the IBM POWER Virtual SCSI Client
@@ -1897,6 +1904,14 @@ config SCSI_BFA_FC
To compile this driver as a module, choose M here. The module will
be called bfa.
+config SCSI_VIRTIO
+ tristate "virtio-scsi support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && VIRTIO
+ help
+ This is the virtual HBA driver for virtio. If the kernel will
+ be used in a virtual machine, say Y or M.
+
+
endif # SCSI_LOWLEVEL
source "drivers/scsi/pcmcia/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2b887498be50..8deedeaf5608 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/
+obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
obj-$(CONFIG_SCSI_GDTH) += gdth.o
@@ -141,7 +142,9 @@ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
+obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
+obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
obj-$(CONFIG_ARM) += arm/
@@ -170,6 +173,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_mod-y += scsi_trace.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
+hv_storvsc-y := storvsc_drv.o
+
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
sd_mod-objs := sd.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 409f5805bdd6..52551662d107 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -151,7 +151,11 @@ int aac_msi;
int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
+int aac_sync_mode; /* Only Sync. transfer - disabled */
+module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
+ " 0=off, 1=on");
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
" 0=off, 1=on");
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index ffb587817efc..3fcf62724fad 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 28000
+# define AAC_DRIVER_BUILD 28900
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -756,8 +756,16 @@ struct src_mu_registers {
struct src_registers {
struct src_mu_registers MUnit; /* 00h - c7h */
- __le32 reserved1[130790]; /* c8h - 7fc5fh */
- struct src_inbound IndexRegs; /* 7fc60h */
+ union {
+ struct {
+ __le32 reserved1[130790]; /* c8h - 7fc5fh */
+ struct src_inbound IndexRegs; /* 7fc60h */
+ } tupelo;
+ struct {
+ __le32 reserved1[974]; /* c8h - fffh */
+ struct src_inbound IndexRegs; /* 1000h */
+ } denali;
+ } u;
};
#define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR))
@@ -999,6 +1007,10 @@ struct aac_bus_info_response {
#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18)
#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28)
+#define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29)
+#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
+#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
+
struct aac_dev
{
@@ -1076,6 +1088,8 @@ struct aac_dev
# define AAC_MIN_FOOTPRINT_SIZE 8192
# define AAC_MIN_SRC_BAR0_SIZE 0x400000
# define AAC_MIN_SRC_BAR1_SIZE 0x800
+# define AAC_MIN_SRCV_BAR0_SIZE 0x100000
+# define AAC_MIN_SRCV_BAR1_SIZE 0x400
#endif
union
{
@@ -1116,7 +1130,10 @@ struct aac_dev
u8 msi;
int management_fib_count;
spinlock_t manage_lock;
-
+ spinlock_t sync_lock;
+ int sync_mode;
+ struct fib *sync_fib;
+ struct list_head sync_fib_list;
};
#define aac_adapter_interrupt(dev) \
@@ -1163,6 +1180,7 @@ struct aac_dev
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
#define FIB_CONTEXT_FLAG (0x00000002)
+#define FIB_CONTEXT_FLAG_WAIT (0x00000004)
/*
* Define the command values
@@ -1970,6 +1988,7 @@ int aac_rkt_init(struct aac_dev *dev);
int aac_nark_init(struct aac_dev *dev);
int aac_sa_init(struct aac_dev *dev);
int aac_src_init(struct aac_dev *dev);
+int aac_srcv_init(struct aac_dev *dev);
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 7ac8fdb5577b..a35f54ebdce0 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -325,12 +325,14 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
u32 status[5];
struct Scsi_Host * host = dev->scsi_host_ptr;
+ extern int aac_sync_mode;
/*
* Check the preferred comm settings, defaults from template.
*/
dev->management_fib_count = 0;
spin_lock_init(&dev->manage_lock);
+ spin_lock_init(&dev->sync_lock);
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- sizeof(struct aac_fibhdr)
@@ -344,13 +346,21 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
(status[0] == 0x00000001)) {
if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
dev->raw_io_64 = 1;
- if (dev->a_ops.adapter_comm) {
- if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) {
- dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
- dev->raw_io_interface = 1;
- } else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) {
+ dev->sync_mode = aac_sync_mode;
+ if (dev->a_ops.adapter_comm &&
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
dev->comm_interface = AAC_COMM_MESSAGE;
dev->raw_io_interface = 1;
+ if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
+ /* driver supports TYPE1 (Tupelo) */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+ } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3)) ||
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
+ /* driver doesn't support TYPE2 (Series7), TYPE3 and TYPE4 */
+ /* switch to sync. mode */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+ dev->sync_mode = 1;
}
}
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
@@ -455,6 +465,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
INIT_LIST_HEAD(&dev->fib_list);
+ INIT_LIST_HEAD(&dev->sync_fib_list);
return dev;
}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e5f2d7d9002e..4b32ca442433 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -416,6 +416,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
unsigned long flags = 0;
unsigned long qflags;
unsigned long mflags = 0;
+ unsigned long sflags = 0;
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
@@ -512,6 +513,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
spin_lock_irqsave(&fibptr->event_lock, flags);
}
+ if (dev->sync_mode) {
+ if (wait)
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ spin_lock_irqsave(&dev->sync_lock, sflags);
+ if (dev->sync_fib) {
+ list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ } else {
+ dev->sync_fib = fibptr;
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+ (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+ if (wait) {
+ fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+ if (down_interruptible(&fibptr->event_wait)) {
+ fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
+ return -EFAULT;
+ }
+ return 0;
+ }
+ return -EINPROGRESS;
+ }
+
if (aac_adapter_deliver(fibptr) != 0) {
printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
if (wait) {
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 705e13e470af..0d279c445a30 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -56,7 +56,7 @@
#include "aacraid.h"
-#define AAC_DRIVER_VERSION "1.1-7"
+#define AAC_DRIVER_VERSION "1.2-0"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
@@ -162,7 +162,10 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
- { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */
+ { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
+ { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
+ { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
+ { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -238,7 +241,10 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
- { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Catch All */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
};
/**
@@ -1102,6 +1108,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
int error = -ENODEV;
int unique_id = 0;
u64 dmamask;
+ extern int aac_sync_mode;
list_for_each_entry(aac, &aac_devices, entry) {
if (aac->id > unique_id)
@@ -1162,6 +1169,21 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if ((*aac_drivers[index].init)(aac))
goto out_unmap;
+ if (aac->sync_mode) {
+ if (aac_sync_mode)
+ printk(KERN_INFO "%s%d: Sync. mode enforced "
+ "by driver parameter. This will cause "
+ "a significant performance decrease!\n",
+ aac->name,
+ aac->id);
+ else
+ printk(KERN_INFO "%s%d: Async. mode not supported "
+ "by current driver, sync. mode enforced."
+ "\nPlease update driver to get full performance.\n",
+ aac->name,
+ aac->id);
+ }
+
/*
* Start any kernel threads needed
*/
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index ce530f113fdb..b029c7cc785b 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -643,6 +643,7 @@ int _aac_rx_init(struct aac_dev *dev)
if (aac_init_adapter(dev) == NULL)
goto error_iounmap;
aac_adapter_comm(dev, dev->comm_interface);
+ dev->sync_mode = 0; /* sync. mode not supported */
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index e5d4457121ea..beb533630d4b 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -385,6 +385,7 @@ int aac_sa_init(struct aac_dev *dev)
if(aac_init_adapter(dev) == NULL)
goto error_irq;
+ dev->sync_mode = 0; /* sync. mode not supported */
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED,
"aacraid", (void *)dev ) < 0) {
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 957595a7a45c..2bee51506a91 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -96,6 +96,38 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
our_interrupt = 1;
/* handle AIF */
aac_intr_normal(dev, 0, 2, 0, NULL);
+ } else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
+ unsigned long sflags;
+ struct list_head *entry;
+ int send_it = 0;
+
+ if (dev->sync_fib) {
+ our_interrupt = 1;
+ if (dev->sync_fib->callback)
+ dev->sync_fib->callback(dev->sync_fib->callback_data,
+ dev->sync_fib);
+ spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
+ if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+ dev->management_fib_count--;
+ up(&dev->sync_fib->event_wait);
+ }
+ spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
+ spin_lock_irqsave(&dev->sync_lock, sflags);
+ if (!list_empty(&dev->sync_fib_list)) {
+ entry = dev->sync_fib_list.next;
+ dev->sync_fib = list_entry(entry, struct fib, fiblink);
+ list_del(entry);
+ send_it = 1;
+ } else {
+ dev->sync_fib = NULL;
+ }
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ if (send_it) {
+ aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+ (u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+ }
}
}
@@ -177,56 +209,63 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
*/
src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
- ok = 0;
- start = jiffies;
+ if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
+ ok = 0;
+ start = jiffies;
- /*
- * Wait up to 30 seconds
- */
- while (time_before(jiffies, start+30*HZ)) {
- /* Delay 5 microseconds to let Mon960 get info. */
- udelay(5);
-
- /* Mon960 will set doorbell0 bit
- * when it has completed the command
+ /*
+ * Wait up to 5 minutes
*/
- if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
- /* Clear the doorbell */
- src_writel(dev,
- MUnit.ODR_C,
- OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
- ok = 1;
- break;
+ while (time_before(jiffies, start+300*HZ)) {
+ udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
+ /*
+ * Mon960 will set doorbell0 bit when it has completed the command.
+ */
+ if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
+ /*
+ * Clear the doorbell.
+ */
+ src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ ok = 1;
+ break;
+ }
+ /*
+ * Yield the processor in case we are slow
+ */
+ msleep(1);
}
-
- /* Yield the processor in case we are slow */
- msleep(1);
- }
- if (unlikely(ok != 1)) {
- /* Restore interrupt mask even though we timed out */
- aac_adapter_enable_int(dev);
- return -ETIMEDOUT;
+ if (unlikely(ok != 1)) {
+ /*
+ * Restore interrupt mask even though we timed out
+ */
+ aac_adapter_enable_int(dev);
+ return -ETIMEDOUT;
+ }
+ /*
+ * Pull the synch status from Mailbox 0.
+ */
+ if (status)
+ *status = readl(&dev->IndexRegs->Mailbox[0]);
+ if (r1)
+ *r1 = readl(&dev->IndexRegs->Mailbox[1]);
+ if (r2)
+ *r2 = readl(&dev->IndexRegs->Mailbox[2]);
+ if (r3)
+ *r3 = readl(&dev->IndexRegs->Mailbox[3]);
+ if (r4)
+ *r4 = readl(&dev->IndexRegs->Mailbox[4]);
+
+ /*
+ * Clear the synch command doorbell.
+ */
+ src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
}
- /* Pull the synch status from Mailbox 0 */
- if (status)
- *status = readl(&dev->IndexRegs->Mailbox[0]);
- if (r1)
- *r1 = readl(&dev->IndexRegs->Mailbox[1]);
- if (r2)
- *r2 = readl(&dev->IndexRegs->Mailbox[2]);
- if (r3)
- *r3 = readl(&dev->IndexRegs->Mailbox[3]);
- if (r4)
- *r4 = readl(&dev->IndexRegs->Mailbox[4]);
-
- /* Clear the synch command doorbell */
- src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
-
- /* Restore interrupt mask */
+ /*
+ * Restore interrupt mask
+ */
aac_adapter_enable_int(dev);
return 0;
-
}
/**
@@ -386,9 +425,7 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
if (!size) {
iounmap(dev->regs.src.bar0);
- dev->regs.src.bar0 = NULL;
- iounmap(dev->base);
- dev->base = NULL;
+ dev->base = dev->regs.src.bar0 = NULL;
return 0;
}
dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
@@ -404,7 +441,27 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
return -1;
}
dev->IndexRegs = &((struct src_registers __iomem *)
- dev->base)->IndexRegs;
+ dev->base)->u.tupelo.IndexRegs;
+ return 0;
+}
+
+/**
+ * aac_srcv_ioremap
+ * @size: mapping resize request
+ *
+ */
+static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.src.bar0);
+ dev->base = dev->regs.src.bar0 = NULL;
+ return 0;
+ }
+ dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, size);
+ if (dev->base == NULL)
+ return -1;
+ dev->IndexRegs = &((struct src_registers __iomem *)
+ dev->base)->u.denali.IndexRegs;
return 0;
}
@@ -419,7 +476,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
if (bled || (var != 0x00000001))
- bled = -EINVAL;
+ return -EINVAL;
if (dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_DOORBELL_RESET) {
src_writel(dev, MUnit.IDR, reset_mask);
@@ -579,15 +636,149 @@ int aac_src_init(struct aac_dev *dev)
dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
aac_adapter_enable_int(dev);
+
+ if (!dev->sync_mode) {
+ /*
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
+ */
+ aac_src_start_adapter(dev);
+ }
+ return 0;
+
+error_iounmap:
+
+ return -1;
+}
+
+/**
+ * aac_srcv_init - initialize an SRCv card
+ * @dev: device to configure
+ *
+ */
+
+int aac_srcv_init(struct aac_dev *dev)
+{
+ unsigned long start;
+ unsigned long status;
+ int restart = 0;
+ int instance = dev->id;
+ const char *name = dev->name;
+
+ dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
+ dev->a_ops.adapter_comm = aac_src_select_comm;
+
+ dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
+ if (aac_adapter_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+
+ /* Failure to reset here is an option ... */
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ if ((aac_reset_devices || reset_devices) &&
+ !aac_src_restart_adapter(dev, 0))
+ ++restart;
/*
- * Tell the adapter that all is configured, and it can
- * start accepting requests
+ * Check to see if the board panic'd while booting.
*/
- aac_src_start_adapter(dev);
+ status = src_readl(dev, MUnit.OMR);
+ if (status & KERNEL_PANIC) {
+ if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+ goto error_iounmap;
+ ++restart;
+ }
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & SELF_TEST_FAILED) {
+ printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+ goto error_iounmap;
+ }
+ /*
+ * Check to see if the monitor panic'd while booting.
+ */
+ if (status & MONITOR_PANIC) {
+ printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+ goto error_iounmap;
+ }
+ start = jiffies;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes
+ */
+ while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) {
+ if ((restart &&
+ (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+ time_after(jiffies, start+HZ*startup_timeout)) {
+ printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+ dev->name, instance, status);
+ goto error_iounmap;
+ }
+ if (!restart &&
+ ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+ time_after(jiffies, start + HZ *
+ ((startup_timeout > 60)
+ ? (startup_timeout - 60)
+ : (startup_timeout / 2))))) {
+ if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
+ start = jiffies;
+ ++restart;
+ }
+ msleep(1);
+ }
+ if (restart && aac_commit)
+ aac_commit = 1;
+ /*
+ * Fill in the common function dispatch table.
+ */
+ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_notify = aac_src_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_src_check_health;
+ dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+ /*
+ * First clear out all interrupts. Then enable the one's that we
+ * can handle.
+ */
+ aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+ aac_adapter_disable_int(dev);
+ src_writel(dev, MUnit.ODR_C, 0xffffffff);
+ aac_adapter_enable_int(dev);
+ if (aac_init_adapter(dev) == NULL)
+ goto error_iounmap;
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+ goto error_iounmap;
+ dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ dev->dbg_base = dev->scsi_host_ptr->base;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+
+ aac_adapter_enable_int(dev);
+
+ if (!dev->sync_mode) {
+ /*
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
+ */
+ aac_src_start_adapter(dev);
+ }
return 0;
error_iounmap:
return -1;
}
+
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index bfd618a69499..374c4edf4fcb 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -41,7 +41,6 @@
#include <linux/firmware.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <asm/dma.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index f17c92cf808b..19a36945e6fd 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -239,7 +239,6 @@
#include <asm/irq.h>
#include <linux/io.h>
#include <linux/blkdev.h>
-#include <asm/system.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/string.h>
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index ed119cedaae0..ede91f378000 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -42,7 +42,6 @@
#include <linux/slab.h>
#include <asm/dma.h>
-#include <asm/system.h>
#include <asm/io.h>
#include "scsi.h"
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 1c10b796c1a2..a3e6ed353917 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -53,7 +53,6 @@
#include <linux/gfp.h>
#include <asm/dma.h>
-#include <asm/system.h>
#include <asm/io.h>
#include "scsi.h"
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 5f8617dd43bb..25417d0e7acb 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -8993,7 +8993,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
printk("Invalid Command IU Field\n");
break;
case SIU_PFC_TMF_NOT_SUPPORTED:
- printk("TMF not supportd\n");
+ printk("TMF not supported\n");
break;
case SIU_PFC_TMF_FAILED:
printk("TMF failed\n");
@@ -9113,7 +9113,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
break;
}
case SCSI_STATUS_OK:
- printk("%s: Interrupted for staus of 0???\n",
+ printk("%s: Interrupted for status of 0???\n",
ahd_name(ahd));
/* FALLTHROUGH */
default:
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 7d48700257a7..9328121804bb 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -341,10 +341,10 @@ MODULE_PARM_DESC(aic79xx,
" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
" slowcrc Turn on the SLOWCRC bit (Rev B only)\n"
"\n"
-" Sample /etc/modprobe.conf line:\n"
-" Enable verbose logging\n"
-" Set tag depth on Controller 2/Target 2 to 10 tags\n"
-" Shorten the selection timeout to 128ms\n"
+" Sample modprobe configuration file:\n"
+" # Enable verbose logging\n"
+" # Set tag depth on Controller 2/Target 2 to 10 tags\n"
+" # Shorten the selection timeout to 128ms\n"
"\n"
" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index dc28b0a91b22..10172a3af1b9 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -1049,7 +1049,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
switch (hscb->shared_data.status.scsi_status) {
case SCSI_STATUS_OK:
- printk("%s: Interrupted for staus of 0???\n",
+ printk("%s: Interrupted for status of 0???\n",
ahc_name(ahc));
break;
case SCSI_STATUS_CMD_TERMINATED:
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c6251bb4f438..5a477cdc780d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -360,10 +360,10 @@ MODULE_PARM_DESC(aic7xxx,
" seltime:<int> Selection Timeout\n"
" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
"\n"
-" Sample /etc/modprobe.conf line:\n"
-" Toggle EISA/VLB probing\n"
-" Set tag depth on Controller 1/Target 1 to 10 tags\n"
-" Shorten the selection timeout to 128ms\n"
+" Sample modprobe configuration file:\n"
+" # Toggle EISA/VLB probing\n"
+" # Set tag depth on Controller 1/Target 1 to 10 tags\n"
+" # Shorten the selection timeout to 128ms\n"
"\n"
" options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
);
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 2863a9d22851..66cda669b417 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -80,6 +80,8 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
int asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags);
+void asd_set_dmamode(struct domain_device *dev);
+
/* ---------- TMFs ---------- */
int asd_abort_task(struct sas_task *);
int asd_abort_task_set(struct domain_device *, u8 *lun);
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 2e2ddec9c0b6..64136c56e706 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -109,26 +109,37 @@ static int asd_init_sata_tag_ddb(struct domain_device *dev)
return 0;
}
-static int asd_init_sata(struct domain_device *dev)
+void asd_set_dmamode(struct domain_device *dev)
{
struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ struct ata_device *ata_dev = sas_to_ata_dev(dev);
int ddb = (int) (unsigned long) dev->lldd_dev;
u32 qdepth = 0;
- int res = 0;
- asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
- if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) &&
- dev->sata_dev.identify_device &&
- dev->sata_dev.identify_device[10] != 0) {
- u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]);
- u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]);
-
- if (w76 & 0x100) /* NCQ? */
- qdepth = (w75 & 0x1F) + 1;
+ if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
+ if (ata_id_has_ncq(ata_dev->id))
+ qdepth = ata_id_queue_depth(ata_dev->id);
asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
(1ULL<<qdepth)-1);
asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
}
+
+ if (qdepth > 0)
+ if (asd_init_sata_tag_ddb(dev) != 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+ ata_dev->flags |= ATA_DFLAG_NCQ_OFF;
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+ }
+}
+
+static int asd_init_sata(struct domain_device *dev)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ int ddb = (int) (unsigned long) dev->lldd_dev;
+
+ asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
dev->dev_type == SATA_PM_PORT) {
struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
@@ -136,9 +147,8 @@ static int asd_init_sata(struct domain_device *dev)
asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
}
asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
- if (qdepth > 0)
- res = asd_init_sata_tag_ddb(dev);
- return res;
+
+ return 0;
}
static int asd_init_target_ddb(struct domain_device *dev)
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index d5ff142c93a2..ff80552ead84 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -68,7 +68,6 @@ static struct scsi_host_template aic94xx_sht = {
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
- .slave_destroy = sas_slave_destroy,
.scan_finished = asd_scan_finished,
.scan_start = asd_scan_start,
.change_queue_depth = sas_change_queue_depth,
@@ -82,7 +81,6 @@ static struct scsi_host_template aic94xx_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
- .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
};
@@ -972,7 +970,7 @@ static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (time < HZ)
return 0;
/* Wait for discovery to finish */
- scsi_flush_work(shost);
+ sas_drain_work(SHOST_TO_SAS_HA(shost));
return 1;
}
@@ -1010,6 +1008,8 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_clear_nexus_ha = asd_clear_nexus_ha,
.lldd_control_phy = asd_control_phy,
+
+ .lldd_ata_set_dmamode = asd_set_dmamode,
};
static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 0add73bdf2a4..cf9040933da6 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -181,7 +181,7 @@ static int asd_clear_nexus_I_T(struct domain_device *dev,
int asd_I_T_nexus_reset(struct domain_device *dev)
{
int res, tmp_res, i;
- struct sas_phy *phy = sas_find_local_phy(dev);
+ struct sas_phy *phy = sas_get_local_phy(dev);
/* Standard mandates link reset for ATA (type 0) and
* hard reset for SSP (type 1) */
int reset_type = (dev->dev_type == SATA_DEV ||
@@ -192,7 +192,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
ASD_DPRINTK("sending %s reset to %s\n",
reset_type ? "hard" : "soft", dev_name(&phy->dev));
res = sas_phy_reset(phy, reset_type);
- if (res == TMF_RESP_FUNC_COMPLETE) {
+ if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
/* wait for the maximum settle time */
msleep(500);
/* clear all outstanding commands (keep nexus suspended) */
@@ -201,7 +201,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
for (i = 0 ; i < 3; i++) {
tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
if (tmp_res == TC_RESUME)
- return res;
+ goto out;
msleep(500);
}
@@ -211,7 +211,10 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
dev_printk(KERN_ERR, &phy->dev,
"Failed to resume nexus after reset 0x%x\n", tmp_res);
- return TMF_RESP_FUNC_FAILED;
+ res = TMF_RESP_FUNC_FAILED;
+ out:
+ sas_put_local_phy(phy);
+ return res;
}
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f980600f78a8..cbde1dca45ad 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -61,7 +61,6 @@
#include <linux/aer.h>
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
@@ -1736,7 +1735,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
(uint32_t ) cmd->cmnd[8];
/* 4 bytes: Areca io control code */
sg = scsi_sglist(cmd);
- buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg)) + sg->offset;
if (scsi_sg_count(cmd) > 1) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
@@ -1985,7 +1984,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
}
message_out:
sg = scsi_sglist(cmd);
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ kunmap_atomic(buffer - sg->offset);
return retvalue;
}
@@ -2035,11 +2034,11 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
strncpy(&inqdata[32], "R001", 4); /* Product Revision */
sg = scsi_sglist(cmd);
- buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg)) + sg->offset;
memcpy(buffer, inqdata, sizeof(inqdata));
sg = scsi_sglist(cmd);
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ kunmap_atomic(buffer - sg->offset);
cmd->scsi_done(cmd);
}
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index c454e44cf51c..b330438ac662 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -138,7 +138,6 @@
#include <linux/stringify.h>
#include <linux/io.h>
-#include <asm/system.h>
#include <asm/ecard.h>
#include "../scsi.h"
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index a750aa72b8ef..2a28b4ad1975 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -305,7 +305,7 @@ arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
info->base = base;
info->info.scsi.io_base = base + 0x2000;
- info->info.scsi.irq = NO_IRQ;
+ info->info.scsi.irq = 0;
info->info.scsi.dma = NO_DMA;
info->info.scsi.io_shift = 5;
info->info.ifcfg.clockrate = 24; /* MHz */
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index a3398fe70a9c..c3b99c93637a 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -12,7 +12,6 @@
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/system.h>
#include "../scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index e85c40b6e19b..6206a666a8ec 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2176,7 +2176,7 @@ static void fas216_done(FAS216_Info *info, unsigned int result)
fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble;
fn(info, SCpnt, result);
- if (info->scsi.irq != NO_IRQ) {
+ if (info->scsi.irq) {
spin_lock_irqsave(&info->host_lock, flags);
if (info->scsi.phase == PHASE_IDLE)
fas216_kick(info);
@@ -2276,7 +2276,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
* We should only be using this if we don't have an interrupt.
* Provide some "incentive" to use the queueing code.
*/
- BUG_ON(info->scsi.irq != NO_IRQ);
+ BUG_ON(info->scsi.irq);
info->internal_done = 0;
fas216_queue_command_lck(SCpnt, fas216_internal_done);
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 84b7127c0121..df2e1b3ddfe2 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -12,10 +12,6 @@
#ifndef FAS216_H
#define FAS216_H
-#ifndef NO_IRQ
-#define NO_IRQ 255
-#endif
-
#include <scsi/scsi_eh.h>
#include "queue.h"
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 849cdf89f7bb..d25f944b59c2 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -13,7 +13,6 @@
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/system.h>
#include "../scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 7e6eca4a125e..68ce08552f69 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -30,7 +30,6 @@
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <scsi/scsi.h>
@@ -2583,7 +2582,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* this than via the PCI device table
*/
if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) {
- error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+ atpdev->chip_ver = pdev->revision;
if (atpdev->chip_ver < 2)
goto err_eio;
}
@@ -2602,7 +2601,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
base_io &= 0xfffffff8;
if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) {
- error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+ atpdev->chip_ver = pdev->revision;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803
host_id = inb(base_io + 0x39);
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index a796de935054..4ad7e368bbc2 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -225,9 +225,9 @@ struct bfa_faa_args_s {
};
struct bfa_iocfc_s {
+ bfa_fsm_t fsm;
struct bfa_s *bfa;
struct bfa_iocfc_cfg_s cfg;
- int action;
u32 req_cq_pi[BFI_IOC_MAX_CQS];
u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
u8 hw_qid[BFI_IOC_MAX_CQS];
@@ -236,7 +236,9 @@ struct bfa_iocfc_s {
struct bfa_cb_qe_s dis_hcb_qe;
struct bfa_cb_qe_s en_hcb_qe;
struct bfa_cb_qe_s stats_hcb_qe;
- bfa_boolean_t cfgdone;
+ bfa_boolean_t submod_enabled;
+ bfa_boolean_t cb_reqd; /* Driver call back reqd */
+ bfa_status_t op_status; /* Status of bfa iocfc op */
struct bfa_dma_s cfg_info;
struct bfi_iocfc_cfg_s *cfginfo;
@@ -341,8 +343,6 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
u32 *end);
void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
-wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
-wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
struct bfi_pbc_vport_s *pbc_vport);
@@ -428,7 +428,6 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
void bfa_iocfc_enable(struct bfa_s *bfa);
void bfa_iocfc_disable(struct bfa_s *bfa);
-void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 4bd546bcc240..456e5762977d 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -200,13 +200,431 @@ enum {
#define DEF_CFG_NUM_SBOOT_LUNS 16
/*
+ * IOCFC state machine definitions/declarations
+ */
+bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, operational,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_write,
+ struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_failed,
+ struct bfa_iocfc_s, enum iocfc_event);
+
+/*
* forward declaration for IOC FC functions
*/
+static void bfa_iocfc_start_submod(struct bfa_s *bfa);
+static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
+static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
+static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
+
+static void
+bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
+{
+}
+
+static void
+bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_INIT:
+ case IOCFC_E_ENABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_dconf_modinit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_DCONF_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_CFG_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
+{
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_START:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+ break;
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_fcport_init(iocfc->bfa);
+ bfa_iocfc_start_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_dconf_modexit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_DCONF_DONE:
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_DISABLED:
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
+ bfa_iocfc_stop_cb, iocfc->bfa);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_CFG_DONE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ case IOCFC_E_IOC_FAILED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+ if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+ break;
+
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, iocfc->bfa);
+ iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_IOC_DISABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+ bfa_iocfc_disable_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_ENABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ bfa_iocfc_disable_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+ break;
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
+
+static void
+bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+ bfa_isr_disable(iocfc->bfa);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+ bfa_trc(iocfc->bfa, event);
+
+ switch (event) {
+ case IOCFC_E_STOP:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+ break;
+ case IOCFC_E_DISABLE:
+ bfa_ioc_disable(&iocfc->bfa->ioc);
+ break;
+ case IOCFC_E_IOC_ENABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+ break;
+ case IOCFC_E_IOC_DISABLED:
+ bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+ iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+ bfa_iocfc_disable_cb, iocfc->bfa);
+ break;
+ case IOCFC_E_IOC_FAILED:
+ break;
+ default:
+ bfa_sm_fault(iocfc->bfa, event);
+ break;
+ }
+}
/*
* BFA Interrupt handling functions
@@ -231,16 +649,19 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
}
}
-static inline void
+bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
struct bfi_msg_s *m;
u32 pi, ci;
struct list_head *waitq;
+ bfa_boolean_t ret;
ci = bfa_rspq_ci(bfa, qid);
pi = bfa_rspq_pi(bfa, qid);
+ ret = (ci != pi);
+
while (ci != pi) {
m = bfa_rspq_elem(bfa, qid, ci);
WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
@@ -260,6 +681,8 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
waitq = bfa_reqq(bfa, qid);
if (!list_empty(waitq))
bfa_reqq_resume(bfa, qid);
+
+ return ret;
}
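The change above makes bfa_isr_rspq() report whether it consumed anything, which bfa_intx() then folds into its return value. A self-contained sketch of that ci/pi ring walk (queue depth and element type are placeholders):
/* Stand-alone model of the response-queue drain: entries between the
 * consumer index (ci) and producer index (pi) are pending; the return
 * value says whether at least one entry was processed. */
#define TOY_RSPQ_DEPTH	64		/* assumed power of two */

struct toy_rspq {
	unsigned int ci, pi;		/* consumer/producer indices */
	int elem[TOY_RSPQ_DEPTH];
};

static bfa_boolean_t
toy_rspq_drain(struct toy_rspq *q, void (*handle)(int elem))
{
	bfa_boolean_t ret = (q->ci != q->pi) ? BFA_TRUE : BFA_FALSE;

	while (q->ci != q->pi) {
		handle(q->elem[q->ci]);
		q->ci = (q->ci + 1) & (TOY_RSPQ_DEPTH - 1);	/* wrap */
	}
	return ret;
}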
static inline void
@@ -320,6 +743,7 @@ bfa_intx(struct bfa_s *bfa)
{
u32 intr, qintr;
int queue;
+ bfa_boolean_t rspq_comp = BFA_FALSE;
intr = readl(bfa->iocfc.bfa_regs.intr_status);
@@ -332,11 +756,12 @@ bfa_intx(struct bfa_s *bfa)
*/
if (bfa->queue_process) {
for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
- bfa_isr_rspq(bfa, queue);
+ if (bfa_isr_rspq(bfa, queue))
+ rspq_comp = BFA_TRUE;
}
if (!intr)
- return BFA_TRUE;
+ return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;
/*
* CPE completion queue interrupt
@@ -525,11 +950,9 @@ bfa_iocfc_send_cfg(void *bfa_arg)
* Enable interrupt coalescing only on the driver init path,
* not on the ioc disable/enable path.
*/
- if (!iocfc->cfgdone)
+ if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
cfg_info->intr_attr.coalesce = BFA_TRUE;
- iocfc->cfgdone = BFA_FALSE;
-
/*
* dma map IOC configuration itself
*/
@@ -549,8 +972,6 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa->bfad = bfad;
iocfc->bfa = bfa;
- iocfc->action = BFA_IOCFC_ACT_NONE;
-
iocfc->cfg = *cfg;
/*
@@ -683,6 +1104,8 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->start(bfa);
+
+ bfa->iocfc.submod_enabled = BFA_TRUE;
}
/*
@@ -693,8 +1116,13 @@ bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
int i;
+ if (bfa->iocfc.submod_enabled == BFA_FALSE)
+ return;
+
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->iocdisable(bfa);
+
+ bfa->iocfc.submod_enabled = BFA_FALSE;
}
static void
@@ -702,15 +1130,8 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
struct bfa_s *bfa = bfa_arg;
- if (complete) {
- if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
- bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
- else
- bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
- } else {
- if (bfa->iocfc.cfgdone)
- bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
- }
+ if (complete)
+ bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
}
static void
@@ -721,8 +1142,6 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
if (compl)
complete(&bfad->comp);
- else
- bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}
static void
@@ -794,8 +1213,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
- iocfc->cfgdone = BFA_TRUE;
-
/*
* configure queue register offsets as learnt from firmware
*/
@@ -811,22 +1228,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
*/
bfa_msix_queue_install(bfa);
- /*
- * Configuration is complete - initialize/start submodules
- */
- bfa_fcport_init(bfa);
-
- if (iocfc->action == BFA_IOCFC_ACT_INIT) {
- if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
- bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
- bfa_iocfc_init_cb, bfa);
- } else {
- if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
- bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
- bfa_iocfc_enable_cb, bfa);
- bfa_iocfc_start_submod(bfa);
+ if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
+ bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
+ bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
+ bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}
}
+
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
@@ -840,6 +1248,23 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
}
}
+/*
+ * Process the FAA address (pwwn/nwwn) message from fw.
+ */
+static void
+bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
+{
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+ cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
+ cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
+
+ bfa->ioc.attr->pwwn = msg->pwwn;
+ bfa->ioc.attr->nwwn = msg->nwwn;
+ bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+}
+
/* Fabric Assigned Address specific functions */
/*
@@ -855,84 +1280,13 @@ bfa_faa_validate_request(struct bfa_s *bfa)
if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
return BFA_STATUS_FEATURE_NOT_SUPPORTED;
} else {
- if (!bfa_ioc_is_acq_addr(&bfa->ioc))
- return BFA_STATUS_IOC_NON_OP;
+ return BFA_STATUS_IOC_NON_OP;
}
return BFA_STATUS_OK;
}
bfa_status_t
-bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
-{
- struct bfi_faa_en_dis_s faa_enable_req;
- struct bfa_iocfc_s *iocfc = &bfa->iocfc;
- bfa_status_t status;
-
- iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
- iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
- status = bfa_faa_validate_request(bfa);
- if (status != BFA_STATUS_OK)
- return status;
-
- if (iocfc->faa_args.busy == BFA_TRUE)
- return BFA_STATUS_DEVBUSY;
-
- if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
- return BFA_STATUS_FAA_ENABLED;
-
- if (bfa_fcport_is_trunk_enabled(bfa))
- return BFA_STATUS_ERROR_TRUNK_ENABLED;
-
- bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
- iocfc->faa_args.busy = BFA_TRUE;
-
- memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
- bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
- BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
-
- bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
- sizeof(struct bfi_faa_en_dis_s));
-
- return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
- void *cbarg)
-{
- struct bfi_faa_en_dis_s faa_disable_req;
- struct bfa_iocfc_s *iocfc = &bfa->iocfc;
- bfa_status_t status;
-
- iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
- iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
- status = bfa_faa_validate_request(bfa);
- if (status != BFA_STATUS_OK)
- return status;
-
- if (iocfc->faa_args.busy == BFA_TRUE)
- return BFA_STATUS_DEVBUSY;
-
- if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
- return BFA_STATUS_FAA_DISABLED;
-
- bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
- iocfc->faa_args.busy = BFA_TRUE;
-
- memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
- bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
- BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
-
- bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
- sizeof(struct bfi_faa_en_dis_s));
-
- return BFA_STATUS_OK;
-}
-
-bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
bfa_cb_iocfc_t cbfn, void *cbarg)
{
@@ -963,38 +1317,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
}
/*
- * FAA enable response
- */
-static void
-bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
- struct bfi_faa_en_dis_rsp_s *rsp)
-{
- void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
- bfa_status_t status = rsp->status;
-
- WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
- iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
- iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
- * FAA disable response
- */
-static void
-bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
- struct bfi_faa_en_dis_rsp_s *rsp)
-{
- void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
- bfa_status_t status = rsp->status;
-
- WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
- iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
- iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
* FAA query response
*/
static void
@@ -1023,25 +1345,10 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
struct bfa_s *bfa = bfa_arg;
- if (status == BFA_STATUS_FAA_ACQ_ADDR) {
- bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
- bfa_iocfc_init_cb, bfa);
- return;
- }
-
- if (status != BFA_STATUS_OK) {
- bfa_isr_disable(bfa);
- if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
- bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
- bfa_iocfc_init_cb, bfa);
- else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
- bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
- bfa_iocfc_enable_cb, bfa);
- return;
- }
-
- bfa_iocfc_send_cfg(bfa);
- bfa_dconf_modinit(bfa);
+ if (status == BFA_STATUS_OK)
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
+ else
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}
/*
@@ -1052,17 +1359,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
- bfa_isr_disable(bfa);
- bfa_iocfc_disable_submod(bfa);
-
- if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
- bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
- bfa);
- else {
- WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
- bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
- bfa);
- }
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
}
/*
@@ -1074,13 +1371,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
struct bfa_s *bfa = bfa_arg;
bfa->queue_process = BFA_FALSE;
-
- bfa_isr_disable(bfa);
- bfa_iocfc_disable_submod(bfa);
-
- if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
- bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
- bfa);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}
/*
@@ -1095,7 +1386,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
bfa_isr_enable(bfa);
}
-
/*
* Query IOC memory requirement information.
*/
@@ -1171,6 +1461,12 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
INIT_LIST_HEAD(&bfa->comp_q);
for (i = 0; i < BFI_IOC_MAX_CQS; i++)
INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
+
+ bfa->iocfc.cb_reqd = BFA_FALSE;
+ bfa->iocfc.op_status = BFA_STATUS_OK;
+ bfa->iocfc.submod_enabled = BFA_FALSE;
+
+ bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}
/*
@@ -1179,8 +1475,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
void
bfa_iocfc_init(struct bfa_s *bfa)
{
- bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
- bfa_ioc_enable(&bfa->ioc);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}
/*
@@ -1190,8 +1485,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
void
bfa_iocfc_start(struct bfa_s *bfa)
{
- if (bfa->iocfc.cfgdone)
- bfa_iocfc_start_submod(bfa);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
}
/*
@@ -1201,12 +1495,8 @@ bfa_iocfc_start(struct bfa_s *bfa)
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
- bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
-
bfa->queue_process = BFA_FALSE;
- bfa_dconf_modexit(bfa);
- if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
- bfa_ioc_disable(&bfa->ioc);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
}
void
@@ -1226,13 +1516,9 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
case BFI_IOCFC_I2H_UPDATEQ_RSP:
iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
break;
- case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
- bfa_faa_enable_reply(iocfc,
- (struct bfi_faa_en_dis_rsp_s *)msg);
- break;
- case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
- bfa_faa_disable_reply(iocfc,
- (struct bfi_faa_en_dis_rsp_s *)msg);
+ case BFI_IOCFC_I2H_ADDR_MSG:
+ bfa_iocfc_process_faa_addr(bfa,
+ (struct bfi_faa_addr_msg_s *)msg);
break;
case BFI_IOCFC_I2H_FAA_QUERY_RSP:
bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
@@ -1306,8 +1592,8 @@ bfa_iocfc_enable(struct bfa_s *bfa)
{
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
"IOC Enable");
- bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
- bfa_ioc_enable(&bfa->ioc);
+ bfa->iocfc.cb_reqd = BFA_TRUE;
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
}
void
@@ -1315,17 +1601,16 @@ bfa_iocfc_disable(struct bfa_s *bfa)
{
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
"IOC Disable");
- bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
bfa->queue_process = BFA_FALSE;
- bfa_ioc_disable(&bfa->ioc);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
}
-
bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
- return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
+ return bfa_ioc_is_operational(&bfa->ioc) &&
+ bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
}
/*
@@ -1567,16 +1852,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
}
}
-void
-bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
-{
- if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
- if (bfa->iocfc.cfgdone == BFA_TRUE)
- bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
- bfa_iocfc_init_cb, bfa);
- }
-}
-
/*
* Return the list of PCI vendor/device id lists supported by this
* BFA instance.
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index cb07c628b2f1..36756ce0e58f 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -52,7 +52,7 @@ struct bfa_iocfc_fwcfg_s {
u16 num_uf_bufs; /* unsolicited recv buffers */
u8 num_cqs;
u8 fw_tick_res; /* FW clock resolution in ms */
- u8 rsvd[2];
+ u8 rsvd[6];
};
#pragma pack()
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index d4f951fe753e..5d2a1307e5ce 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5717,6 +5717,8 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
if (vport_drv->comp_del)
complete(vport_drv->comp_del);
+ else
+ kfree(vport_drv);
bfa_lps_delete(vport->lps);
}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 52628d5d3c9b..fe0463a1db04 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -2169,7 +2169,10 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
* - MAX receive frame size
*/
rport->cisc = plogi->csp.cisc;
- rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+ if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz))
+ rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+ else
+ rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz);
bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
bfa_trc(port->fcs, port->fabric->bb_credit);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index eca7ab78085b..14e6284e48e4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -88,7 +88,6 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
-static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
@@ -97,7 +96,6 @@ static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
-
/*
* IOC state machine definitions/declarations
*/
@@ -114,7 +112,6 @@ enum ioc_event {
IOC_E_HWERROR = 10, /* hardware error interrupt */
IOC_E_TIMEOUT = 11, /* timeout */
IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
- IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -127,7 +124,6 @@ bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -140,7 +136,6 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
- {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};
/*
@@ -371,17 +366,9 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
switch (event) {
case IOC_E_FWRSP_GETATTR:
bfa_ioc_timer_stop(ioc);
- bfa_ioc_check_attr_wwns(ioc);
- bfa_ioc_hb_monitor(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
- case IOC_E_FWRSP_ACQ_ADDR:
- bfa_ioc_timer_stop(ioc);
- bfa_ioc_hb_monitor(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
- break;
-
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_timer_stop(ioc);
@@ -406,51 +393,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
-/*
- * Acquiring address from fabric (entry function)
- */
-static void
-bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
-{
-}
-
-/*
- * Acquiring address from the fabric
- */
-static void
-bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
-{
- bfa_trc(ioc, event);
-
- switch (event) {
- case IOC_E_FWRSP_GETATTR:
- bfa_ioc_check_attr_wwns(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
- break;
-
- case IOC_E_PFFAILED:
- case IOC_E_HWERROR:
- bfa_hb_timer_stop(ioc);
- case IOC_E_HBFAIL:
- ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
- if (event != IOC_E_PFFAILED)
- bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
- break;
-
- case IOC_E_DISABLE:
- bfa_hb_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
- break;
-
- case IOC_E_ENABLE:
- break;
-
- default:
- bfa_sm_fault(ioc, event);
- }
-}
-
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
@@ -458,6 +400,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+ bfa_ioc_hb_monitor(ioc);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}
@@ -738,26 +681,60 @@ static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
struct bfi_ioc_image_hdr_s fwhdr;
- u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+ u32 r32, fwstate, pgnum, pgoff, loff = 0;
+ int i;
+
+ /*
+ * Spin on init semaphore to serialize.
+ */
+ r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+ while (r32 & 0x1) {
+ udelay(20);
+ r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+ }
/* h/w sem init */
- if (fwstate == BFI_IOC_UNINIT)
+ fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+ if (fwstate == BFI_IOC_UNINIT) {
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
goto sem_get;
+ }
bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
- if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+ if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
goto sem_get;
+ }
+
+ /*
+ * Clear fwver hdr
+ */
+ pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+ writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
+ bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
+ loff += sizeof(u32);
+ }
bfa_trc(iocpf->ioc, fwstate);
- bfa_trc(iocpf->ioc, fwhdr.exec);
+ bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
/*
- * Try to lock and then unlock the semaphore.
+ * Unlock the hw semaphore; this path is taken only once per boot.
*/
readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+
+ /*
+ * unlock init semaphore.
+ */
+ writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+
sem_get:
bfa_ioc_hw_sem_get(iocpf->ioc);
}
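The new code above serializes fwcheck through a hardware init semaphore: the register semantics are inferred from the spin/release pairing shown, i.e. a read returning bit 0 clear grants ownership and writing 1 releases. A reduced sketch:
/* Simplified spin-acquire/release on a read-to-acquire hw semaphore.
 * Semantics assumed from the code above: a read with bit 0 clear
 * grants ownership; writing 1 releases it. */
static void toy_init_sem_get(void __iomem *sem_reg)
{
	u32 r32 = readl(sem_reg);

	while (r32 & 0x1) {		/* still held elsewhere */
		udelay(20);
		r32 = readl(sem_reg);
	}
}

static void toy_init_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}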
@@ -1707,11 +1684,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 i;
u32 asicmode;
- /*
- * Initialize LMEM first before code download
- */
- bfa_ioc_lmem_init(ioc);
-
bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
@@ -1999,6 +1971,12 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
bfa_ioc_pll_init_asic(ioc);
ioc->pllinit = BFA_TRUE;
+
+ /*
+ * Initialize LMEM
+ */
+ bfa_ioc_lmem_init(ioc);
+
/*
* release semaphore.
*/
@@ -2122,10 +2100,6 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
bfa_ioc_getattr_reply(ioc);
break;
- case BFI_IOC_I2H_ACQ_ADDR_REPLY:
- bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
- break;
-
default:
bfa_trc(ioc, msg->mh.msg_id);
WARN_ON(1);
@@ -2416,15 +2390,6 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
}
/*
- * Return TRUE if IOC is in acquiring address state
- */
-bfa_boolean_t
-bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
-{
- return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
-}
-
-/*
* return true if IOC firmware is different.
*/
bfa_boolean_t
@@ -2916,17 +2881,6 @@ bfa_ioc_recover(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
-static void
-bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
-{
- if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
- return;
- if (ioc->attr->nwwn == 0)
- bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
- if (ioc->attr->pwwn == 0)
- bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
-}
-
/*
* BFA IOC PF private functions
*/
@@ -4495,7 +4449,7 @@ bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
*/
#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
-#define BFA_DIAG_FWPING_TOV 1000 /* msec */
+#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
/* IOC event handler */
static void
@@ -4772,7 +4726,7 @@ diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
}
static void
-diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg)
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
{
bfa_trc(diag, diag->ledtest.lock);
diag->ledtest.lock = BFA_FALSE;
@@ -4850,6 +4804,8 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
u32 pattern, struct bfa_diag_memtest_result *result,
bfa_cb_diag_t cbfn, void *cbarg)
{
+ u32 memtest_tov;
+
bfa_trc(diag, pattern);
if (!bfa_ioc_adapter_is_disabled(diag->ioc))
@@ -4869,8 +4825,10 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
/* download memtest code and take LPU0 out of reset */
bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
+ memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
+ CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
- bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
+ bfa_diag_memtest_done, diag, memtest_tov);
diag->timer_active = 1;
return BFA_STATUS_OK;
}
@@ -5641,24 +5599,27 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
case BFA_DCONF_SM_INIT:
if (dconf->min_cfg) {
bfa_trc(dconf->bfa, dconf->min_cfg);
+ bfa_fsm_send_event(&dconf->bfa->iocfc,
+ IOCFC_E_DCONF_DONE);
return;
}
bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
- dconf->flashdone = BFA_FALSE;
- bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
BFA_FLASH_PART_DRV, dconf->instance,
dconf->dconf,
sizeof(struct bfa_dconf_s), 0,
bfa_dconf_init_cb, dconf->bfa);
if (bfa_status != BFA_STATUS_OK) {
+ bfa_timer_stop(&dconf->timer);
bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
return;
}
break;
case BFA_DCONF_SM_EXIT:
- dconf->flashdone = BFA_TRUE;
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
case BFA_DCONF_SM_IOCDISABLE:
case BFA_DCONF_SM_WR:
case BFA_DCONF_SM_FLASH_COMP:
@@ -5679,15 +5640,20 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
switch (event) {
case BFA_DCONF_SM_FLASH_COMP:
+ bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
break;
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
break;
case BFA_DCONF_SM_EXIT:
- dconf->flashdone = BFA_TRUE;
- bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_timer_stop(&dconf->timer);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+ break;
case BFA_DCONF_SM_IOCDISABLE:
+ bfa_timer_stop(&dconf->timer);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
break;
default:
@@ -5710,9 +5676,8 @@ bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
- dconf->flashdone = BFA_TRUE;
- bfa_trc(dconf->bfa, dconf->flashdone);
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_INIT:
case BFA_DCONF_SM_IOCDISABLE:
@@ -5774,9 +5739,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
bfa_timer_stop(&dconf->timer);
case BFA_DCONF_SM_TIMEOUT:
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
- dconf->flashdone = BFA_TRUE;
- bfa_trc(dconf->bfa, dconf->flashdone);
- bfa_ioc_disable(&dconf->bfa->ioc);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
default:
bfa_sm_fault(dconf->bfa, event);
@@ -5823,8 +5786,8 @@ bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
break;
case BFA_DCONF_SM_EXIT:
- dconf->flashdone = BFA_TRUE;
bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
break;
case BFA_DCONF_SM_IOCDISABLE:
break;
@@ -5865,11 +5828,6 @@ bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
if (cfg->drvcfg.min_cfg) {
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
dconf->min_cfg = BFA_TRUE;
- /*
- * Set the flashdone flag to TRUE explicitly as no flash
- * write will happen in min_cfg mode.
- */
- dconf->flashdone = BFA_TRUE;
} else {
dconf->min_cfg = BFA_FALSE;
bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
@@ -5885,9 +5843,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
struct bfa_s *bfa = arg;
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
- dconf->flashdone = BFA_TRUE;
- bfa_trc(bfa, dconf->flashdone);
- bfa_iocfc_cb_dconf_modinit(bfa, status);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
if (status == BFA_STATUS_OK) {
bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5895,7 +5851,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
dconf->dconf->hdr.version = BFI_DCONF_VERSION;
}
- bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+ bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}
void
@@ -5977,7 +5933,5 @@ void
bfa_dconf_modexit(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
- BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
- bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 546d46b37101..1a99d4b5b50f 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -373,6 +373,22 @@ struct bfa_cb_qe_s {
};
/*
+ * IOCFC state machine definitions/declarations
+ */
+enum iocfc_event {
+ IOCFC_E_INIT = 1, /* IOCFC init request */
+ IOCFC_E_START = 2, /* IOCFC mod start request */
+ IOCFC_E_STOP = 3, /* IOCFC stop request */
+ IOCFC_E_ENABLE = 4, /* IOCFC enable request */
+ IOCFC_E_DISABLE = 5, /* IOCFC disable request */
+ IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */
+ IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */
+ IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */
+ IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */
+ IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */
+};
+
+/*
* ASIC block configuration related
*/
@@ -706,7 +722,6 @@ struct bfa_dconf_s {
struct bfa_dconf_mod_s {
bfa_sm_t sm;
u8 instance;
- bfa_boolean_t flashdone;
bfa_boolean_t read_data_valid;
bfa_boolean_t min_cfg;
struct bfa_timer_s timer;
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index d1b8f0caaa79..2eb0c6a2938d 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -786,17 +786,73 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
}
#define CT2_NFC_MAX_DELAY 1000
+#define CT2_NFC_VER_VALID 0x143
+#define BFA_IOC_PLL_POLL 1000000
+
+static bfa_boolean_t
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+ u32 r32;
+
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (r32 & __NFC_CONTROLLER_HALTED)
+ return BFA_TRUE;
+
+ return BFA_FALSE;
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+ u32 r32;
+ int i;
+
+ writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+ for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (!(r32 & __NFC_CONTROLLER_HALTED))
+ return;
+ udelay(1000);
+ }
+ WARN_ON(1);
+}
+
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
- u32 wgn, r32;
- int i;
+ u32 wgn, r32, nfc_ver, i;
- /*
- * Initialize PLL if not already done by NFC
- */
wgn = readl(rb + CT2_WGN_STATUS);
- if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+ nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+ if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
+ (nfc_ver >= CT2_NFC_VER_VALID)) {
+ if (bfa_ioc_ct2_nfc_halted(rb))
+ bfa_ioc_ct2_nfc_resume(rb);
+
+ writel(__RESET_AND_START_SCLK_LCLK_PLLS,
+ rb + CT2_CSI_FW_CTL_SET_REG);
+
+ for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
+ break;
+ }
+
+ WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+
+ for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
+ break;
+ }
+
+ WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+ udelay(1000);
+
+ r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+ WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+ } else {
writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -804,57 +860,62 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
break;
udelay(1000);
}
- }
- /*
- * Mask the interrupts and clear any
- * pending interrupts.
- */
- writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
- writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
-
- r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
- if (r32 == 1) {
- writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
- readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ bfa_ioc_ct2_mac_reset(rb);
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+ * release soft reset on s_clk
+ */
+ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
+ writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * release soft reset on l_clk
+ */
+ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
}
- r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
- if (r32 == 1) {
- writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
- readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
- }
-
- bfa_ioc_ct2_mac_reset(rb);
- bfa_ioc_ct2_sclk_init(rb);
- bfa_ioc_ct2_lclk_init(rb);
-
- /*
- * release soft reset on s_clk & l_clk
- */
- r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
- writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
- (rb + CT2_APP_PLL_SCLK_CTL_REG));
-
- /*
- * release soft reset on s_clk & l_clk
- */
- r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
- writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
- (rb + CT2_APP_PLL_LCLK_CTL_REG));
/*
* Announce flash device presence, if flash was corrupted.
*/
if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
- r32 = readl((rb + PSS_GPIO_OUT_REG));
+ r32 = readl(rb + PSS_GPIO_OUT_REG);
writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
- r32 = readl((rb + PSS_GPIO_OE_REG));
+ r32 = readl(rb + PSS_GPIO_OE_REG);
writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}
+ /*
+ * Mask the interrupts and clear any
+ * pending interrupts.
+ */
+ writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+ writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+ /* For first time initialization, no need to clear interrupts */
+ r32 = readl(rb + HOST_SEM5_REG);
+ if (r32 & 0x1) {
+ r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
+ if (r32 == 1) {
+ writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
+ readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ }
+ r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+ if (r32 == 1) {
+ writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
+ readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+ }
+ }
+
bfa_ioc_ct2_mem_init(rb);
- writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
- writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+ writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
+ writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
+
return BFA_STATUS_OK;
}
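bfa_ioc_ct2_pll_init() now busy-polls the LCLK control register for the reset/start bit to assert and then deassert, each loop bounded by BFA_IOC_PLL_POLL iterations with WARN_ON() on timeout. The pattern in isolation (register and bit are placeholders):
/* Generic bounded poll for a register bit to reach a desired state;
 * returns BFA_TRUE on success so the caller can WARN_ON() a timeout,
 * mirroring the two polling loops above. */
static bfa_boolean_t
toy_poll_bit(void __iomem *reg, u32 bit, bfa_boolean_t want_set, int tries)
{
	u32 r32;
	int i;

	for (i = 0; i < tries; i++) {
		r32 = readl(reg);
		if (!!(r32 & bit) == !!want_set)
			return BFA_TRUE;
	}
	return BFA_FALSE;		/* timed out */
}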
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index aa8a0eaf91f9..2e856e6710f7 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -1280,6 +1280,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
switch (event) {
case BFA_LPS_SM_RESUME:
bfa_sm_set_state(lps, bfa_lps_sm_login);
+ bfa_lps_send_login(lps);
break;
case BFA_LPS_SM_OFFLINE:
@@ -1578,7 +1579,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
break;
case BFA_STATUS_VPORT_MAX:
- if (!rsp->ext_status)
+ if (rsp->ext_status)
bfa_lps_no_res(lps, rsp->ext_status);
break;
@@ -3084,33 +3085,6 @@ bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
}
static void
-bfa_fcport_send_txcredit(void *port_cbarg)
-{
-
- struct bfa_fcport_s *fcport = port_cbarg;
- struct bfi_fcport_set_svc_params_req_s *m;
-
- /*
- * check for room in queue to send request now
- */
- m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
- if (!m) {
- bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
- return;
- }
-
- bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
- bfa_fn_lpu(fcport->bfa));
- m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
- m->bb_scn = fcport->cfg.bb_scn;
-
- /*
- * queue I/O message to firmware
- */
- bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
-}
-
-static void
bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
struct bfa_qos_stats_s *s)
{
@@ -3602,26 +3576,24 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
return BFA_STATUS_UNSUPP_SPEED;
}
- /* For Mezz card, port speed entered needs to be checked */
- if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
- if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
- /* For CT2, 1G is not supported */
- if ((speed == BFA_PORT_SPEED_1GBPS) &&
- (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
- return BFA_STATUS_UNSUPP_SPEED;
+ /* Port speed entered needs to be checked */
+ if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+ /* For CT2, 1G is not supported */
+ if ((speed == BFA_PORT_SPEED_1GBPS) &&
+ (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+ return BFA_STATUS_UNSUPP_SPEED;
- /* Already checked for Auto Speed and Max Speed supp */
- if (!(speed == BFA_PORT_SPEED_1GBPS ||
- speed == BFA_PORT_SPEED_2GBPS ||
- speed == BFA_PORT_SPEED_4GBPS ||
- speed == BFA_PORT_SPEED_8GBPS ||
- speed == BFA_PORT_SPEED_16GBPS ||
- speed == BFA_PORT_SPEED_AUTO))
- return BFA_STATUS_UNSUPP_SPEED;
- } else {
- if (speed != BFA_PORT_SPEED_10GBPS)
- return BFA_STATUS_UNSUPP_SPEED;
- }
+ /* Already checked for Auto Speed and Max Speed supp */
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO))
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS)
+ return BFA_STATUS_UNSUPP_SPEED;
}
fcport->cfg.speed = speed;
@@ -3765,7 +3737,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
fcport->cfg.bb_scn = bb_scn;
if (bb_scn)
fcport->bbsc_op_state = BFA_TRUE;
- bfa_fcport_send_txcredit(fcport);
}
/*
@@ -3825,8 +3796,6 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
attr->port_state = BFA_PORT_ST_IOCDIS;
else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
attr->port_state = BFA_PORT_ST_FWMISMATCH;
- else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
- attr->port_state = BFA_PORT_ST_ACQ_ADDR;
}
/* FCoE vlan */
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index b52cbb6bcd5a..f30067564639 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -663,10 +663,6 @@ void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
/* FAA specific APIs */
-bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
- bfa_cb_iocfc_t cbfn, void *cbarg);
-bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
- bfa_cb_iocfc_t cbfn, void *cbarg);
bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
bfa_cb_iocfc_t cbfn, void *cbarg);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 1938fe0473e9..7b1ecd2b3ffe 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -442,6 +442,43 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
return status;
}
+int
+bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ uint32_t status;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ status = bfa_port_disable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (status != BFA_STATUS_OK)
+ return -EIO;
+
+ wait_for_completion(&fcomp.comp);
+ if (fcomp.status != BFA_STATUS_OK)
+ return -EIO;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ status = bfa_port_enable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (status != BFA_STATUS_OK)
+ return -EIO;
+
+ wait_for_completion(&fcomp.comp);
+ if (fcomp.status != BFA_STATUS_OK)
+ return -EIO;
+
+ return 0;
+}
+
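bfad_im_issue_fc_host_lip() above relies on the driver's bfad_hal_comp/bfad_hcb_comp pairing to turn the asynchronous port disable/enable into a blocking call. A sketch of that callback shape, with field names inferred from the call sites here rather than from the header:
/* Inferred shape of the completion shim: the BFA callback records the
 * final status and wakes the waiter blocked in wait_for_completion(). */
struct toy_hal_comp {
	bfa_status_t status;
	struct completion comp;
};

static void toy_hcb_comp(void *arg, bfa_status_t status)
{
	struct toy_hal_comp *fcomp = arg;

	fcomp->status = status;		/* checked by the waiter */
	complete(&fcomp->comp);
}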
static int
bfad_im_vport_delete(struct fc_vport *fc_vport)
{
@@ -457,8 +494,11 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
unsigned long flags;
struct completion fcomp;
- if (im_port->flags & BFAD_PORT_DELETE)
- goto free_scsi_host;
+ if (im_port->flags & BFAD_PORT_DELETE) {
+ bfad_scsi_host_free(bfad, im_port);
+ list_del(&vport->list_entry);
+ return 0;
+ }
port = im_port->port;
@@ -489,7 +529,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
wait_for_completion(vport->comp_del);
-free_scsi_host:
bfad_scsi_host_free(bfad, im_port);
list_del(&vport->list_entry);
kfree(vport);
@@ -579,7 +618,7 @@ struct fc_function_template bfad_im_fc_function_template = {
.show_rport_dev_loss_tmo = 1,
.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
-
+ .issue_fc_host_lip = bfad_im_issue_fc_host_lip,
.vport_create = bfad_im_vport_create,
.vport_delete = bfad_im_vport_delete,
.vport_disable = bfad_im_vport_disable,
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 530de2b1200a..e1f4b10df42a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -1288,50 +1288,6 @@ out:
}
int
-bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
-{
- struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
- unsigned long flags;
- struct bfad_hal_comp fcomp;
-
- init_completion(&fcomp.comp);
- iocmd->status = BFA_STATUS_OK;
- spin_lock_irqsave(&bfad->bfad_lock, flags);
- iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
- if (iocmd->status != BFA_STATUS_OK)
- goto out;
-
- wait_for_completion(&fcomp.comp);
- iocmd->status = fcomp.status;
-out:
- return 0;
-}
-
-int
-bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
-{
- struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
- unsigned long flags;
- struct bfad_hal_comp fcomp;
-
- init_completion(&fcomp.comp);
- iocmd->status = BFA_STATUS_OK;
- spin_lock_irqsave(&bfad->bfad_lock, flags);
- iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
- if (iocmd->status != BFA_STATUS_OK)
- goto out;
-
- wait_for_completion(&fcomp.comp);
- iocmd->status = fcomp.status;
-out:
- return 0;
-}
-
-int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
@@ -1918,6 +1874,7 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
void *iocmd_bufptr;
unsigned long flags;
+ u32 offset;
if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
@@ -1935,8 +1892,10 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
spin_lock_irqsave(&bfad->bfad_lock, flags);
+ offset = iocmd->offset;
iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
- (u32 *)&iocmd->offset, &iocmd->bufsz);
+ &offset, &iocmd->bufsz);
+ iocmd->offset = offset;
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
out:
return 0;
@@ -2633,12 +2592,6 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
case IOCMD_FLASH_DISABLE_OPTROM:
rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
break;
- case IOCMD_FAA_ENABLE:
- rc = bfad_iocmd_faa_enable(bfad, iocmd);
- break;
- case IOCMD_FAA_DISABLE:
- rc = bfad_iocmd_faa_disable(bfad, iocmd);
- break;
case IOCMD_FAA_QUERY:
rc = bfad_iocmd_faa_query(bfad, iocmd);
break;
@@ -2809,9 +2762,16 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) job->shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
+ struct request_queue *request_q = job->req->q;
void *payload_kbuf;
int rc = -EINVAL;
+ /*
+ * Set the BSG device request_queue segment limit to 256 to
+ * support payloads larger than 512 KB.
+ */
+ blk_queue_max_segments(request_q, 256);
+
/* Allocate a temp buffer to hold the passed in user space command */
payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
if (!payload_kbuf) {
@@ -3047,8 +3007,7 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
* Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
* buffer of size bsg_data->payload_len
*/
- bsg_fcpt = (struct bfa_bsg_fcpt_s *)
- kzalloc(bsg_data->payload_len, GFP_KERNEL);
+ bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
if (!bsg_fcpt)
goto out;
@@ -3060,6 +3019,7 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
if (drv_fcxp == NULL) {
+ kfree(bsg_fcpt);
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index e859adb9aa9e..17ad67283130 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -83,8 +83,6 @@ enum {
IOCMD_PORT_CFG_MODE,
IOCMD_FLASH_ENABLE_OPTROM,
IOCMD_FLASH_DISABLE_OPTROM,
- IOCMD_FAA_ENABLE,
- IOCMD_FAA_DISABLE,
IOCMD_FAA_QUERY,
IOCMD_CEE_GET_ATTR,
IOCMD_CEE_GET_STATS,
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index dc5b9d99c450..7f74f1d19124 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -56,7 +56,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "3.0.2.2"
+#define BFAD_DRIVER_VERSION "3.0.23.0"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 0d9f1fb50db0..d4220e13cafa 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -28,17 +28,15 @@ enum bfi_iocfc_h2i_msgs {
BFI_IOCFC_H2I_CFG_REQ = 1,
BFI_IOCFC_H2I_SET_INTR_REQ = 2,
BFI_IOCFC_H2I_UPDATEQ_REQ = 3,
- BFI_IOCFC_H2I_FAA_ENABLE_REQ = 4,
- BFI_IOCFC_H2I_FAA_DISABLE_REQ = 5,
- BFI_IOCFC_H2I_FAA_QUERY_REQ = 6,
+ BFI_IOCFC_H2I_FAA_QUERY_REQ = 4,
+ BFI_IOCFC_H2I_ADDR_REQ = 5,
};
enum bfi_iocfc_i2h_msgs {
BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3),
- BFI_IOCFC_I2H_FAA_ENABLE_RSP = BFA_I2HM(4),
- BFI_IOCFC_I2H_FAA_DISABLE_RSP = BFA_I2HM(5),
- BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(6),
+ BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(4),
+ BFI_IOCFC_I2H_ADDR_MSG = BFA_I2HM(5),
};
struct bfi_iocfc_cfg_s {
@@ -184,6 +182,13 @@ struct bfi_faa_en_dis_s {
struct bfi_mhdr_s mh; /* common msg header */
};
+struct bfi_faa_addr_msg_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 rsvd[4];
+ wwn_t pwwn; /* Fabric acquired PWWN */
+ wwn_t nwwn; /* Fabric acquired NWWN */
+};
+
/*
* BFI_IOCFC_H2I_FAA_QUERY_REQ message
*/
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index d892064b64a8..ed5f159e1867 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -335,11 +335,17 @@ enum {
#define __PMM_1T_PNDB_P 0x00000002
#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
#define CT2_WGN_STATUS 0x00014990
+#define __A2T_AHB_LOAD 0x00000800
#define __WGN_READY 0x00000400
#define __GLBL_PF_VF_CFG_RDY 0x00000200
+#define CT2_NFC_CSR_CLR_REG 0x00027420
#define CT2_NFC_CSR_SET_REG 0x00027424
#define __HALT_NFC_CONTROLLER 0x00000002
#define __NFC_CONTROLLER_HALTED 0x00001000
+#define CT2_RSC_GPR15_REG 0x0002765c
+#define CT2_CSI_FW_CTL_REG 0x00027080
+#define CT2_CSI_FW_CTL_SET_REG 0x00027088
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
#define __CSI_MAC_RESET 0x00000010
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 049ea907e04a..a4953ef9e53a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.9"
+#define BNX2FC_VERSION "1.0.10"
#define PFX "bnx2fc: "
@@ -114,6 +114,8 @@
#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
#define BNX2FC_MAX_SEQS 255
+#define BNX2FC_MAX_RETRY_CNT 3
+#define BNX2FC_MAX_RPORT_RETRY_CNT 255
#define BNX2FC_READ (1 << 1)
#define BNX2FC_WRITE (1 << 0)
@@ -121,8 +123,10 @@
#define BNX2FC_MIN_XID 0
#define BNX2FC_MAX_XID \
(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
+#define FCOE_MAX_NUM_XIDS 0x2000
#define FCOE_MIN_XID (BNX2FC_MAX_XID + 1)
-#define FCOE_MAX_XID (FCOE_MIN_XID + 4095)
+#define FCOE_MAX_XID (FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1)
+#define FCOE_XIDS_PER_CPU (FCOE_MIN_XID + (512 * nr_cpu_ids) - 1)
#define BNX2FC_MAX_LUN 0xFFFF
#define BNX2FC_MAX_FCP_TGT 256
#define BNX2FC_MAX_CMD_LEN 16
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
index 399cda047a77..dad9924abbbb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_constants.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -7,7 +7,7 @@
/* Current FCoE HSI version number composed of two fields (16 bit) */
/* Bumped on a change that breaks the previous HSI */
-#define FCOE_HSI_MAJOR_VERSION (1)
+#define FCOE_HSI_MAJOR_VERSION (2)
/* Bumped on a change that does not break the previous HSI */
#define FCOE_HSI_MINOR_VERSION (1)
@@ -47,6 +47,7 @@
#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6)
+#define FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR (0x81)
/* CQE type */
#define FCOE_PENDING_CQE_TYPE 0
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 8c6156a10d90..c1c6a92a0b98 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Oct 21, 2011"
+#define DRV_MODULE_RELDATE "Jan 22, 2011"
static char version[] __devinitdata =
@@ -322,8 +322,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
- cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
- + frag->page_offset;
+ cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
} else {
cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
}
@@ -332,7 +331,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
cp->fcoe_eof = eof;
cp->fcoe_crc32 = cpu_to_le32(~crc);
if (skb_is_nonlinear(skb)) {
- kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+ kunmap_atomic(cp);
cp = NULL;
}
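The conversion above tracks the kernel-wide move from the two-argument kmap_atomic(page, KM_type) form to the stack-based single-argument form; kunmap_atomic() correspondingly takes the mapped address rather than a slot. Usage pattern, as a minimal helper:
/* Single-argument atomic kmap: no KM_* slot, nesting is stack-based
 * and mappings must be released in reverse order, by address. */
static void toy_copy_from_page(void *dst, struct page *page,
			       unsigned int offset, size_t len)
{
	void *va = kmap_atomic(page);	/* disables preemption, maps page */

	memcpy(dst, va + offset, len);
	kunmap_atomic(va);		/* pass the address, not the page */
}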
@@ -440,13 +439,13 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
fr->fr_dev = lport;
bg = &bnx2fc_global;
- spin_lock_bh(&bg->fcoe_rx_list.lock);
+ spin_lock(&bg->fcoe_rx_list.lock);
__skb_queue_tail(&bg->fcoe_rx_list, skb);
if (bg->fcoe_rx_list.qlen == 1)
wake_up_process(bg->thread);
- spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ spin_unlock(&bg->fcoe_rx_list.lock);
return 0;
err:
@@ -940,8 +939,14 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
static int bnx2fc_em_config(struct fc_lport *lport)
{
+ int max_xid;
+
+ if (nr_cpu_ids <= 2)
+ max_xid = FCOE_XIDS_PER_CPU;
+ else
+ max_xid = FCOE_MAX_XID;
if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
- FCOE_MAX_XID, NULL)) {
+ max_xid, NULL)) {
printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
return -ENOMEM;
}
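Worked numbers for the hunk above, from the constants changed in bnx2fc.h: FCOE_XIDS_PER_CPU expands to FCOE_MIN_XID + (512 * nr_cpu_ids) - 1, i.e. 512 exchange IDs per CPU (1024 on a two-CPU system), while systems with more than two CPUs use the full FCOE_MAX_XID range of FCOE_MAX_NUM_XIDS = 0x2000 (8192) IDs.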
@@ -953,8 +958,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
{
lport->link_up = 0;
lport->qfull = 0;
- lport->max_retry_count = 3;
- lport->max_rport_retry_count = 3;
+ lport->max_retry_count = BNX2FC_MAX_RETRY_CNT;
+ lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT;
lport->e_d_tov = 2 * 1000;
lport->r_a_tov = 10 * 1000;
@@ -1537,6 +1542,7 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
static int bnx2fc_destroy(struct net_device *netdev)
{
struct bnx2fc_interface *interface = NULL;
+ struct workqueue_struct *timer_work_queue;
int rc = 0;
rtnl_lock();
@@ -1549,9 +1555,9 @@ static int bnx2fc_destroy(struct net_device *netdev)
goto netdev_err;
}
-
- destroy_workqueue(interface->timer_work_queue);
+ timer_work_queue = interface->timer_work_queue;
__bnx2fc_destroy(interface);
+ destroy_workqueue(timer_work_queue);
netdev_err:
mutex_unlock(&bnx2fc_dev_lock);
@@ -2055,6 +2061,7 @@ if_create_err:
ifput_err:
bnx2fc_net_cleanup(interface);
bnx2fc_interface_put(interface);
+ goto mod_err;
netdev_err:
module_put(THIS_MODULE);
mod_err:
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 1923a25cb6a2..afd570962b8c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1716,15 +1716,19 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
/* Tx only */
bd_count = bd_tbl->bd_valid;
+ cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
if (task_type == FCOE_TASK_TYPE_WRITE) {
if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
+ cached_sge->cur_buf_addr.lo =
fcoe_bd_tbl->buf_addr_lo;
task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
+ cached_sge->cur_buf_addr.hi =
fcoe_bd_tbl->buf_addr_hi;
task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
+ cached_sge->cur_buf_rem =
fcoe_bd_tbl->buf_len;
task->txwr_rxrd.const_ctx.init_flags |= 1 <<
@@ -1790,11 +1794,13 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
task->rxwr_txrd.var_ctx.rx_id = 0xffff;
/* Rx Only */
- cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
+ if (task_type != FCOE_TASK_TYPE_READ)
+ return;
+
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
bd_count = bd_tbl->bd_valid;
- if (task_type == FCOE_TASK_TYPE_READ &&
- dev_type == TYPE_DISK) {
+
+ if (dev_type == TYPE_DISK) {
if (bd_count == 1) {
struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 84a78af83f90..e897ce975bb8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1682,9 +1682,7 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sc_cmd->device->lun,
- (struct scsi_lun *) fcp_cmnd->fc_lun);
-
+ int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 57515f1f1690..495a841645f9 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -122,6 +122,7 @@
#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
#define ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY (0x80)
+#define ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR (0x81)
/* SQ/RQ/CQ DB structure sizes */
#define ISCSI_SQ_DB_SIZE (16)
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 1ad0b8225560..f9d6f4129093 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1312,14 +1312,18 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
/* EMC */
(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
- if (error_mask1)
+ if (error_mask1) {
iscsi_init2.error_bit_map[0] = error_mask1;
- else
+ mask64 &= (u32)(~mask64);
+ mask64 |= error_mask1;
+ } else
iscsi_init2.error_bit_map[0] = (u32) mask64;
- if (error_mask2)
+ if (error_mask2) {
iscsi_init2.error_bit_map[1] = error_mask2;
- else
+ mask64 &= 0xffffffff;
+ mask64 |= ((u64)error_mask2 << 32);
+ } else
iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
iscsi_error_mask = mask64;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1a947f1b9729..4927cca733d3 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -49,11 +49,11 @@ module_param(en_tcp_dack, int, 0664);
MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
unsigned int error_mask1 = 0x00;
-module_param(error_mask1, int, 0664);
+module_param(error_mask1, uint, 0664);
MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
unsigned int error_mask2 = 0x00;
-module_param(error_mask2, int, 0664);
+module_param(error_mask2, uint, 0664);
MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
unsigned int sq_size;
@@ -393,8 +393,9 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
p = &per_cpu(bnx2i_percpu, cpu);
- thread = kthread_create(bnx2i_percpu_io_thread, (void *)p,
- "bnx2i_thread/%d", cpu);
+ thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
+ cpu_to_node(cpu),
+ "bnx2i_thread/%d", cpu);
/* bind thread to the cpu */
if (likely(!IS_ERR(thread))) {
kthread_bind(thread, cpu);
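
[Reviewer note] kthread_create_on_node() differs from kthread_create() only in the node hint: the new thread's task_struct and stack are allocated on the memory node of the CPU it will later be bound to. A minimal sketch of the pattern, assuming a generic per-CPU worker function:

	thread = kthread_create_on_node(worker_fn, data, cpu_to_node(cpu),
					"worker/%d", cpu);
	if (!IS_ERR(thread)) {
		kthread_bind(thread, cpu);	/* the bind still pins execution */
		wake_up_process(thread);
	}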
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d3ff9cd40234..d9253db1d0e2 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1956,12 +1956,11 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
/* data fits in the skb's headroom */
for (i = 0; i < tdata->nr_frags; i++, frag++) {
- char *src = kmap_atomic(frag->page,
- KM_SOFTIRQ0);
+ char *src = kmap_atomic(frag->page);
memcpy(dst, src+frag->offset, frag->size);
dst += frag->size;
- kunmap_atomic(src, KM_SOFTIRQ0);
+ kunmap_atomic(src);
}
if (padlen) {
memset(dst, 0, padlen);
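
[Reviewer note] This and the other kmap conversions in this merge track the removal of the KM_* slot argument from kmap_atomic()/kunmap_atomic(): atomic mappings now nest like a stack and must be released in reverse order of acquisition. The new calling convention, sketched:

	char *src = kmap_atomic(page);		/* no KM_* slot argument */
	memcpy(dst, src + offset, len);
	kunmap_atomic(src);			/* release in reverse order */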
@@ -2148,11 +2147,10 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
struct cxgbi_sock *csk = cconn->cep->csk;
- int value, err = 0;
+ int err;
log_debug(1 << CXGBI_DBG_ISCSI,
"cls_conn 0x%p, param %d, buf(%d) %s.\n",
@@ -2174,15 +2172,7 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
conn->datadgst_en, 0);
break;
case ISCSI_PARAM_MAX_R2T:
- sscanf(buf, "%d", &value);
- if (value <= 0 || !is_power_of_2(value))
- return -EINVAL;
- if (session->max_r2t == value)
- break;
- iscsi_tcp_r2tpool_free(session);
- err = iscsi_set_param(cls_conn, param, buf, buflen);
- if (!err && iscsi_tcp_r2tpool_alloc(session))
- return -ENOMEM;
+ return iscsi_tcp_set_max_r2t(conn, buf);
case ISCSI_PARAM_MAX_RECV_DLENGTH:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err)
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index c2677ba29c74..4b11bb04f5c4 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -72,7 +72,6 @@
#endif
-#include <asm/system.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index e9599600aa23..335e85192807 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -168,6 +168,14 @@ static struct fc_function_template fcoe_nport_fc_functions = {
.show_host_supported_fc4s = 1,
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
+ .show_host_serial_number = 1,
+ .show_host_manufacturer = 1,
+ .show_host_model = 1,
+ .show_host_model_description = 1,
+ .show_host_hardware_version = 1,
+ .show_host_driver_version = 1,
+ .show_host_firmware_version = 1,
+ .show_host_optionrom_version = 1,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
@@ -208,6 +216,14 @@ static struct fc_function_template fcoe_vport_fc_functions = {
.show_host_supported_fc4s = 1,
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
+ .show_host_serial_number = 1,
+ .show_host_manufacturer = 1,
+ .show_host_model = 1,
+ .show_host_model_description = 1,
+ .show_host_hardware_version = 1,
+ .show_host_driver_version = 1,
+ .show_host_firmware_version = 1,
+ .show_host_optionrom_version = 1,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
@@ -364,11 +380,10 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
if (!fcoe) {
FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
fcoe = ERR_PTR(-ENOMEM);
- goto out_nomod;
+ goto out_putmod;
}
dev_hold(netdev);
- kref_init(&fcoe->kref);
/*
* Initialize FIP.
@@ -384,54 +399,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
kfree(fcoe);
dev_put(netdev);
fcoe = ERR_PTR(err);
- goto out_nomod;
+ goto out_putmod;
}
goto out;
-out_nomod:
+out_putmod:
module_put(THIS_MODULE);
out:
return fcoe;
}
/**
- * fcoe_interface_release() - fcoe_port kref release function
- * @kref: Embedded reference count in an fcoe_interface struct
- */
-static void fcoe_interface_release(struct kref *kref)
-{
- struct fcoe_interface *fcoe;
- struct net_device *netdev;
-
- fcoe = container_of(kref, struct fcoe_interface, kref);
- netdev = fcoe->netdev;
- /* tear-down the FCoE controller */
- fcoe_ctlr_destroy(&fcoe->ctlr);
- kfree(fcoe);
- dev_put(netdev);
- module_put(THIS_MODULE);
-}
-
-/**
- * fcoe_interface_get() - Get a reference to a FCoE interface
- * @fcoe: The FCoE interface to be held
- */
-static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
-{
- kref_get(&fcoe->kref);
-}
-
-/**
- * fcoe_interface_put() - Put a reference to a FCoE interface
- * @fcoe: The FCoE interface to be released
- */
-static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
-{
- kref_put(&fcoe->kref, fcoe_interface_release);
-}
-
-/**
* fcoe_interface_cleanup() - Clean up a FCoE interface
* @fcoe: The FCoE interface to be cleaned up
*
@@ -478,7 +457,11 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
rtnl_unlock();
/* Release the self-reference taken during fcoe_interface_create() */
- fcoe_interface_put(fcoe);
+ /* tear-down the FCoE controller */
+ fcoe_ctlr_destroy(fip);
+ kfree(fcoe);
+ dev_put(netdev);
+ module_put(THIS_MODULE);
}
/**
@@ -734,6 +717,85 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
return 0;
}
+
+/**
+ * fcoe_fdmi_info() - Get FDMI related info from net device for SW FCoE
+ * @lport: The local port that is associated with the net device
+ * @netdev: The associated net device
+ *
+ * Must be called after fcoe_shost_config() as it uses the local port mutex
+ *
+ */
+static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+ struct net_device *realdev;
+ int rc;
+ struct netdev_fcoe_hbainfo fdmi;
+
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ realdev = fcoe->realdev;
+
+ if (!realdev)
+ return;
+
+ /* No FDMI state machine for NPIV ports */
+ if (lport->vport)
+ return;
+
+ if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) {
+ memset(&fdmi, 0, sizeof(fdmi));
+ rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev,
+ &fdmi);
+ if (rc) {
+ printk(KERN_INFO "fcoe: Failed to retrieve FDMI "
+ "information from netdev.\n");
+ return;
+ }
+
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE,
+ "%s",
+ fdmi.serial_number);
+ snprintf(fc_host_manufacturer(lport->host),
+ FC_SERIAL_NUMBER_SIZE,
+ "%s",
+ fdmi.manufacturer);
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE,
+ "%s",
+ fdmi.model);
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE,
+ "%s",
+ fdmi.model_description);
+ snprintf(fc_host_hardware_version(lport->host),
+ FC_VERSION_STRING_SIZE,
+ "%s",
+ fdmi.hardware_version);
+ snprintf(fc_host_driver_version(lport->host),
+ FC_VERSION_STRING_SIZE,
+ "%s",
+ fdmi.driver_version);
+ snprintf(fc_host_optionrom_version(lport->host),
+ FC_VERSION_STRING_SIZE,
+ "%s",
+ fdmi.optionrom_version);
+ snprintf(fc_host_firmware_version(lport->host),
+ FC_VERSION_STRING_SIZE,
+ "%s",
+ fdmi.firmware_version);
+
+ /* Enable FDMI lport states */
+ lport->fdmi_enabled = 1;
+ } else {
+ lport->fdmi_enabled = 0;
+ printk(KERN_INFO "fcoe: No FDMI support.\n");
+ }
+}
+
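
[Reviewer note] A hypothetical sketch of the netdev-driver side of this contract: the ndo_fcoe_get_hbainfo() callback that fcoe_fdmi_info() above consumes. Field names follow struct netdev_fcoe_hbainfo; the values are made up for illustration:

	static int my_fcoe_get_hbainfo(struct net_device *dev,
				       struct netdev_fcoe_hbainfo *info)
	{
		snprintf(info->manufacturer, sizeof(info->manufacturer), "Acme");
		snprintf(info->serial_number, sizeof(info->serial_number), "SN-0001");
		snprintf(info->model, sizeof(info->model), "Acme CNA 10G");
		/* ... model_description, hardware/driver/firmware/optionrom ... */
		return 0;	/* nonzero makes fcoe skip FDMI for this port */
	}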
/**
* fcoe_oem_match() - The match routine for the offloaded exchange manager
* @fp: The I/O frame
@@ -881,9 +943,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
dev_uc_del(netdev, port->data_src_addr);
rtnl_unlock();
- /* Release reference held in fcoe_if_create() */
- fcoe_interface_put(fcoe);
-
/* Free queued packets for the per-CPU receive threads */
fcoe_percpu_clean(lport);
@@ -1047,6 +1106,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
goto out_lp_destroy;
}
+ /* Initialize FDMI information */
+ fcoe_fdmi_info(lport, netdev);
+
/*
* fcoe_em_alloc() and fcoe_hostlist_add() both
* need to be atomic with respect to other changes to the
@@ -1070,7 +1132,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
goto out_lp_destroy;
}
- fcoe_interface_get(fcoe);
return lport;
out_lp_destroy:
@@ -1375,7 +1436,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
goto err;
fps = &per_cpu(fcoe_percpu, cpu);
- spin_lock_bh(&fps->fcoe_rx_list.lock);
+ spin_lock(&fps->fcoe_rx_list.lock);
if (unlikely(!fps->thread)) {
/*
* The targeted CPU is not ready, let's target
@@ -1386,12 +1447,12 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
"ready for incoming skb- using first online "
"CPU.\n");
- spin_unlock_bh(&fps->fcoe_rx_list.lock);
+ spin_unlock(&fps->fcoe_rx_list.lock);
cpu = cpumask_first(cpu_online_mask);
fps = &per_cpu(fcoe_percpu, cpu);
- spin_lock_bh(&fps->fcoe_rx_list.lock);
+ spin_lock(&fps->fcoe_rx_list.lock);
if (!fps->thread) {
- spin_unlock_bh(&fps->fcoe_rx_list.lock);
+ spin_unlock(&fps->fcoe_rx_list.lock);
goto err;
}
}
@@ -1402,24 +1463,17 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
* so we're free to queue skbs into its queue.
*/
- /* If this is a SCSI-FCP frame, and this is already executing on the
- * correct CPU, and the queue for this CPU is empty, then go ahead
- * and process the frame directly in the softirq context.
- * This lets us process completions without context switching from the
- * NET_RX softirq, to our receive processing thread, and then back to
- * BLOCK softirq context.
+ /*
+ * Note: We used to have a set of conditions under which we would
+ * call fcoe_recv_frame directly, rather than queuing to the rx list
+ * as it could save a few cycles, but doing so is prohibited, as
+ * fcoe_recv_frame has several paths that may sleep, which is forbidden
+ * in softirq context.
*/
- if (fh->fh_type == FC_TYPE_FCP &&
- cpu == smp_processor_id() &&
- skb_queue_empty(&fps->fcoe_rx_list)) {
- spin_unlock_bh(&fps->fcoe_rx_list.lock);
- fcoe_recv_frame(skb);
- } else {
- __skb_queue_tail(&fps->fcoe_rx_list, skb);
- if (fps->fcoe_rx_list.qlen == 1)
- wake_up_process(fps->thread);
- spin_unlock_bh(&fps->fcoe_rx_list.lock);
- }
+ __skb_queue_tail(&fps->fcoe_rx_list, skb);
+ if (fps->thread->state == TASK_INTERRUPTIBLE)
+ wake_up_process(fps->thread);
+ spin_unlock(&fps->fcoe_rx_list.lock);
return 0;
err:
@@ -1498,7 +1552,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* crc offload */
if (likely(lport->crc_offload)) {
- skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_start = skb_headroom(skb);
skb->csum_offset = skb->len;
crc = 0;
@@ -1515,7 +1569,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
- cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
+ cp = kmap_atomic(skb_frag_page(frag))
+ frag->page_offset;
} else {
cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@@ -1526,7 +1580,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
cp->fcoe_crc32 = cpu_to_le32(~crc);
if (skb_is_nonlinear(skb)) {
- kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+ kunmap_atomic(cp);
cp = NULL;
}
@@ -1736,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg)
{
struct fcoe_percpu_s *p = arg;
struct sk_buff *skb;
+ struct sk_buff_head tmp;
+
+ skb_queue_head_init(&tmp);
set_user_nice(current, -20);
while (!kthread_should_stop()) {
spin_lock_bh(&p->fcoe_rx_list.lock);
- while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+ skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+ while ((skb = __skb_dequeue(&tmp)) != NULL)
+ fcoe_recv_frame(skb);
+
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ if (!skb_queue_len(&p->fcoe_rx_list)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&p->fcoe_rx_list.lock);
schedule();
set_current_state(TASK_RUNNING);
- if (kthread_should_stop())
- return 0;
- spin_lock_bh(&p->fcoe_rx_list.lock);
- }
- spin_unlock_bh(&p->fcoe_rx_list.lock);
- fcoe_recv_frame(skb);
+ } else
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
}
return 0;
}
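
[Reviewer note] The rewritten receive thread drains its backlog in batches: one short critical section splices the shared list onto a private one, processing then runs lock-free, and the thread only sleeps if the shared list is still empty afterwards. The core idiom, sketched:

	struct sk_buff_head tmp;

	skb_queue_head_init(&tmp);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	skb_queue_splice_init(&p->fcoe_rx_list, &tmp);	/* steal the backlog */
	spin_unlock_bh(&p->fcoe_rx_list.lock);

	while ((skb = __skb_dequeue(&tmp)) != NULL)	/* no lock needed */
		fcoe_recv_frame(skb);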
@@ -2009,20 +2069,13 @@ static void fcoe_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
struct fcoe_interface *fcoe;
- int npiv = 0;
port = container_of(work, struct fcoe_port, destroy_work);
mutex_lock(&fcoe_config_mutex);
- /* set if this is an NPIV port */
- npiv = port->lport->vport ? 1 : 0;
-
fcoe = port->priv;
fcoe_if_destroy(port->lport);
-
- /* Do not tear down the fcoe interface for NPIV port */
- if (!npiv)
- fcoe_interface_cleanup(fcoe);
+ fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
}
@@ -2133,8 +2186,12 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* start FIP Discovery and FLOGI */
lport->boot_time = jiffies;
fc_fabric_login(lport);
- if (!fcoe_link_ok(lport))
+ if (!fcoe_link_ok(lport)) {
+ rtnl_unlock();
fcoe_ctlr_link_up(&fcoe->ctlr);
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+ }
out_nodev:
rtnl_unlock();
@@ -2207,31 +2264,14 @@ static int fcoe_link_ok(struct fc_lport *lport)
static void fcoe_percpu_clean(struct fc_lport *lport)
{
struct fcoe_percpu_s *pp;
- struct fcoe_rcv_info *fr;
- struct sk_buff_head *list;
- struct sk_buff *skb, *next;
- struct sk_buff *head;
+ struct sk_buff *skb;
unsigned int cpu;
for_each_possible_cpu(cpu) {
pp = &per_cpu(fcoe_percpu, cpu);
- spin_lock_bh(&pp->fcoe_rx_list.lock);
- list = &pp->fcoe_rx_list;
- head = list->next;
- for (skb = head; skb != (struct sk_buff *)list;
- skb = next) {
- next = skb->next;
- fr = fcoe_dev_from_skb(skb);
- if (fr->fr_dev == lport) {
- __skb_unlink(skb, list);
- kfree_skb(skb);
- }
- }
- if (!pp->thread || !cpu_online(cpu)) {
- spin_unlock_bh(&pp->fcoe_rx_list.lock);
+ if (!pp->thread || !cpu_online(cpu))
continue;
- }
skb = dev_alloc_skb(0);
if (!skb) {
@@ -2240,6 +2280,7 @@ static void fcoe_percpu_clean(struct fc_lport *lport)
}
skb->destructor = fcoe_percpu_flush_done;
+ spin_lock_bh(&pp->fcoe_rx_list.lock);
__skb_queue_tail(&pp->fcoe_rx_list, skb);
if (pp->fcoe_rx_list.qlen == 1)
wake_up_process(pp->thread);
@@ -2593,12 +2634,15 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fc_lport *vn_port = vport->dd_data;
- struct fcoe_port *port = lport_priv(vn_port);
mutex_lock(&n_port->lp_mutex);
list_del(&vn_port->list);
mutex_unlock(&n_port->lp_mutex);
- queue_work(fcoe_wq, &port->destroy_work);
+
+ mutex_lock(&fcoe_config_mutex);
+ fcoe_if_destroy(vn_port);
+ mutex_unlock(&fcoe_config_mutex);
+
return 0;
}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index bcc89e639495..3c2733a12aa1 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,8 +71,6 @@ do { \
* @ctlr: The FCoE controller (for FIP)
* @oem: The offload exchange manager for all local port
* instances associated with this port
- * @kref: The kernel reference
- *
* This structure is 1:1 with a net device.
*/
struct fcoe_interface {
@@ -83,7 +81,6 @@ struct fcoe_interface {
struct packet_type fip_packet_type;
struct fcoe_ctlr ctlr;
struct fc_exch_mgr *oem;
- struct kref kref;
};
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index e7522dcc296e..249a106888d9 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -242,7 +242,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
printk(KERN_INFO "libfcoe: host%d: FIP selected "
"Fibre-Channel Forwarder MAC %pM\n",
fip->lp->host->host_no, sel->fcf_mac);
- memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
+ memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN);
fip->map_dest = 0;
}
unlock:
@@ -824,6 +824,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
memcpy(fcf->fcf_mac,
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN);
+ memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN);
if (!is_valid_ether_addr(fcf->fcf_mac)) {
LIBFCOE_FIP_DBG(fip,
"Invalid MAC addr %pM in FIP adv\n",
@@ -1013,6 +1014,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fip_desc *desc;
struct fip_encaps *els;
struct fcoe_dev_stats *stats;
+ struct fcoe_fcf *sel;
enum fip_desc_type els_dtype = 0;
u8 els_op;
u8 sub;
@@ -1040,7 +1042,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
goto drop;
/* Drop ELS if there are duplicate critical descriptors */
if (desc->fip_dtype < 32) {
- if (desc_mask & 1U << desc->fip_dtype) {
+ if ((desc->fip_dtype != FIP_DT_MAC) &&
+ (desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
"Descriptors in FIP ELS\n");
goto drop;
@@ -1049,17 +1052,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
switch (desc->fip_dtype) {
case FIP_DT_MAC:
+ sel = fip->sel_fcf;
if (desc_cnt == 1) {
LIBFCOE_FIP_DBG(fip, "FIP descriptors "
"received out of order\n");
goto drop;
}
+ /*
+ * Some switch implementations send two MAC descriptors,
+ * with the first MAC (granted_mac) being the FPMA and the
+ * second one (fcoe_mac) used as the destination address
+ * for sending/receiving FCoE packets. FIP traffic is
+ * sent using fip_mac. For regular switches, both
+ * fip_mac and fcoe_mac would be the same.
+ */
+ if (desc_cnt == 2)
+ memcpy(granted_mac,
+ ((struct fip_mac_desc *)desc)->fd_mac,
+ ETH_ALEN);
if (dlen != sizeof(struct fip_mac_desc))
goto len_err;
- memcpy(granted_mac,
- ((struct fip_mac_desc *)desc)->fd_mac,
- ETH_ALEN);
+
+ if ((desc_cnt == 3) && (sel))
+ memcpy(sel->fcoe_mac,
+ ((struct fip_mac_desc *)desc)->fd_mac,
+ ETH_ALEN);
break;
case FIP_DT_FLOGI:
case FIP_DT_FDISC:
@@ -1273,11 +1291,6 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* No Vx_Port description. Clear all NPIV ports,
* followed by physical port
*/
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vn_port, &lport->vports, list)
- fc_lport_reset(vn_port);
- mutex_unlock(&lport->lp_mutex);
-
mutex_lock(&fip->ctlr_mutex);
per_cpu_ptr(lport->dev_stats,
get_cpu())->VLinkFailureCount++;
@@ -1285,6 +1298,11 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vn_port, &lport->vports, list)
+ fc_lport_reset(vn_port);
+ mutex_unlock(&lport->lp_mutex);
+
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
} else {
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index bd97b2273f20..710e149d41b6 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -210,10 +210,9 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
while (len > 0) {
clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
data = kmap_atomic(
- skb_frag_page(frag) + (off >> PAGE_SHIFT),
- KM_SKB_DATA_SOFTIRQ);
+ skb_frag_page(frag) + (off >> PAGE_SHIFT));
crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
- kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+ kunmap_atomic(data);
off += clen;
len -= clen;
}
@@ -620,8 +619,8 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
switch (event) {
case NETDEV_UNREGISTER:
- printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
- netdev->name);
+ LIBFCOE_TRANSPORT_DBG("NETDEV_UNREGISTER %s\n",
+ netdev->name);
fcoe_del_netdev_mapping(netdev);
break;
}
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index a2c6135d337e..53bfcaa86f09 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -93,7 +93,6 @@
#include <linux/mca-legacy.h>
#include <asm/io.h>
-#include <asm/system.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 643f6d500fe7..1a2a1e5824e3 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -282,7 +282,6 @@
#include <linux/slab.h>
#include <scsi/scsicam.h>
-#include <asm/system.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 81182badfeb1..1a5954f0915a 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -100,7 +100,6 @@
#undef NCR5380_STAT_LIMIT
#endif
-#include <asm/system.h>
#include <asm/io.h>
#include <linux/signal.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 3242bcabad97..5d72274c507f 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -129,7 +129,6 @@
#include <linux/reboot.h>
#include <asm/dma.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/spinlock.h>
@@ -2310,10 +2309,10 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
return;
}
local_irq_save(flags);
- address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
+ address = kmap_atomic(sg_page(sl)) + sl->offset;
memcpy(address, buffer, cpnow);
flush_dcache_page(sg_page(sl));
- kunmap_atomic(address, KM_BIO_SRC_IRQ);
+ kunmap_atomic(address);
local_irq_restore(flags);
if (cpsum == cpcount)
break;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index b96962c39449..500e20dd56ec 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -56,6 +56,7 @@
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
+#define HPSA "hpsa"
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
@@ -202,30 +203,31 @@ static int check_for_unit_attention(struct ctlr_info *h,
switch (c->err_info->SenseInfo[12]) {
case STATE_CHANGED:
- dev_warn(&h->pdev->dev, "hpsa%d: a state change "
+ dev_warn(&h->pdev->dev, HPSA "%d: a state change "
"detected, command retried\n", h->ctlr);
break;
case LUN_FAILED:
- dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
+ dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
"detected, action required\n", h->ctlr);
break;
case REPORT_LUNS_CHANGED:
- dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
+ dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
"changed, action required\n", h->ctlr);
/*
- * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
+ * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
+ * target (array) devices.
*/
break;
case POWER_OR_RESET:
- dev_warn(&h->pdev->dev, "hpsa%d: a power on "
+ dev_warn(&h->pdev->dev, HPSA "%d: a power on "
"or device reset detected\n", h->ctlr);
break;
case UNIT_ATTENTION_CLEARED:
- dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
+ dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
"cleared by another initiator\n", h->ctlr);
break;
default:
- dev_warn(&h->pdev->dev, "hpsa%d: unknown "
+ dev_warn(&h->pdev->dev, HPSA "%d: unknown "
"unit attention detected\n", h->ctlr);
break;
}
@@ -296,11 +298,23 @@ static u32 unresettable_controller[] = {
0x40800E11, /* Smart Array 5i */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
+ 0x40700E11, /* Smart Array 5300 */
+ 0x40820E11, /* Smart Array 532 */
+ 0x40830E11, /* Smart Array 5312 */
+ 0x409A0E11, /* Smart Array 641 */
+ 0x409B0E11, /* Smart Array 642 */
+ 0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
0x40800E11, /* Smart Array 5i */
+ 0x40700E11, /* Smart Array 5300 */
+ 0x40820E11, /* Smart Array 532 */
+ 0x40830E11, /* Smart Array 5312 */
+ 0x409A0E11, /* Smart Array 641 */
+ 0x409B0E11, /* Smart Array 642 */
+ 0x40910E11, /* Smart Array 6i */
/* Exclude 640x boards. These are two pci devices in one slot
* which share a battery backed cache module. One controls the
* cache, the other accesses the cache through the one that controls
@@ -475,8 +489,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
static struct scsi_host_template hpsa_driver_template = {
.module = THIS_MODULE,
- .name = "hpsa",
- .proc_name = "hpsa",
+ .name = HPSA,
+ .proc_name = HPSA,
.queuecommand = hpsa_scsi_queue_command,
.scan_start = hpsa_scan_start,
.scan_finished = hpsa_scan_finished,
@@ -577,21 +591,19 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
int i, found = 0;
DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
- memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);
+ bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
- set_bit(h->dev[i]->target, lun_taken);
+ __set_bit(h->dev[i]->target, lun_taken);
}
- for (i = 0; i < HPSA_MAX_DEVICES; i++) {
- if (!test_bit(i, lun_taken)) {
- /* *bus = 1; */
- *target = i;
- *lun = 0;
- found = 1;
- break;
- }
+ i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
+ if (i < HPSA_MAX_DEVICES) {
+ /* *bus = 1; */
+ *target = i;
+ *lun = 0;
+ found = 1;
}
return !found;
}
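
[Reviewer note] The open-coded scan gives way to the generic bitmap helpers. __set_bit() is the non-atomic variant, which is safe here because hpsa serializes these table updates under its device lock. The helpers, sketched in isolation:

	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
	int i;

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
	__set_bit(3, lun_taken);			/* mark target 3 as used */
	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES)
		pr_info("first free target: %d\n", i);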
@@ -675,6 +687,20 @@ lun_assigned:
return 0;
}
+/* Update an entry in h->dev[] array. */
+static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
+ int entry, struct hpsa_scsi_dev_t *new_entry)
+{
+ /* assumes h->devlock is held */
+ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
+
+ /* Raid level changed. */
+ h->dev[entry]->raid_level = new_entry->raid_level;
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
+ scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
+ new_entry->target, new_entry->lun);
+}
+
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
int entry, struct hpsa_scsi_dev_t *new_entry,
@@ -781,10 +807,25 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
return 1;
}
+static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
+ struct hpsa_scsi_dev_t *dev2)
+{
+ /* Device attributes that can change, but don't mean
+ * that the device is a different device, nor that the OS
+ * needs to be told anything about the change.
+ */
+ if (dev1->raid_level != dev2->raid_level)
+ return 1;
+ return 0;
+}
+
/* Find needle in haystack. If exact match found, return DEVICE_SAME,
* and return needle location in *index. If scsi3addr matches, but not
* vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
- * location in *index. If needle not found, return DEVICE_NOT_FOUND.
+ * location in *index.
+ * In the case of a minor device attribute change, such as RAID level, just
+ * return DEVICE_UPDATED, along with the updated device's location in index.
+ * If needle not found, return DEVICE_NOT_FOUND.
*/
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
struct hpsa_scsi_dev_t *haystack[], int haystack_size,
@@ -794,15 +835,19 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
+#define DEVICE_UPDATED 3
for (i = 0; i < haystack_size; i++) {
if (haystack[i] == NULL) /* previously removed. */
continue;
if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
*index = i;
- if (device_is_the_same(needle, haystack[i]))
+ if (device_is_the_same(needle, haystack[i])) {
+ if (device_updated(needle, haystack[i]))
+ return DEVICE_UPDATED;
return DEVICE_SAME;
- else
+ } else {
return DEVICE_CHANGED;
+ }
}
}
*index = -1;
@@ -838,6 +883,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
* sd[] and remove them from h->dev[], and for any
* devices which have changed, remove the old device
* info and add the new device info.
+ * If minor device attributes change, just update
+ * the existing device structure.
*/
i = 0;
nremoved = 0;
@@ -858,6 +905,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
* at the bottom of hpsa_update_scsi_devices()
*/
sd[entry] = NULL;
+ } else if (device_change == DEVICE_UPDATED) {
+ hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
}
i++;
}
@@ -1257,46 +1306,6 @@ static void complete_scsi_command(struct CommandList *cp)
cmd_free(h, cp);
}
-static int hpsa_scsi_detect(struct ctlr_info *h)
-{
- struct Scsi_Host *sh;
- int error;
-
- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
- if (sh == NULL)
- goto fail;
-
- sh->io_port = 0;
- sh->n_io_port = 0;
- sh->this_id = -1;
- sh->max_channel = 3;
- sh->max_cmd_len = MAX_COMMAND_SIZE;
- sh->max_lun = HPSA_MAX_LUN;
- sh->max_id = HPSA_MAX_LUN;
- sh->can_queue = h->nr_cmds;
- sh->cmd_per_lun = h->nr_cmds;
- sh->sg_tablesize = h->maxsgentries;
- h->scsi_host = sh;
- sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[h->intr_mode];
- sh->unique_id = sh->irq;
- error = scsi_add_host(sh, &h->pdev->dev);
- if (error)
- goto fail_host_put;
- scsi_scan_host(sh);
- return 0;
-
- fail_host_put:
- dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
- " failed for controller %d\n", h->ctlr);
- scsi_host_put(sh);
- return error;
- fail:
- dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
- " failed for controller %d\n", h->ctlr);
- return -ENOMEM;
-}
-
static void hpsa_pci_unmap(struct pci_dev *pdev,
struct CommandList *c, int sg_used, int data_direction)
{
@@ -1641,7 +1650,7 @@ bail_out:
return 1;
}
-static unsigned char *msa2xxx_model[] = {
+static unsigned char *ext_target_model[] = {
"MSA2012",
"MSA2024",
"MSA2312",
@@ -1650,78 +1659,54 @@ static unsigned char *msa2xxx_model[] = {
NULL,
};
-static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
+static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
int i;
- for (i = 0; msa2xxx_model[i]; i++)
- if (strncmp(device->model, msa2xxx_model[i],
- strlen(msa2xxx_model[i])) == 0)
+ for (i = 0; ext_target_model[i]; i++)
+ if (strncmp(device->model, ext_target_model[i],
+ strlen(ext_target_model[i])) == 0)
return 1;
return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
- * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
+ * Puts non-external target logical volumes on bus 0, external target logical
* volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
* Logical drive target and lun are assigned at this time, but
* physical device lun and target assignment are deferred (assigned
* in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
*/
static void figure_bus_target_lun(struct ctlr_info *h,
- u8 *lunaddrbytes, int *bus, int *target, int *lun,
- struct hpsa_scsi_dev_t *device)
+ u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
- u32 lunid;
+ u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
- if (is_logical_dev_addr_mode(lunaddrbytes)) {
- /* logical device */
- if (unlikely(is_scsi_rev_5(h))) {
- /* p1210m, logical drives lun assignments
- * match SCSI REPORT LUNS data.
- */
- lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
- *bus = 0;
- *target = 0;
- *lun = (lunid & 0x3fff) + 1;
- } else {
- /* not p1210m... */
- lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
- if (is_msa2xxx(h, device)) {
- /* msa2xxx way, put logicals on bus 1
- * and match target/lun numbers box
- * reports.
- */
- *bus = 1;
- *target = (lunid >> 16) & 0x3fff;
- *lun = lunid & 0x00ff;
- } else {
- /* Traditional smart array way. */
- *bus = 0;
- *lun = 0;
- *target = lunid & 0x3fff;
- }
- }
- } else {
- /* physical device */
+ if (!is_logical_dev_addr_mode(lunaddrbytes)) {
+ /* physical device, target and lun filled in later */
if (is_hba_lunid(lunaddrbytes))
- if (unlikely(is_scsi_rev_5(h))) {
- *bus = 0; /* put p1210m ctlr at 0,0,0 */
- *target = 0;
- *lun = 0;
- return;
- } else
- *bus = 3; /* traditional smartarray */
+ hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
else
- *bus = 2; /* physical disk */
- *target = -1;
- *lun = -1; /* we will fill these in later. */
+ /* defer target, lun assignment for physical devices */
+ hpsa_set_bus_target_lun(device, 2, -1, -1);
+ return;
+ }
+ /* It's a logical device */
+ if (is_ext_target(h, device)) {
+ /* External targets: put logicals on bus 1 and match the
+ * target/lun numbers the box reports; other smart arrays
+ * use bus 0, target 0, and match the lunid.
+ */
+ hpsa_set_bus_target_lun(device,
+ 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
+ return;
}
+ hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
/*
* If there is no lun 0 on a target, linux won't find any devices.
- * For the MSA2xxx boxes, we have to manually detect the enclosure
+ * For the external targets (arrays), we have to manually detect the enclosure
* which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
* it for some reason. *tmpdevice is the target we're adding,
* this_device is a pointer into the current element of currentsd[]
@@ -1730,46 +1715,46 @@ static void figure_bus_target_lun(struct ctlr_info *h,
* lun 0 assigned.
* Returns 1 if an enclosure was added, 0 if not.
*/
-static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
+static int add_ext_target_dev(struct ctlr_info *h,
struct hpsa_scsi_dev_t *tmpdevice,
struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
- int bus, int target, int lun, unsigned long lunzerobits[],
- int *nmsa2xxx_enclosures)
+ unsigned long lunzerobits[], int *n_ext_target_devs)
{
unsigned char scsi3addr[8];
- if (test_bit(target, lunzerobits))
+ if (test_bit(tmpdevice->target, lunzerobits))
return 0; /* There is already a lun 0 on this target. */
if (!is_logical_dev_addr_mode(lunaddrbytes))
return 0; /* It's the logical targets that may lack lun 0. */
- if (!is_msa2xxx(h, tmpdevice))
- return 0; /* It's only the MSA2xxx that have this problem. */
+ if (!is_ext_target(h, tmpdevice))
+ return 0; /* Only external target devices have this problem. */
- if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
+ if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
return 0;
memset(scsi3addr, 0, 8);
- scsi3addr[3] = target;
+ scsi3addr[3] = tmpdevice->target;
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
if (is_scsi_rev_5(h))
return 0; /* p1210m doesn't need to do this. */
- if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
- dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
- "enclosures exceeded. Check your hardware "
+ if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
+ dev_warn(&h->pdev->dev, "Maximum number of external "
+ "target devices exceeded. Check your hardware "
"configuration.");
return 0;
}
if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
return 0;
- (*nmsa2xxx_enclosures)++;
- hpsa_set_bus_target_lun(this_device, bus, target, 0);
- set_bit(target, lunzerobits);
+ (*n_ext_target_devs)++;
+ hpsa_set_bus_target_lun(this_device,
+ tmpdevice->bus, tmpdevice->target, 0);
+ set_bit(tmpdevice->target, lunzerobits);
return 1;
}
@@ -1863,10 +1848,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
- int i, nmsa2xxx_enclosures, ndevs_to_allocate;
- int bus, target, lun;
+ int i, n_ext_target_devs, ndevs_to_allocate;
int raid_ctlr_position;
- DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
+ DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
@@ -1883,11 +1867,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
logdev_list, &nlogicals))
goto out;
- /* We might see up to 32 MSA2xxx enclosures, actually 8 of them
- * but each of them 4 times through different paths. The plus 1
- * is for the RAID controller.
+ /* We might see up to the maximum number of logical and physical disks
+ * plus external target devices, and a device for the local RAID
+ * controller.
*/
- ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
+ ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
@@ -1913,7 +1897,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
raid_ctlr_position = nphysicals + nlogicals;
/* adjust our table of devices */
- nmsa2xxx_enclosures = 0;
+ n_ext_target_devs = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
u8 *lunaddrbytes, is_OBDR = 0;
@@ -1929,26 +1913,24 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR))
continue; /* skip it if we can't talk to it. */
- figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
- tmpdevice);
+ figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
this_device = currentsd[ncurrent];
/*
- * For the msa2xxx boxes, we have to insert a LUN 0 which
+ * For external target devices, we have to insert a LUN 0 which
* doesn't show up in CCISS_REPORT_PHYSICAL data, but there
* is nonetheless an enclosure device there. We have to
* present that otherwise linux won't find anything if
* there is no lun 0.
*/
- if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
- lunaddrbytes, bus, target, lun, lunzerobits,
- &nmsa2xxx_enclosures)) {
+ if (add_ext_target_dev(h, tmpdevice, this_device,
+ lunaddrbytes, lunzerobits,
+ &n_ext_target_devs)) {
ncurrent++;
this_device = currentsd[ncurrent];
}
*this_device = *tmpdevice;
- hpsa_set_bus_target_lun(this_device, bus, target, lun);
switch (this_device->devtype) {
case TYPE_ROM:
@@ -2228,13 +2210,42 @@ static void hpsa_unregister_scsi(struct ctlr_info *h)
static int hpsa_register_scsi(struct ctlr_info *h)
{
- int rc;
+ struct Scsi_Host *sh;
+ int error;
- rc = hpsa_scsi_detect(h);
- if (rc != 0)
- dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
- " hpsa_scsi_detect(), rc is %d\n", rc);
- return rc;
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+ if (sh == NULL)
+ goto fail;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->this_id = -1;
+ sh->max_channel = 3;
+ sh->max_cmd_len = MAX_COMMAND_SIZE;
+ sh->max_lun = HPSA_MAX_LUN;
+ sh->max_id = HPSA_MAX_LUN;
+ sh->can_queue = h->nr_cmds;
+ sh->cmd_per_lun = h->nr_cmds;
+ sh->sg_tablesize = h->maxsgentries;
+ h->scsi_host = sh;
+ sh->hostdata[0] = (unsigned long) h;
+ sh->irq = h->intr[h->intr_mode];
+ sh->unique_id = sh->irq;
+ error = scsi_add_host(sh, &h->pdev->dev);
+ if (error)
+ goto fail_host_put;
+ scsi_scan_host(sh);
+ return 0;
+
+ fail_host_put:
+ dev_err(&h->pdev->dev, "%s: scsi_add_host"
+ " failed for controller %d\n", __func__, h->ctlr);
+ scsi_host_put(sh);
+ return error;
+ fail:
+ dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
+ " failed for controller %d\n", __func__, h->ctlr);
+ return -ENOMEM;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
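
[Reviewer note] This folds the old hpsa_scsi_detect() into hpsa_register_scsi(); the body is the stock SCSI host registration sequence. Sketched with generic names (error paths abbreviated):

	sh = scsi_host_alloc(&tmpl, sizeof(priv));	/* takes a reference */
	if (!sh)
		return -ENOMEM;
	/* ... fill in can_queue, max_lun, sg_tablesize, ... */
	rc = scsi_add_host(sh, &pdev->dev);
	if (rc) {
		scsi_host_put(sh);	/* drop the allocation reference */
		return rc;
	}
	scsi_scan_host(sh);		/* kick off device discovery */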
@@ -2700,16 +2711,16 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = -EINVAL;
goto cleanup1;
}
- if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+ if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
status = -EINVAL;
goto cleanup1;
}
- buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+ buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
if (!buff) {
status = -ENOMEM;
goto cleanup1;
}
- buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
+ buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
if (!buff_size) {
status = -ENOMEM;
goto cleanup1;
@@ -3354,7 +3365,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
static __devinit void init_driver_version(char *driver_version, int len)
{
memset(driver_version, 0, len);
- strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
+ strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}
static __devinit int write_driver_ver_to_cfgtable(
@@ -3935,7 +3946,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
return err;
}
- err = pci_request_regions(h->pdev, "hpsa");
+ err = pci_request_regions(h->pdev, HPSA);
if (err) {
dev_err(&h->pdev->dev,
"cannot obtain PCI resources, aborting\n");
@@ -4253,7 +4264,7 @@ static void start_controller_lockup_detector(struct ctlr_info *h)
spin_lock_init(&lockup_detector_lock);
hpsa_lockup_detector =
kthread_run(detect_controller_lockup_thread,
- NULL, "hpsa");
+ NULL, HPSA);
}
if (!hpsa_lockup_detector) {
dev_warn(&h->pdev->dev,
@@ -4325,7 +4336,7 @@ reinit_after_soft_reset:
if (rc != 0)
goto clean1;
- sprintf(h->devname, "hpsa%d", number_of_controllers);
+ sprintf(h->devname, HPSA "%d", number_of_controllers);
h->ctlr = number_of_controllers;
number_of_controllers++;
@@ -4482,6 +4493,14 @@ static void hpsa_shutdown(struct pci_dev *pdev)
#endif /* CONFIG_PCI_MSI */
}
+static void __devexit hpsa_free_device_info(struct ctlr_info *h)
+{
+ int i;
+
+ for (i = 0; i < h->ndevices; i++)
+ kfree(h->dev[i]);
+}
+
static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
struct ctlr_info *h;
@@ -4497,6 +4516,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
iounmap(h->vaddr);
iounmap(h->transtable);
iounmap(h->cfgtable);
+ hpsa_free_device_info(h);
hpsa_free_sg_chain_blocks(h);
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
@@ -4530,7 +4550,7 @@ static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
}
static struct pci_driver hpsa_pci_driver = {
- .name = "hpsa",
+ .name = HPSA,
.probe = hpsa_init_one,
.remove = __devexit_p(hpsa_remove_one),
.id_table = hpsa_pci_device_id, /* id_table */
@@ -4592,15 +4612,15 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
* Each SG entry requires 16 bytes. The eight registers are programmed
* with the number of 16-byte blocks a command of that size requires.
* The smallest command possible requires 5 such 16 byte blocks.
- * the largest command possible requires MAXSGENTRIES + 4 16-byte
+ * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
* blocks. Note, this only extends to the SG entries contained
* within the command block, and does not extend to chained blocks
* of SG elements. bft[] contains the eight values we write to
* the registers. They are not evenly distributed, but have more
* sizes for small commands, and fewer sizes for larger commands.
*/
- int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
- BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
+ int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+ BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
/* 5 = 1 s/g entry or 4k
* 6 = 2 s/g entry or 8k
* 8 = 4 s/g entry or 16k
@@ -4613,8 +4633,9 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
memset(h->reply_pool, 0, h->reply_pool_size);
h->reply_pool_head = h->reply_pool;
- bft[7] = h->max_sg_entries + 4;
- calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
+ bft[7] = SG_ENTRIES_IN_CMD + 4;
+ calc_bucket_map(bft, ARRAY_SIZE(bft),
+ SG_ENTRIES_IN_CMD, h->blockFetchTable);
for (i = 0; i < 8; i++)
writel(bft[i], &h->transtable->BlockFetch[i]);
@@ -4652,14 +4673,13 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
return;
hpsa_get_max_perf_mode_cmds(h);
- h->max_sg_entries = 32;
/* Performant mode ring buffer and supporting data structures */
h->reply_pool_size = h->max_commands * sizeof(u64);
h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
&(h->reply_pool_dhandle));
/* Need a block fetch table for performant mode */
- h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
+ h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
sizeof(u32)), GFP_KERNEL);
if ((h->reply_pool == NULL)
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 91edafb8c7e6..7b28d54fa878 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -58,7 +58,6 @@ struct ctlr_info {
unsigned long paddr;
int nr_cmds; /* Number of commands allowed on this controller */
struct CfgTable __iomem *cfgtable;
- int max_sg_entries;
int interrupts_enabled;
int major;
int max_commands;
@@ -317,7 +316,7 @@ static unsigned long SA5_completed(struct ctlr_info *h)
dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
register_value);
else
- dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n");
+ dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif
return register_value;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3fd4715935c2..8049815d8c1e 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -23,7 +23,7 @@
/* general boundary definitions */
#define SENSEINFOBYTES 32 /* may vary between hbas */
-#define MAXSGENTRIES 32
+#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
#define HPSA_SG_CHAIN 0x80000000
#define MAXREPLYQS 256
@@ -122,12 +122,11 @@ union u64bit {
};
/* FIXME this is a per controller value (barf!) */
-#define HPSA_MAX_TARGETS_PER_CTLR 16
#define HPSA_MAX_LUN 1024
#define HPSA_MAX_PHYS_LUN 1024
-#define MAX_MSA2XXX_ENCLOSURES 32
+#define MAX_EXT_TARGETS 32
#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
- MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */
+ MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
/* SCSI-3 Commands */
#pragma pack(1)
@@ -282,7 +281,7 @@ struct CommandList {
struct CommandListHeader Header;
struct RequestBlock Request;
struct ErrDescriptor ErrDesc;
- struct SGDescriptor SG[MAXSGENTRIES];
+ struct SGDescriptor SG[SG_ENTRIES_IN_CMD];
/* information associated with the command */
u32 busaddr; /* physical addr of this record */
struct ErrorInfo *err_info; /* pointer to the allocated mem */
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 67fc8ffd52e6..cd09132d5d7d 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -32,7 +32,6 @@
#include <linux/spinlock.h>
#include <linux/init.h>
-#include <asm/system.h>
#include <asm/io.h>
#include "scsi.h"
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index a423d9633625..ff5b5c5538ee 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,7 +1,6 @@
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
ibmvscsic-y += ibmvscsi.o
-ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index bdfa223a7dbb..134a0ae85bb7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4890,11 +4890,8 @@ static struct vio_driver ibmvfc_driver = {
.probe = ibmvfc_probe,
.remove = ibmvfc_remove,
.get_desired_dma = ibmvfc_get_desired_dma,
- .driver = {
- .name = IBMVFC_NAME,
- .owner = THIS_MODULE,
- .pm = &ibmvfc_pm_ops,
- }
+ .name = IBMVFC_NAME,
+ .pm = &ibmvfc_pm_ops,
};
static struct fc_function_template ibmvfc_transport_functions = {
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 3d391dc3f11f..3a6c4742951e 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -55,13 +55,7 @@
* and sends a CRQ message back to inform the client that the request has
* completed.
*
- * Note that some of the underlying infrastructure is different between
- * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
- * the older iSeries hypervisor models. To support both, some low level
- * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
- * The Makefile should pick one, not two, not zero, of these.
- *
- * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
+ * TODO: This is currently pretty tied to the IBM pSeries hypervisor
* interfaces. It would be really nice to abstract this above an RDMA
* layer.
*/
@@ -2067,11 +2061,8 @@ static struct vio_driver ibmvscsi_driver = {
.probe = ibmvscsi_probe,
.remove = ibmvscsi_remove,
.get_desired_dma = ibmvscsi_get_desired_dma,
- .driver = {
- .name = "ibmvscsi",
- .owner = THIS_MODULE,
- .pm = &ibmvscsi_pm_ops,
- }
+ .name = "ibmvscsi",
+ .pm = &ibmvscsi_pm_ops,
};
static struct srp_function_template ibmvscsi_transport_functions = {
@@ -2085,9 +2076,7 @@ int __init ibmvscsi_module_init(void)
driver_template.can_queue = max_requests;
max_events = max_requests + 2;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- ibmvscsi_ops = &iseriesvscsi_ops;
- else if (firmware_has_feature(FW_FEATURE_VIO))
+ if (firmware_has_feature(FW_FEATURE_VIO))
ibmvscsi_ops = &rpavscsi_ops;
else
return -ENODEV;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 02197a2b22b9..c503e1776014 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -127,7 +127,6 @@ struct ibmvscsi_ops {
int (*resume) (struct ibmvscsi_host_data *hostdata);
};
-extern struct ibmvscsi_ops iseriesvscsi_ops;
extern struct ibmvscsi_ops rpavscsi_ops;
#endif /* IBMVSCSI_H */
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 2256babe0474..aa7ed81e9237 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -918,10 +918,7 @@ static struct vio_driver ibmvstgt_driver = {
.id_table = ibmvstgt_device_table,
.probe = ibmvstgt_probe,
.remove = ibmvstgt_remove,
- .driver = {
- .name = "ibmvscsis",
- .owner = THIS_MODULE,
- }
+ .name = "ibmvscsis",
};
static int get_system_info(void)
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
deleted file mode 100644
index f4776451a754..000000000000
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/* ------------------------------------------------------------
- * iSeries_vscsi.c
- * (C) Copyright IBM Corporation 1994, 2003
- * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
- * Santiago Leon (santil@us.ibm.com)
- * Dave Boutcher (sleddog@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
- * ------------------------------------------------------------
- * iSeries-specific functions of the SCSI host adapter for Virtual I/O devices
- *
- * This driver allows the Linux SCSI peripheral drivers to directly
- * access devices in the hosting partition, either on an iSeries
- * hypervisor system or a converged hypervisor system.
- */
-
-#include <asm/iseries/vio.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/vio.h>
-#include <linux/device.h>
-#include "ibmvscsi.h"
-
-/* global variables */
-static struct ibmvscsi_host_data *single_host_data;
-
-/* ------------------------------------------------------------
- * Routines for direct interpartition interaction
- */
-struct srp_lp_event {
- struct HvLpEvent lpevt; /* 0x00-0x17 */
- u32 reserved1; /* 0x18-0x1B; unused */
- u16 version; /* 0x1C-0x1D; unused */
- u16 subtype_rc; /* 0x1E-0x1F; unused */
- struct viosrp_crq crq; /* 0x20-0x3F */
-};
-
-/**
- * standard interface for handling logical partition events.
- */
-static void iseriesvscsi_handle_event(struct HvLpEvent *lpevt)
-{
- struct srp_lp_event *evt = (struct srp_lp_event *)lpevt;
-
- if (!evt) {
- printk(KERN_ERR "ibmvscsi: received null event\n");
- return;
- }
-
- if (single_host_data == NULL) {
- printk(KERN_ERR
- "ibmvscsi: received event, no adapter present\n");
- return;
- }
-
- ibmvscsi_handle_crq(&evt->crq, single_host_data);
-}
-
-/* ------------------------------------------------------------
- * Routines for driver initialization
- */
-static int iseriesvscsi_init_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata,
- int max_requests)
-{
- int rc;
-
- single_host_data = hostdata;
- rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);
- if (rc < 0) {
- printk("viopath_open failed with rc %d in open_event_path\n",
- rc);
- goto viopath_open_failed;
- }
-
- rc = vio_setHandler(viomajorsubtype_scsi, iseriesvscsi_handle_event);
- if (rc < 0) {
- printk("vio_setHandler failed with rc %d in open_event_path\n",
- rc);
- goto vio_setHandler_failed;
- }
- return 0;
-
- vio_setHandler_failed:
- viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests);
- viopath_open_failed:
- return -1;
-}
-
-static void iseriesvscsi_release_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata,
- int max_requests)
-{
- vio_clearHandler(viomajorsubtype_scsi);
- viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests);
-}
-
-/**
- * reset_crq_queue: - resets a crq after a failure
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
- *
- * no-op for iSeries
- */
-static int iseriesvscsi_reset_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata)
-{
- return 0;
-}
-
-/**
- * reenable_crq_queue: - reenables a crq after a failure
- * @queue: crq_queue to initialize and register
- * @hostdata: ibmvscsi_host_data of host
- *
- * no-op for iSeries
- */
-static int iseriesvscsi_reenable_crq_queue(struct crq_queue *queue,
- struct ibmvscsi_host_data *hostdata)
-{
- return 0;
-}
-
-/**
- * iseriesvscsi_send_crq: - Send a CRQ
- * @hostdata: the adapter
- * @word1: the first 64 bits of the data
- * @word2: the second 64 bits of the data
- */
-static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
- u64 word1, u64 word2)
-{
- single_host_data = hostdata;
- return HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_scsi,
- HvLpEvent_AckInd_NoAck,
- HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- 0,
- VIOVERSION << 16, word1, word2, 0,
- 0);
-}
-
-static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata)
-{
- return 0;
-}
-
-struct ibmvscsi_ops iseriesvscsi_ops = {
- .init_crq_queue = iseriesvscsi_init_crq_queue,
- .release_crq_queue = iseriesvscsi_release_crq_queue,
- .reset_crq_queue = iseriesvscsi_reset_crq_queue,
- .reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
- .send_crq = iseriesvscsi_send_crq,
- .resume = iseriesvscsi_resume,
-};
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 112f1bec7756..deb5b6d8398e 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -123,7 +123,6 @@
#include <linux/stat.h>
#include <asm/io.h>
-#include <asm/system.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b538f0883fd2..e002cd466e9a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -104,7 +104,9 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
.mailbox = 0x0042C,
+ .max_cmds = 100,
.cache_line_size = 0x20,
+ .clear_isr = 1,
{
.set_interrupt_mask_reg = 0x0022C,
.clr_interrupt_mask_reg = 0x00230,
@@ -126,7 +128,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
},
{ /* Snipe and Scamp */
.mailbox = 0x0052C,
+ .max_cmds = 100,
.cache_line_size = 0x20,
+ .clear_isr = 1,
{
.set_interrupt_mask_reg = 0x00288,
.clr_interrupt_mask_reg = 0x0028C,
@@ -148,7 +152,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
},
{ /* CRoC */
.mailbox = 0x00044,
+ .max_cmds = 1000,
.cache_line_size = 0x20,
+ .clear_isr = 0,
{
.set_interrupt_mask_reg = 0x00010,
.clr_interrupt_mask_reg = 0x00018,
@@ -183,7 +189,7 @@ static const struct ipr_chip_t ipr_chip[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds [] = {
@@ -847,8 +853,6 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
- mb();
-
ipr_send_command(ipr_cmd);
}
@@ -982,8 +986,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
- mb();
-
ipr_send_command(ipr_cmd);
} else {
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@@ -4339,8 +4341,7 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if ((res->bus == starget->channel) &&
- (res->target == starget->id) &&
- (res->lun == 0)) {
+ (res->target == starget->id)) {
return res;
}
}
@@ -4414,12 +4415,14 @@ static void ipr_target_destroy(struct scsi_target *starget)
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
if (ioa_cfg->sis64) {
- if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
- clear_bit(starget->id, ioa_cfg->array_ids);
- else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
- clear_bit(starget->id, ioa_cfg->vset_ids);
- else if (starget->channel == 0)
- clear_bit(starget->id, ioa_cfg->target_ids);
+ if (!ipr_find_starget(starget)) {
+ if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
+ clear_bit(starget->id, ioa_cfg->array_ids);
+ else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
+ clear_bit(starget->id, ioa_cfg->vset_ids);
+ else if (starget->channel == 0)
+ clear_bit(starget->id, ioa_cfg->target_ids);
+ }
}
if (sata_port) {
@@ -5048,12 +5051,14 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
del_timer(&ioa_cfg->reset_cmd->timer);
ipr_reset_ioa_job(ioa_cfg->reset_cmd);
} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
- if (ipr_debug && printk_ratelimit())
- dev_err(&ioa_cfg->pdev->dev,
- "Spurious interrupt detected. 0x%08X\n", int_reg);
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
- return IRQ_NONE;
+ if (ioa_cfg->clear_isr) {
+ if (ipr_debug && printk_ratelimit())
+ dev_err(&ioa_cfg->pdev->dev,
+ "Spurious interrupt detected. 0x%08X\n", int_reg);
+ writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+ return IRQ_NONE;
+ }
} else {
if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
@@ -5153,6 +5158,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
}
}
+ if (ipr_cmd && !ioa_cfg->clear_isr)
+ break;
+
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
num_hrrq = 0;
@@ -5854,14 +5862,12 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
}
- if (likely(rc == 0)) {
- mb();
- ipr_send_command(ipr_cmd);
- } else {
- list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(rc != 0)) {
+ list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
+ ipr_send_command(ipr_cmd);
return 0;
}
@@ -6239,8 +6245,6 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
return AC_ERR_INVALID;
}
- mb();
-
ipr_send_command(ipr_cmd);
return 0;
@@ -8277,6 +8281,10 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
if (ioa_cfg->ipr_cmd_pool)
pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
+ kfree(ioa_cfg->ipr_cmnd_list);
+ kfree(ioa_cfg->ipr_cmnd_list_dma);
+ ioa_cfg->ipr_cmnd_list = NULL;
+ ioa_cfg->ipr_cmnd_list_dma = NULL;
ioa_cfg->ipr_cmd_pool = NULL;
}
@@ -8352,11 +8360,19 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
int i;
ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
- sizeof(struct ipr_cmnd), 16, 0);
+ sizeof(struct ipr_cmnd), 512, 0);
if (!ioa_cfg->ipr_cmd_pool)
return -ENOMEM;
+ ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
+ ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
+
+ if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
+ ipr_free_cmd_blks(ioa_cfg);
+ return -ENOMEM;
+ }
+
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
@@ -8584,6 +8600,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
host->max_channel = IPR_MAX_BUS_TO_SCAN;
host->unique_id = host->host_no;
host->max_cmd_len = IPR_MAX_CDB_LEN;
+ host->can_queue = ioa_cfg->max_cmds;
pci_set_drvdata(pdev, ioa_cfg);
p = &ioa_cfg->chip_cfg->regs;
@@ -8768,6 +8785,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
/* set SIS 32 or SIS 64 */
ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
+ ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
+ ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
if (ipr_transop_timeout)
ioa_cfg->transop_timeout = ipr_transop_timeout;
@@ -9191,15 +9210,15 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
- PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
{ }
};
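
A note on the clear_isr flag threaded through the hunks above: it lets the newer CRoC/Crocodile adapters skip the clear-and-reread of the interrupt register on every command completion, while the SIS32-era chips keep the old behaviour. Condensed as a sketch from the ISR changes above:

        /* inside the hrrq processing loop */
        if (ipr_cmd && !ioa_cfg->clear_isr)
                break;          /* new chips: no per-command interrupt clear */

        if (ipr_cmd != NULL) {
                /* legacy path: clear HRRQ_UPDATED and re-read to flush */
                writel(IPR_PCII_HRRQ_UPDATED,
                       ioa_cfg->regs.clr_interrupt_reg32);
                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
        }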
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b13f9cc12279..153b8bd91d1e 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -38,8 +38,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.5.2"
-#define IPR_DRIVER_DATE "(April 27, 2011)"
+#define IPR_DRIVER_VERSION "2.5.3"
+#define IPR_DRIVER_DATE "(March 10, 2012)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -53,12 +53,12 @@
* IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
* ops the mid-layer can send to the adapter.
*/
-#define IPR_NUM_BASE_CMD_BLKS 100
+#define IPR_NUM_BASE_CMD_BLKS (ioa_cfg->max_cmds)
#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D
-#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2 0x034A
+#define PCI_DEVICE_ID_IBM_CROCODILE 0x034A
#define IPR_SUBS_DEV_ID_2780 0x0264
#define IPR_SUBS_DEV_ID_5702 0x0266
@@ -92,7 +92,7 @@
#define IPR_SUBS_DEV_ID_57B1 0x0355
#define IPR_SUBS_DEV_ID_574D 0x0356
-#define IPR_SUBS_DEV_ID_575D 0x035D
+#define IPR_SUBS_DEV_ID_57C8 0x035D
#define IPR_NAME "ipr"
@@ -153,7 +153,7 @@
#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \
((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
-#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS
+#define IPR_MAX_COMMANDS 100
#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \
IPR_NUM_INTERNAL_CMD_BLKS)
@@ -1305,7 +1305,9 @@ struct ipr_interrupts {
struct ipr_chip_cfg_t {
u32 mailbox;
+ u16 max_cmds;
u8 cache_line_size;
+ u8 clear_isr;
struct ipr_interrupt_offsets regs;
};
@@ -1388,6 +1390,7 @@ struct ipr_ioa_cfg {
u8 sis64:1;
u8 dump_timeout:1;
u8 cfg_locked:1;
+ u8 clear_isr:1;
u8 revid;
@@ -1501,8 +1504,9 @@ struct ipr_ioa_cfg {
struct ata_host ata_host;
char ipr_cmd_label[8];
#define IPR_CMD_LABEL "ipr_cmd"
- struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
- dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
+ u32 max_cmds;
+ struct ipr_cmnd **ipr_cmnd_list;
+ dma_addr_t *ipr_cmnd_list_dma;
}; /* struct ipr_ioa_cfg */
struct ipr_cmnd {
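
A note on the IPR_NUM_BASE_CMD_BLKS change above: the macro now expands to ioa_cfg->max_cmds, so IPR_NUM_CMD_BLKS is no longer a compile-time constant and is only usable where a variable named ioa_cfg is in scope; that is why the command-block arrays in struct ipr_ioa_cfg become plain pointers allocated at probe time. A minimal sketch of the pattern, with hypothetical names and counts:

        struct my_cmnd;
        struct my_cfg {
                u16 max_cmds;
                struct my_cmnd **cmnd_list;
        };

        #define NUM_INTERNAL_CMDS       8               /* hypothetical */
        #define NUM_BASE_CMDS           (cfg->max_cmds) /* needs 'cfg' in scope */
        #define NUM_CMDS                (NUM_BASE_CMDS + NUM_INTERNAL_CMDS)

        static int alloc_cmd_blks(struct my_cfg *cfg)
        {
                /* sized at runtime from the per-chip limit */
                cfg->cmnd_list = kcalloc(NUM_CMDS, sizeof(*cfg->cmnd_list),
                                         GFP_KERNEL);
                return cfg->cmnd_list ? 0 : -ENOMEM;
        }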
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index d77891e5683b..b6d7a5c2fc94 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1511,14 +1511,14 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
/* kmap_atomic() ensures addressability of the user buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg)) + sg->offset;
if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
buffer[2] == 'P' && buffer[3] == 'P') {
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ kunmap_atomic(buffer - sg->offset);
local_irq_restore(flags);
return 1;
}
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ kunmap_atomic(buffer - sg->offset);
local_irq_restore(flags);
}
return 0;
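
The ips.c hunk above follows the kernel-wide removal of the KM_* slot argument from kmap_atomic()/kunmap_atomic(); the mapping slot is now managed implicitly, and callers only keep the irq-save protection they already had. The before/after shape, assuming a scatterlist entry sg:

        local_irq_save(flags);
        /* old: buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; */
        buffer = kmap_atomic(sg_page(sg)) + sg->offset;
        /* ... examine buffer ... */
        kunmap_atomic(buffer - sg->offset); /* unmap the page base, not buffer */
        local_irq_restore(flags);

Note that kunmap_atomic() takes the address kmap_atomic() returned, hence the subtraction of sg->offset.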
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 418391b1c361..d4bf9c12ecd4 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -58,7 +58,6 @@
#include "host.h"
#include "isci.h"
#include "port.h"
-#include "host.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
@@ -650,15 +649,13 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
- struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = ha->lldd_ha;
if (test_bit(IHOST_START_PENDING, &ihost->flags))
return 0;
- /* todo: use sas_flush_discovery once it is upstream */
- scsi_flush_work(shost);
-
- scsi_flush_work(shost);
+ sas_drain_work(ha);
dev_dbg(&ihost->pdev->dev,
"%s: ihost->status = %d, time = %ld\n",
@@ -1491,6 +1488,15 @@ sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+ u32 val;
+
+ /* enable clock gating for power control of the scu unit */
+ val = readl(&ihost->smu_registers->clock_gating_control);
+ val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
+ SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
+ SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
+ val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
+ writel(val, &ihost->smu_registers->clock_gating_control);
/* set the default interrupt coalescence number and timeout value. */
sci_controller_set_interrupt_coalescence(ihost, 0, 0);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 5477f0fa8233..adbad69d1069 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -187,6 +187,7 @@ struct isci_host {
int id; /* unique within a given pci device */
struct isci_phy phys[SCI_MAX_PHYS];
struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+ struct asd_sas_port sas_ports[SCI_MAX_PORTS];
struct sas_ha_struct sas_ha;
spinlock_t state_lock;
@@ -393,24 +394,6 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
#define sci_controller_clear_invalid_phy(controller, phy) \
((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
-static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
-{
-
- if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
- return NULL;
-
- return &iphy->isci_port->isci_host->pdev->dev;
-}
-
-static inline struct device *sciport_to_dev(struct isci_port *iport)
-{
-
- if (!iport || !iport->isci_host)
- return NULL;
-
- return &iport->isci_host->pdev->dev;
-}
-
static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
{
if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 17c4c2c89c2e..5137db5a5d85 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -60,6 +60,7 @@
#include <linux/efi.h>
#include <asm/string.h>
#include <scsi/scsi_host.h>
+#include "host.h"
#include "isci.h"
#include "task.h"
#include "probe_roms.h"
@@ -154,7 +155,6 @@ static struct scsi_host_template isci_sht = {
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
- .slave_destroy = sas_slave_destroy,
.scan_finished = isci_host_scan_finished,
.scan_start = isci_host_scan_start,
.change_queue_depth = sas_change_queue_depth,
@@ -166,9 +166,6 @@ static struct scsi_host_template isci_sht = {
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
- .eh_bus_reset_handler = isci_bus_reset_handler,
- .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = isci_host_attrs,
@@ -194,6 +191,9 @@ static struct sas_domain_function_template isci_transport_ops = {
.lldd_lu_reset = isci_task_lu_reset,
.lldd_query_task = isci_task_query_task,
+ /* ata recovery called from ata-eh */
+ .lldd_ata_check_ready = isci_ata_check_ready,
+
/* Port and Adapter management */
.lldd_clear_nexus_port = isci_task_clear_nexus_port,
.lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
@@ -242,18 +242,13 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
if (!sas_ports)
return -ENOMEM;
- /*----------------- Libsas Initialization Stuff----------------------
- * Set various fields in the sas_ha struct:
- */
-
sas_ha->sas_ha_name = DRV_NAME;
sas_ha->lldd_module = THIS_MODULE;
sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
- /* set the array of phy and port structs. */
for (i = 0; i < SCI_MAX_PHYS; i++) {
sas_phys[i] = &isci_host->phys[i].sas_phy;
- sas_ports[i] = &isci_host->ports[i].sas_port;
+ sas_ports[i] = &isci_host->sas_ports[i];
}
sas_ha->sas_phy = sas_phys;
@@ -528,6 +523,13 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
goto err_host_alloc;
}
pci_info->hosts[i] = h;
+
+ /* turn on DIF support */
+ scsi_host_set_prot(h->shost,
+ SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION);
+ scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
@@ -551,9 +553,9 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
int i;
for_each_isci_host(i, ihost, pdev) {
+ wait_for_start(ihost);
isci_unregister(ihost);
isci_host_deinit(ihost);
- sci_controller_disable_interrupts(ihost);
}
}
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fe18acfd6eb3..fab3586840b5 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -59,6 +59,16 @@
#include "scu_event_codes.h"
#include "probe_roms.h"
+#undef C
+#define C(a) (#a)
+static const char *phy_state_name(enum sci_phy_states state)
+{
+ static const char * const strings[] = PHY_STATES;
+
+ return strings[state];
+}
+#undef C
+
/* Maximum arbitration wait time in micro-seconds */
#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
@@ -67,6 +77,19 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
return iphy->max_negotiated_speed;
}
+static struct isci_host *phy_to_host(struct isci_phy *iphy)
+{
+ struct isci_phy *table = iphy - iphy->phy_index;
+ struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]);
+
+ return ihost;
+}
+
+static struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+ return &phy_to_host(iphy)->pdev->dev;
+}
+
static enum sci_status
sci_phy_transport_layer_initialization(struct isci_phy *iphy,
struct scu_transport_layer_registers __iomem *reg)
@@ -446,8 +469,8 @@ enum sci_status sci_phy_start(struct isci_phy *iphy)
enum sci_phy_states state = iphy->sm.current_state_id;
if (state != SCI_PHY_STOPPED) {
- dev_dbg(sciphy_to_dev(iphy),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -472,8 +495,8 @@ enum sci_status sci_phy_stop(struct isci_phy *iphy)
case SCI_PHY_READY:
break;
default:
- dev_dbg(sciphy_to_dev(iphy),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -486,8 +509,8 @@ enum sci_status sci_phy_reset(struct isci_phy *iphy)
enum sci_phy_states state = iphy->sm.current_state_id;
if (state != SCI_PHY_READY) {
- dev_dbg(sciphy_to_dev(iphy),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -536,8 +559,8 @@ enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
return SCI_SUCCESS;
}
default:
- dev_dbg(sciphy_to_dev(iphy),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -591,6 +614,60 @@ static void sci_phy_complete_link_training(struct isci_phy *iphy,
sci_change_state(&iphy->sm, next_state);
}
+static const char *phy_event_name(u32 event_code)
+{
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_PORT_SELECTOR_DETECTED:
+ return "port selector";
+ case SCU_EVENT_SENT_PORT_SELECTION:
+ return "port selection";
+ case SCU_EVENT_HARD_RESET_TRANSMITTED:
+ return "tx hard reset";
+ case SCU_EVENT_HARD_RESET_RECEIVED:
+ return "rx hard reset";
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ return "identify timeout";
+ case SCU_EVENT_LINK_FAILURE:
+ return "link fail";
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ return "sata spinup hold";
+ case SCU_EVENT_SAS_15_SSC:
+ case SCU_EVENT_SAS_15:
+ return "sas 1.5";
+ case SCU_EVENT_SAS_30_SSC:
+ case SCU_EVENT_SAS_30:
+ return "sas 3.0";
+ case SCU_EVENT_SAS_60_SSC:
+ case SCU_EVENT_SAS_60:
+ return "sas 6.0";
+ case SCU_EVENT_SATA_15_SSC:
+ case SCU_EVENT_SATA_15:
+ return "sata 1.5";
+ case SCU_EVENT_SATA_30_SSC:
+ case SCU_EVENT_SATA_30:
+ return "sata 3.0";
+ case SCU_EVENT_SATA_60_SSC:
+ case SCU_EVENT_SATA_60:
+ return "sata 6.0";
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ return "sas detect";
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ return "sata detect";
+ default:
+ return "unknown";
+ }
+}
+
+#define phy_event_dbg(iphy, state, code) \
+ dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+ phy_to_host(iphy)->id, iphy->phy_index, \
+ phy_state_name(state), phy_event_name(code), code)
+
+#define phy_event_warn(iphy, state, code) \
+ dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+ phy_to_host(iphy)->id, iphy->phy_index, \
+ phy_state_name(state), phy_event_name(code), code)
+
enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
{
enum sci_phy_states state = iphy->sm.current_state_id;
@@ -607,11 +684,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
iphy->is_in_link_training = true;
break;
default:
- dev_dbg(sciphy_to_dev(iphy),
- "%s: PHY starting substate machine received "
- "unexpected event_code %x\n",
- __func__,
- event_code);
+ phy_event_dbg(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@@ -648,11 +721,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%s: PHY starting substate machine received "
- "unexpected event_code %x\n",
- __func__, event_code);
-
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
break;
}
@@ -677,10 +746,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%s: PHY starting substate machine received "
- "unexpected event_code %x\n",
- __func__, event_code);
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@@ -691,11 +757,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%s: PHY starting substate machine received unexpected "
- "event_code %x\n",
- __func__,
- event_code);
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@@ -719,11 +781,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%s: PHY starting substate machine received "
- "unexpected event_code %x\n",
- __func__, event_code);
-
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@@ -751,12 +809,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sas_link_training(iphy);
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%s: PHY starting substate machine received "
- "unexpected event_code %x\n",
- __func__,
- event_code);
-
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@@ -793,11 +846,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sas_link_training(iphy);
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%s: PHY starting substate machine received "
- "unexpected event_code %x\n",
- __func__, event_code);
-
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
@@ -815,12 +864,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%s: PHY starting substate machine received "
- "unexpected event_code %x\n",
- __func__,
- event_code);
-
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@@ -838,10 +882,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
iphy->bcn_received_while_port_unassigned = true;
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%sP SCIC PHY 0x%p ready state machine received "
- "unexpected event_code %x\n",
- __func__, iphy, event_code);
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE_INVALID_STATE;
}
return SCI_SUCCESS;
@@ -852,18 +893,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
default:
- dev_warn(sciphy_to_dev(iphy),
- "%s: SCIC PHY 0x%p resetting state machine received "
- "unexpected event_code %x\n",
- __func__, iphy, event_code);
-
+ phy_event_warn(iphy, state, event_code);
return SCI_FAILURE_INVALID_STATE;
break;
}
return SCI_SUCCESS;
default:
- dev_dbg(sciphy_to_dev(iphy),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -956,8 +993,8 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
return result;
}
default:
- dev_dbg(sciphy_to_dev(iphy),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+ __func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -1299,7 +1336,6 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
sas_addr = cpu_to_be64(sci_sas_addr);
memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
- iphy->isci_port = NULL;
iphy->sas_phy.enabled = 0;
iphy->sas_phy.id = index;
iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
@@ -1333,13 +1369,13 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
{
int ret = 0;
struct isci_phy *iphy = sas_phy->lldd_phy;
- struct isci_port *iport = iphy->isci_port;
+ struct asd_sas_port *port = sas_phy->port;
struct isci_host *ihost = sas_phy->ha->lldd_ha;
unsigned long flags;
dev_dbg(&ihost->pdev->dev,
"%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
- __func__, sas_phy, func, buf, iphy, iport);
+ __func__, sas_phy, func, buf, iphy, port);
switch (func) {
case PHY_FUNC_DISABLE:
@@ -1356,11 +1392,10 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
break;
case PHY_FUNC_HARD_RESET:
- if (!iport)
+ if (!port)
return -ENODEV;
- /* Perform the port reset. */
- ret = isci_port_perform_hard_reset(ihost, iport, iphy);
+ ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy);
break;
case PHY_FUNC_GET_EVENTS: {
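
The new phy_to_host() above is worth a second look: it recovers the owning host with no back-pointer by subtracting phy_index from the element's address to reach phys[0], then using container_of() to step out to the embedding structure. A standalone sketch of the same trick, with hypothetical types:

        #include <stddef.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct phy  { int index; };
        struct host { int id; struct phy phys[4]; };

        static struct host *phy_to_host(struct phy *p)
        {
                struct phy *base = p - p->index;  /* back to phys[0] */
                return container_of(base, struct host, phys[0]);
        }

This only works because every phy records its own array index, which the driver establishes at init time.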
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 67699c8e321c..0e45833ba06d 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -103,7 +103,6 @@ struct isci_phy {
struct scu_transport_layer_registers __iomem *transport_layer_registers;
struct scu_link_layer_registers __iomem *link_layer_registers;
struct asd_sas_phy sas_phy;
- struct isci_port *isci_port;
u8 sas_addr[SAS_ADDR_SIZE];
union {
struct sas_identify_frame iaf;
@@ -344,101 +343,65 @@ enum sci_phy_counter_id {
SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
};
-enum sci_phy_states {
- /**
- * Simply the initial state for the base domain state machine.
- */
- SCI_PHY_INITIAL,
-
- /**
- * This state indicates that the phy has successfully been stopped.
- * In this state no new IO operations are permitted on this phy.
- * This state is entered from the INITIAL state.
- * This state is entered from the STARTING state.
- * This state is entered from the READY state.
- * This state is entered from the RESETTING state.
- */
- SCI_PHY_STOPPED,
-
- /**
- * This state indicates that the phy is in the process of becomming
- * ready. In this state no new IO operations are permitted on this phy.
- * This state is entered from the STOPPED state.
- * This state is entered from the READY state.
- * This state is entered from the RESETTING state.
- */
- SCI_PHY_STARTING,
-
- /**
- * Initial state
- */
- SCI_PHY_SUB_INITIAL,
-
- /**
- * Wait state for the hardware OSSP event type notification
- */
- SCI_PHY_SUB_AWAIT_OSSP_EN,
-
- /**
- * Wait state for the PHY speed notification
- */
- SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
-
- /**
- * Wait state for the IAF Unsolicited frame notification
- */
- SCI_PHY_SUB_AWAIT_IAF_UF,
-
- /**
- * Wait state for the request to consume power
- */
- SCI_PHY_SUB_AWAIT_SAS_POWER,
-
- /**
- * Wait state for request to consume power
- */
- SCI_PHY_SUB_AWAIT_SATA_POWER,
-
- /**
- * Wait state for the SATA PHY notification
- */
- SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
-
- /**
- * Wait for the SATA PHY speed notification
- */
- SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
-
- /**
- * Wait state for the SIGNATURE FIS unsolicited frame notification
- */
- SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
-
- /**
- * Exit state for this state machine
- */
- SCI_PHY_SUB_FINAL,
-
- /**
- * This state indicates the the phy is now ready. Thus, the user
- * is able to perform IO operations utilizing this phy as long as it
- * is currently part of a valid port.
- * This state is entered from the STARTING state.
- */
- SCI_PHY_READY,
-
- /**
- * This state indicates that the phy is in the process of being reset.
- * In this state no new IO operations are permitted on this phy.
- * This state is entered from the READY state.
- */
- SCI_PHY_RESETTING,
-
- /**
- * Simply the final state for the base phy state machine.
- */
- SCI_PHY_FINAL,
-};
+/**
+ * enum sci_phy_states - phy state machine states
+ * @SCI_PHY_INITIAL: Simply the initial state for the base domain state
+ * machine.
+ * @SCI_PHY_STOPPED: phy has successfully been stopped. In this state
+ * no new IO operations are permitted on this phy.
+ * @SCI_PHY_STARTING: the phy is in the process of becoming ready. In
+ * this state no new IO operations are permitted on
+ * this phy.
+ * @SCI_PHY_SUB_INITIAL: Initial state
+ * @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event
+ * type notification
+ * @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed
+ * notification
+ * @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame
+ * notification
+ * @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume
+ * power
+ * @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume
+ * power
+ * @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY
+ * notification
+ * @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed
+ * notification
+ * @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS
+ * unsolicited frame notification
+ * @SCI_PHY_SUB_FINAL: Exit state for this state machine
+ * @SCI_PHY_READY: phy is now ready. Thus, the user is able to perform
+ * IO operations utilizing this phy as long as it is
+ * currently part of a valid port. This state is
+ * entered from the STARTING state.
+ * @SCI_PHY_RESETTING: phy is in the process of being reset. In this
+ * state no new IO operations are permitted on this
+ * phy. This state is entered from the READY state.
+ * @SCI_PHY_FINAL: Simply the final state for the base phy state
+ * machine.
+ */
+#define PHY_STATES {\
+ C(PHY_INITIAL),\
+ C(PHY_STOPPED),\
+ C(PHY_STARTING),\
+ C(PHY_SUB_INITIAL),\
+ C(PHY_SUB_AWAIT_OSSP_EN),\
+ C(PHY_SUB_AWAIT_SAS_SPEED_EN),\
+ C(PHY_SUB_AWAIT_IAF_UF),\
+ C(PHY_SUB_AWAIT_SAS_POWER),\
+ C(PHY_SUB_AWAIT_SATA_POWER),\
+ C(PHY_SUB_AWAIT_SATA_PHY_EN),\
+ C(PHY_SUB_AWAIT_SATA_SPEED_EN),\
+ C(PHY_SUB_AWAIT_SIG_FIS_UF),\
+ C(PHY_SUB_FINAL),\
+ C(PHY_READY),\
+ C(PHY_RESETTING),\
+ C(PHY_FINAL),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum sci_phy_states PHY_STATES;
+#undef C
void sci_phy_construct(
struct isci_phy *iphy,
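
The PHY_STATES block above is the classic X-macro pattern: one list, expanded twice. With C(a) defined as SCI_##a it declares the enum; with C(a) as #a it builds the parallel string table that phy_state_name() indexes. A minimal standalone sketch:

        #define STATES { C(STOPPED), C(STARTING), C(READY) }

        #define C(a) PHY_##a
        enum phy_state STATES;                  /* PHY_STOPPED, PHY_STARTING, ... */
        #undef C

        #define C(a) #a
        static const char * const state_names[] = STATES; /* "STOPPED", ... */
        #undef C

Because both expansions come from the same list, state_names[PHY_STARTING] can never drift out of sync with the enum.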
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 7c6ac58a5c4c..5fada73b71ff 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -60,18 +60,29 @@
#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
#define SCU_DUMMY_INDEX (0xFFFF)
-static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
+#undef C
+#define C(a) (#a)
+const char *port_state_name(enum sci_port_states state)
{
- unsigned long flags;
+ static const char * const strings[] = PORT_STATES;
+
+ return strings[state];
+}
+#undef C
+
+static struct device *sciport_to_dev(struct isci_port *iport)
+{
+ int i = iport->physical_port_index;
+ struct isci_port *table;
+ struct isci_host *ihost;
+
+ if (i == SCIC_SDS_DUMMY_PORT)
+ i = SCI_MAX_PORTS+1;
- dev_dbg(&iport->isci_host->pdev->dev,
- "%s: iport = %p, state = 0x%x\n",
- __func__, iport, status);
+ table = iport - i;
+ ihost = container_of(table, typeof(*ihost), ports[0]);
- /* XXX pointless lock */
- spin_lock_irqsave(&iport->state_lock, flags);
- iport->status = status;
- spin_unlock_irqrestore(&iport->state_lock, flags);
+ return &ihost->pdev->dev;
}
static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
@@ -165,18 +176,12 @@ static void isci_port_link_up(struct isci_host *isci_host,
struct sci_port_properties properties;
unsigned long success = true;
- BUG_ON(iphy->isci_port != NULL);
-
- iphy->isci_port = iport;
-
dev_dbg(&isci_host->pdev->dev,
"%s: isci_port = %p\n",
__func__, iport);
spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
- isci_port_change_state(iphy->isci_port, isci_starting);
-
sci_port_get_properties(iport, &properties);
if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
@@ -258,7 +263,6 @@ static void isci_port_link_down(struct isci_host *isci_host,
__func__, isci_device);
set_bit(IDEV_GONE, &isci_device->flags);
}
- isci_port_change_state(isci_port, isci_stopping);
}
}
@@ -269,52 +273,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
PHYE_LOSS_OF_SIGNAL);
- isci_phy->isci_port = NULL;
-
dev_dbg(&isci_host->pdev->dev,
"%s: isci_port = %p - Done\n", __func__, isci_port);
}
-
-/**
- * isci_port_ready() - This function is called by the sci core when a link
- * becomes ready.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the sci port with the active link.
- *
- */
-static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
-{
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_port = %p\n", __func__, isci_port);
-
- complete_all(&isci_port->start_complete);
- isci_port_change_state(isci_port, isci_ready);
- return;
-}
-
-/**
- * isci_port_not_ready() - This function is called by the sci core when a link
- * is not ready. All remote devices on this link will be removed if they are
- * in the stopping state.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the sci port with the active link.
- *
- */
-static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
-{
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_port = %p\n", __func__, isci_port);
-}
-
-static void isci_port_stop_complete(struct isci_host *ihost,
- struct isci_port *iport,
- enum sci_status completion_status)
-{
- dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
-}
-
-
static bool is_port_ready_state(enum sci_port_states state)
{
switch (state) {
@@ -353,7 +315,9 @@ static void port_state_machine_change(struct isci_port *iport,
static void isci_port_hard_reset_complete(struct isci_port *isci_port,
enum sci_status completion_status)
{
- dev_dbg(&isci_port->isci_host->pdev->dev,
+ struct isci_host *ihost = isci_port->owning_controller;
+
+ dev_dbg(&ihost->pdev->dev,
"%s: isci_port = %p, completion_status=%x\n",
__func__, isci_port, completion_status);
@@ -364,23 +328,24 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port,
/* The reset failed. The port state is now SCI_PORT_FAILED. */
if (isci_port->active_phy_mask == 0) {
+ int phy_idx = isci_port->last_active_phy;
+ struct isci_phy *iphy = &ihost->phys[phy_idx];
/* Generate the link down now to the host, since it
* was intercepted by the hard reset state machine when
* it really happened.
*/
- isci_port_link_down(isci_port->isci_host,
- &isci_port->isci_host->phys[
- isci_port->last_active_phy],
- isci_port);
+ isci_port_link_down(ihost, iphy, isci_port);
}
/* Advance the port state so that link state changes will be
- * noticed.
- */
+ * noticed.
+ */
port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
}
- complete_all(&isci_port->hard_reset_complete);
+ clear_bit(IPORT_RESET_PENDING, &isci_port->state);
+ wake_up(&ihost->eventq);
+
}
/* This method will return a true value if the specified phy can be assigned to
@@ -835,10 +800,9 @@ static void port_timeout(unsigned long data)
__func__,
iport);
} else if (current_state == SCI_PORT_STOPPING) {
- /* if the port is still stopping then the stop has not completed */
- isci_port_stop_complete(iport->owning_controller,
- iport,
- SCI_FAILURE_TIMEOUT);
+ dev_dbg(sciport_to_dev(iport),
+ "%s: port%d: stop complete timeout\n",
+ __func__, iport->physical_port_index);
} else {
/* The port is in the ready state and we have a timer
* reporting a timeout this should not happen.
@@ -1003,7 +967,8 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
struct isci_host *ihost = iport->owning_controller;
- isci_port_ready(ihost, iport);
+ dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
+ __func__, iport->physical_port_index);
for (index = 0; index < SCI_MAX_PHYS; index++) {
if (iport->phy_table[index]) {
@@ -1069,7 +1034,8 @@ static void sci_port_ready_substate_operational_exit(struct sci_base_state_machi
*/
sci_port_abort_dummy_request(iport);
- isci_port_not_ready(ihost, iport);
+ dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+ __func__, iport->physical_port_index);
if (iport->ready_exit)
sci_port_invalidate_dummy_remote_node(iport);
@@ -1081,7 +1047,8 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
struct isci_host *ihost = iport->owning_controller;
if (iport->active_phy_mask == 0) {
- isci_port_not_ready(ihost, iport);
+ dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+ __func__, iport->physical_port_index);
port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
} else
@@ -1097,8 +1064,8 @@ enum sci_status sci_port_start(struct isci_port *iport)
state = iport->sm.current_state_id;
if (state != SCI_PORT_STOPPED) {
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -1172,8 +1139,8 @@ enum sci_status sci_port_stop(struct isci_port *iport)
SCI_PORT_STOPPING);
return SCI_SUCCESS;
default:
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -1187,8 +1154,8 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
state = iport->sm.current_state_id;
if (state != SCI_PORT_SUB_OPERATIONAL) {
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -1282,8 +1249,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
default:
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -1332,8 +1299,8 @@ enum sci_status sci_port_remove_phy(struct isci_port *iport,
SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
default:
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -1375,8 +1342,8 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
return SCI_SUCCESS;
default:
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -1405,8 +1372,8 @@ enum sci_status sci_port_link_down(struct isci_port *iport,
sci_port_deactivate_phy(iport, iphy, false);
return SCI_SUCCESS;
default:
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -1425,8 +1392,8 @@ enum sci_status sci_port_start_io(struct isci_port *iport,
iport->started_request_count++;
return SCI_SUCCESS;
default:
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -1440,8 +1407,8 @@ enum sci_status sci_port_complete_io(struct isci_port *iport,
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_STOPPED:
- dev_warn(sciport_to_dev(iport),
- "%s: in wrong state: %d\n", __func__, state);
+ dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+ __func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_PORT_STOPPING:
sci_port_decrement_request_count(iport);
@@ -1547,7 +1514,8 @@ static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
if (prev_state == SCI_PORT_RESETTING)
isci_port_hard_reset_complete(iport, SCI_SUCCESS);
else
- isci_port_not_ready(ihost, iport);
+ dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+ __func__, iport->physical_port_index);
/* Post and suspend the dummy remote node context for this port. */
sci_port_post_dummy_remote_node(iport);
@@ -1644,22 +1612,7 @@ void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
{
INIT_LIST_HEAD(&iport->remote_dev_list);
INIT_LIST_HEAD(&iport->domain_dev_list);
- spin_lock_init(&iport->state_lock);
- init_completion(&iport->start_complete);
iport->isci_host = ihost;
- isci_port_change_state(iport, isci_freed);
-}
-
-/**
- * isci_port_get_state() - This function gets the status of the port object.
- * @isci_port: This parameter points to the isci_port object
- *
- * status of the object as a isci_status enum.
- */
-enum isci_status isci_port_get_state(
- struct isci_port *isci_port)
-{
- return isci_port->status;
}
void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
@@ -1670,6 +1623,11 @@ void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy
isci_port_bc_change_received(ihost, iport, iphy);
}
+static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
+{
+ wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
+}
+
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
struct isci_phy *iphy)
{
@@ -1680,9 +1638,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
__func__, iport);
- init_completion(&iport->hard_reset_complete);
-
spin_lock_irqsave(&ihost->scic_lock, flags);
+ set_bit(IPORT_RESET_PENDING, &iport->state);
#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
@@ -1690,7 +1647,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (status == SCI_SUCCESS) {
- wait_for_completion(&iport->hard_reset_complete);
+ wait_port_reset(ihost, iport);
dev_dbg(&ihost->pdev->dev,
"%s: iport = %p; hard reset completion\n",
@@ -1704,6 +1661,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
__func__, iport, iport->hard_reset_status);
}
} else {
+ clear_bit(IPORT_RESET_PENDING, &iport->state);
+ wake_up(&ihost->eventq);
ret = TMF_RESP_FUNC_FAILED;
dev_err(&ihost->pdev->dev,
@@ -1726,24 +1685,80 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
return ret;
}
-/**
- * isci_port_deformed() - This function is called by libsas when a port becomes
- * inactive.
- * @phy: This parameter specifies the libsas phy with the inactive port.
- *
- */
+int isci_ata_check_ready(struct domain_device *dev)
+{
+ struct isci_port *iport = dev->port->lldd_port;
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev)
+ goto out;
+
+ if (test_bit(IPORT_RESET_PENDING, &iport->state))
+ goto out;
+
+ rc = !!iport->active_phy_mask;
+ out:
+ isci_put_device(idev);
+
+ return rc;
+}
+
void isci_port_deformed(struct asd_sas_phy *phy)
{
- pr_debug("%s: sas_phy = %p\n", __func__, phy);
+ struct isci_host *ihost = phy->ha->lldd_ha;
+ struct isci_port *iport = phy->port->lldd_port;
+ unsigned long flags;
+ int i;
+
+ /* we got a port notification on a port that was subsequently
+ * torn down and libsas is just now catching up
+ */
+ if (!iport)
+ return;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ if (iport->active_phy_mask & 1 << i)
+ break;
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (i >= SCI_MAX_PHYS)
+ dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
+ __func__, (long) (iport - &ihost->ports[0]));
}
-/**
- * isci_port_formed() - This function is called by libsas when a port becomes
- * active.
- * @phy: This parameter specifies the libsas phy with the active port.
- *
- */
void isci_port_formed(struct asd_sas_phy *phy)
{
- pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
+ struct isci_host *ihost = phy->ha->lldd_ha;
+ struct isci_phy *iphy = to_iphy(phy);
+ struct asd_sas_port *port = phy->port;
+ struct isci_port *iport;
+ unsigned long flags;
+ int i;
+
+ /* initial ports are formed as the driver is still initializing,
+ * wait for that process to complete
+ */
+ wait_for_start(ihost);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ for (i = 0; i < SCI_MAX_PORTS; i++) {
+ iport = &ihost->ports[i];
+ if (iport->active_phy_mask & 1 << iphy->phy_index)
+ break;
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (i >= SCI_MAX_PORTS)
+ iport = NULL;
+
+ port->lldd_port = iport;
}
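
The reset path above trades the per-port struct completion for a flag bit plus the host's shared wait queue; this lets isci_ata_check_ready() test for an in-flight reset without blocking, and avoids re-initializing a completion for every reset. The waiter/waker pairing as it appears in the diff:

        /* requester: */
        set_bit(IPORT_RESET_PENDING, &iport->state);
        /* ... kick off the hard reset under scic_lock ... */
        wait_event(ihost->eventq,
                   !test_bit(IPORT_RESET_PENDING, &iport->state));

        /* completion (or failure) side: */
        clear_bit(IPORT_RESET_PENDING, &iport->state);
        wake_up(&ihost->eventq);

The test_bit/clear_bit pairing is what makes the wait condition safe to evaluate from wait_event()'s own context.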
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 08116090eb70..6b56240c2051 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -95,14 +95,11 @@ enum isci_status {
* @timer: timeout start/stop operations
*/
struct isci_port {
- enum isci_status status;
struct isci_host *isci_host;
- struct asd_sas_port sas_port;
struct list_head remote_dev_list;
- spinlock_t state_lock;
struct list_head domain_dev_list;
- struct completion start_complete;
- struct completion hard_reset_complete;
+ #define IPORT_RESET_PENDING 0
+ unsigned long state;
enum sci_status hard_reset_status;
struct sci_base_state_machine sm;
bool ready_exit;
@@ -147,70 +144,47 @@ struct sci_port_properties {
};
/**
- * enum sci_port_states - This enumeration depicts all the states for the
- * common port state machine.
- *
- *
+ * enum sci_port_states - port state machine states
+ * @SCI_PORT_STOPPED: port has successfully been stopped. In this state
+ * no new IO operations are permitted. This state is
+ * entered from the STOPPING state.
+ * @SCI_PORT_STOPPING: port is in the process of stopping. In this
+ * state no new IO operations are permitted, but
+ * existing IO operations are allowed to complete.
+ * This state is entered from the READY state.
+ * @SCI_PORT_READY: port is now ready. Thus, the user is able to
+ * perform IO operations on this port. This state is
+ * entered from the STARTING state.
+ * @SCI_PORT_SUB_WAITING: port is started and ready but has no active
+ * phys.
+ * @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at
+ * least one phy operational.
+ * @SCI_PORT_SUB_CONFIGURING: port is started and there was an
+ * add/remove phy event. This state is only
+ * used in Automatic Port Configuration Mode
+ * (APC)
+ * @SCI_PORT_RESETTING: port is in the process of performing a hard
+ * reset. Thus, the user is unable to perform IO
+ * operations on this port. This state is entered
+ * from the READY state.
+ * @SCI_PORT_FAILED: port has failed a reset request. This state is
+ * entered when a port reset request times out. This
+ * state is entered from the RESETTING state.
*/
-enum sci_port_states {
- /**
- * This state indicates that the port has successfully been stopped.
- * In this state no new IO operations are permitted.
- * This state is entered from the STOPPING state.
- */
- SCI_PORT_STOPPED,
-
- /**
- * This state indicates that the port is in the process of stopping.
- * In this state no new IO operations are permitted, but existing IO
- * operations are allowed to complete.
- * This state is entered from the READY state.
- */
- SCI_PORT_STOPPING,
-
- /**
- * This state indicates the port is now ready. Thus, the user is
- * able to perform IO operations on this port.
- * This state is entered from the STARTING state.
- */
- SCI_PORT_READY,
-
- /**
- * The substate where the port is started and ready but has no
- * active phys.
- */
- SCI_PORT_SUB_WAITING,
-
- /**
- * The substate where the port is started and ready and there is
- * at least one phy operational.
- */
- SCI_PORT_SUB_OPERATIONAL,
-
- /**
- * The substate where the port is started and there was an
- * add/remove phy event. This state is only used in Automatic
- * Port Configuration Mode (APC)
- */
- SCI_PORT_SUB_CONFIGURING,
-
- /**
- * This state indicates the port is in the process of performing a hard
- * reset. Thus, the user is unable to perform IO operations on this
- * port.
- * This state is entered from the READY state.
- */
- SCI_PORT_RESETTING,
-
- /**
- * This state indicates the port has failed a reset request. This state
- * is entered when a port reset request times out.
- * This state is entered from the RESETTING state.
- */
- SCI_PORT_FAILED,
-
-
-};
+#define PORT_STATES {\
+ C(PORT_STOPPED),\
+ C(PORT_STOPPING),\
+ C(PORT_READY),\
+ C(PORT_SUB_WAITING),\
+ C(PORT_SUB_OPERATIONAL),\
+ C(PORT_SUB_CONFIGURING),\
+ C(PORT_RESETTING),\
+ C(PORT_FAILED),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum sci_port_states PORT_STATES;
+#undef C
static inline void sci_port_decrement_request_count(struct isci_port *iport)
{
@@ -296,9 +270,6 @@ void sci_port_get_attached_sas_address(
struct isci_port *iport,
struct sci_sas_address *sas_address);
-enum isci_status isci_port_get_state(
- struct isci_port *isci_port);
-
void isci_port_formed(struct asd_sas_phy *);
void isci_port_deformed(struct asd_sas_phy *);
@@ -309,4 +280,5 @@ void isci_port_init(
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
struct isci_phy *iphy);
+int isci_ata_check_ready(struct domain_device *dev);
#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index eaa541afc755..7eb0ccd45fe6 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -370,6 +370,27 @@ struct scu_iit_entry {
>> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
)
+/* ***************************************************************************** */
+#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT (0)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK (0x00000001)
+#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT (1)
+#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK (0x00000002)
+#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT (2)
+#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK (0x00000004)
+#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT (3)
+#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK (0x00000008)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT (16)
+#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK (0x000F0000)
+#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT (31)
+#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK (0x80000000)
+#define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK (0x7FF0FFF0)
+
+#define SMU_CGUCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value)
+
+#define SMU_CGUCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name)
+
/* -------------------------------------------------------------------------- */
#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
@@ -992,8 +1013,10 @@ struct smu_registers {
u32 mmr_address_window;
/* 0x00A4 SMDW */
u32 mmr_data_window;
- u32 reserved_A8;
- u32 reserved_AC;
+/* 0x00A8 CGUCR */
+ u32 clock_gating_control;
+/* 0x00AC CGUPC */
+ u32 clock_gating_performance;
/* A whole bunch of reserved space */
u32 reserved_Bx[4];
u32 reserved_Cx[4];
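
The CGUCR definitions above follow the driver's SHIFT/MASK naming convention so that SMU_CGUCR_GEN_BIT()/SMU_CGUCR_GEN_VAL() can paste the field name. The SCU_GEN_* helpers are not shown in this hunk; presumably they reduce to something along these lines (hypothetical expansion):

        #define GEN_BIT(name)           (1u << name##_SHIFT)
        #define GEN_VALUE(name, v)      (((v) << name##_SHIFT) & name##_MASK)

        /* e.g. GEN_VALUE(SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT, 3)
         * evaluates to (3 << 16) & 0x000F0000 */

which is what the clock-gating read-modify-write in host.c relies on.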
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index dd74b6ceeb82..8f501b0a81d6 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -62,6 +62,16 @@
#include "scu_event_codes.h"
#include "task.h"
+#undef C
+#define C(a) (#a)
+const char *dev_state_name(enum sci_remote_device_states state)
+{
+ static const char * const strings[] = REMOTE_DEV_STATES;
+
+ return strings[state];
+}
+#undef C
+
/**
* isci_remote_device_not_ready() - This function is called by the ihost when
* the remote device is not ready. We mark the isci device as ready (not
@@ -167,8 +177,8 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
case SCI_DEV_FAILED:
case SCI_DEV_FINAL:
default:
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_STOPPED:
return SCI_SUCCESS;
@@ -226,8 +236,8 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
case SCI_DEV_RESETTING:
case SCI_DEV_FINAL:
default:
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_READY:
case SCI_STP_DEV_IDLE:
@@ -246,8 +256,8 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
enum sci_remote_device_states state = sm->current_state_id;
if (state != SCI_DEV_RESETTING) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -262,8 +272,8 @@ enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
enum sci_remote_device_states state = sm->current_state_id;
if (state != SCI_STP_DEV_CMD) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -287,8 +297,8 @@ enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
case SCI_SMP_DEV_IDLE:
case SCI_DEV_FINAL:
default:
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
/* Return the frame back to the controller */
sci_controller_release_frame(ihost, frame_index);
return SCI_FAILURE_INVALID_STATE;
@@ -502,8 +512,8 @@ enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
case SCI_DEV_RESETTING:
case SCI_DEV_FINAL:
default:
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_READY:
/* attempt to start an io request for this device object. The remote
@@ -637,8 +647,8 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
case SCI_DEV_FAILED:
case SCI_DEV_FINAL:
default:
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_READY:
case SCI_STP_DEV_AWAIT_RESET:
@@ -721,8 +731,8 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
case SCI_DEV_RESETTING:
case SCI_DEV_FINAL:
default:
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_STP_DEV_IDLE:
case SCI_STP_DEV_CMD:
@@ -853,8 +863,8 @@ static enum sci_status sci_remote_device_destruct(struct isci_remote_device *ide
struct isci_host *ihost;
if (state != SCI_DEV_STOPPED) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -1204,8 +1214,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
enum sci_status status;
if (state != SCI_DEV_STOPPED) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
- __func__, state);
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
+ __func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -1308,7 +1318,6 @@ void isci_remote_device_release(struct kref *kref)
clear_bit(IDEV_STOP_PENDING, &idev->flags);
clear_bit(IDEV_IO_READY, &idev->flags);
clear_bit(IDEV_GONE, &idev->flags);
- clear_bit(IDEV_EH, &idev->flags);
smp_mb__before_clear_bit();
clear_bit(IDEV_ALLOCATED, &idev->flags);
wake_up(&ihost->eventq);
@@ -1381,34 +1390,17 @@ void isci_remote_device_gone(struct domain_device *dev)
*
* status, zero indicates success.
*/
-int isci_remote_device_found(struct domain_device *domain_dev)
+int isci_remote_device_found(struct domain_device *dev)
{
- struct isci_host *isci_host = dev_to_ihost(domain_dev);
- struct isci_port *isci_port;
- struct isci_phy *isci_phy;
- struct asd_sas_port *sas_port;
- struct asd_sas_phy *sas_phy;
+ struct isci_host *isci_host = dev_to_ihost(dev);
+ struct isci_port *isci_port = dev->port->lldd_port;
struct isci_remote_device *isci_device;
enum sci_status status;
dev_dbg(&isci_host->pdev->dev,
- "%s: domain_device = %p\n", __func__, domain_dev);
-
- wait_for_start(isci_host);
-
- sas_port = domain_dev->port;
- sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
- port_phy_el);
- isci_phy = to_iphy(sas_phy);
- isci_port = isci_phy->isci_port;
-
- /* we are being called for a device on this port,
- * so it has to come up eventually
- */
- wait_for_completion(&isci_port->start_complete);
+ "%s: domain_device = %p\n", __func__, dev);
- if ((isci_stopping == isci_port_get_state(isci_port)) ||
- (isci_stopped == isci_port_get_state(isci_port)))
+ if (!isci_port)
return -ENODEV;
isci_device = isci_remote_device_alloc(isci_host, isci_port);
@@ -1419,7 +1411,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
INIT_LIST_HEAD(&isci_device->node);
spin_lock_irq(&isci_host->scic_lock);
- isci_device->domain_dev = domain_dev;
+ isci_device->domain_dev = dev;
isci_device->isci_port = isci_port;
list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
@@ -1432,7 +1424,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
if (status == SCI_SUCCESS) {
/* device came up, advertise it to the world */
- domain_dev->lldd_dev = isci_device;
+ dev->lldd_dev = isci_device;
} else
isci_put_device(isci_device);
spin_unlock_irq(&isci_host->scic_lock);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 483ee50152f3..58637ee08f55 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -82,10 +82,9 @@ struct isci_remote_device {
#define IDEV_START_PENDING 0
#define IDEV_STOP_PENDING 1
#define IDEV_ALLOCATED 2
- #define IDEV_EH 3
- #define IDEV_GONE 4
- #define IDEV_IO_READY 5
- #define IDEV_IO_NCQERROR 6
+ #define IDEV_GONE 3
+ #define IDEV_IO_READY 4
+ #define IDEV_IO_NCQERROR 5
unsigned long flags;
struct kref kref;
struct isci_port *isci_port;
@@ -180,122 +179,101 @@ enum sci_status sci_remote_device_reset_complete(
/**
* enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
+ * @SCI_DEV_INITIAL: Simply the initial state for the base remote device
+ * state machine.
*
+ * @SCI_DEV_STOPPED: This state indicates that the remote device has
+ * successfully been stopped. In this state no new IO operations are
+ * permitted. This state is entered from the INITIAL state. This state
+ * is entered from the STOPPING state.
*
+ * @SCI_DEV_STARTING: This state indicates that the remote device is in
+ * the process of becoming ready (i.e. starting). In this state no new
+ * IO operations are permitted. This state is entered from the STOPPED
+ * state.
+ *
+ * @SCI_DEV_READY: This state indicates the remote device is now ready.
+ * Thus, the user is able to perform IO operations on the remote device.
+ * This state is entered from the STARTING state.
+ *
+ * @SCI_STP_DEV_IDLE: This is the idle substate for the STP remote
+ * device. When there is no active IO for the device it is in this
+ * state.
+ *
+ * @SCI_STP_DEV_CMD: This is the command state for the STP remote
+ * device. This state is entered when the device is processing a
+ * non-NCQ command. The device object will fail any new start IO
+ * requests until this command is complete.
+ *
+ * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device.
+ * This state is entered when the device is processing an NCQ request.
+ * It will remain in this state as long as one or more NCQ requests
+ * are being processed.
+ *
+ * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP
+ * remote device. This state is entered when an SDB error FIS is
+ * received by the device object while in the NCQ state. The device
+ * object will only accept a READ LOG command while in this state.
+ *
+ * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP
+ * ATAPI remote device. This state is entered when the ATAPI device
+ * sends an error status FIS without data while the device object is in
+ * the CMD state. A suspension event is expected in this state. The
+ * device object will resume right away.
+ *
+ * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the
+ * device is waiting for a RESET task in order to recover from a
+ * hardware specific error.
+ *
+ * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the
+ * remote device. This is the normal operational state for a remote
+ * device.
+ *
+ * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device.
+ * This is the state that the device is placed in when an RNC suspend is
+ * received by the SCU hardware.
+ *
+ * @SCI_DEV_STOPPING: This state indicates that the remote device is in
+ * the process of stopping. In this state no new IO operations are
+ * permitted, but existing IO operations are allowed to complete. This
+ * state is entered from the READY state. This state is entered from
+ * the FAILED state.
+ *
+ * @SCI_DEV_FAILED: This state indicates that the remote device has
+ * failed. In this state no new IO operations are permitted. This
+ * state is entered from the INITIALIZING state. This state is entered
+ * from the READY state.
+ *
+ * @SCI_DEV_RESETTING: This state indicates the device is being reset.
+ * In this state no new IO operations are permitted. This state is
+ * entered from the READY state.
+ *
+ * @SCI_DEV_FINAL: Simply the final state for the base remote device
+ * state machine.
*/
-enum sci_remote_device_states {
- /**
- * Simply the initial state for the base remote device state machine.
- */
- SCI_DEV_INITIAL,
-
- /**
- * This state indicates that the remote device has successfully been
- * stopped. In this state no new IO operations are permitted.
- * This state is entered from the INITIAL state.
- * This state is entered from the STOPPING state.
- */
- SCI_DEV_STOPPED,
-
- /**
- * This state indicates the the remote device is in the process of
- * becoming ready (i.e. starting). In this state no new IO operations
- * are permitted.
- * This state is entered from the STOPPED state.
- */
- SCI_DEV_STARTING,
-
- /**
- * This state indicates the remote device is now ready. Thus, the user
- * is able to perform IO operations on the remote device.
- * This state is entered from the STARTING state.
- */
- SCI_DEV_READY,
-
- /**
- * This is the idle substate for the stp remote device. When there are no
- * active IO for the device it is is in this state.
- */
- SCI_STP_DEV_IDLE,
-
- /**
- * This is the command state for for the STP remote device. This state is
- * entered when the device is processing a non-NCQ command. The device object
- * will fail any new start IO requests until this command is complete.
- */
- SCI_STP_DEV_CMD,
-
- /**
- * This is the NCQ state for the STP remote device. This state is entered
- * when the device is processing an NCQ reuqest. It will remain in this state
- * so long as there is one or more NCQ requests being processed.
- */
- SCI_STP_DEV_NCQ,
-
- /**
- * This is the NCQ error state for the STP remote device. This state is
- * entered when an SDB error FIS is received by the device object while in the
- * NCQ state. The device object will only accept a READ LOG command while in
- * this state.
- */
- SCI_STP_DEV_NCQ_ERROR,
-
- /**
- * This is the ATAPI error state for the STP ATAPI remote device.
- * This state is entered when ATAPI device sends error status FIS
- * without data while the device object is in CMD state.
- * A suspension event is expected in this state.
- * The device object will resume right away.
- */
- SCI_STP_DEV_ATAPI_ERROR,
-
- /**
- * This is the READY substate indicates the device is waiting for the RESET task
- * coming to be recovered from certain hardware specific error.
- */
- SCI_STP_DEV_AWAIT_RESET,
-
- /**
- * This is the ready operational substate for the remote device. This is the
- * normal operational state for a remote device.
- */
- SCI_SMP_DEV_IDLE,
-
- /**
- * This is the suspended state for the remote device. This is the state that
- * the device is placed in when a RNC suspend is received by the SCU hardware.
- */
- SCI_SMP_DEV_CMD,
-
- /**
- * This state indicates that the remote device is in the process of
- * stopping. In this state no new IO operations are permitted, but
- * existing IO operations are allowed to complete.
- * This state is entered from the READY state.
- * This state is entered from the FAILED state.
- */
- SCI_DEV_STOPPING,
-
- /**
- * This state indicates that the remote device has failed.
- * In this state no new IO operations are permitted.
- * This state is entered from the INITIALIZING state.
- * This state is entered from the READY state.
- */
- SCI_DEV_FAILED,
-
- /**
- * This state indicates the device is being reset.
- * In this state no new IO operations are permitted.
- * This state is entered from the READY state.
- */
- SCI_DEV_RESETTING,
-
- /**
- * Simply the final state for the base remote device state machine.
- */
- SCI_DEV_FINAL,
-};
+#define REMOTE_DEV_STATES {\
+ C(DEV_INITIAL),\
+ C(DEV_STOPPED),\
+ C(DEV_STARTING),\
+ C(DEV_READY),\
+ C(STP_DEV_IDLE),\
+ C(STP_DEV_CMD),\
+ C(STP_DEV_NCQ),\
+ C(STP_DEV_NCQ_ERROR),\
+ C(STP_DEV_ATAPI_ERROR),\
+ C(STP_DEV_AWAIT_RESET),\
+ C(SMP_DEV_IDLE),\
+ C(SMP_DEV_CMD),\
+ C(DEV_STOPPING),\
+ C(DEV_FAILED),\
+ C(DEV_RESETTING),\
+ C(DEV_FINAL),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum sci_remote_device_states REMOTE_DEV_STATES;
+#undef C
+const char *dev_state_name(enum sci_remote_device_states state);
static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
{
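Note: REMOTE_DEV_STATES above -- like the RNC_STATES and REQUEST_STATES lists later in this patch -- is an X-macro: the same C()-wrapped list is expanded once with C(a) defined as SCI_##a to declare the enum, and once with C(a) defined as (#a) to build the matching string table, so an enum value is always a valid index into the name array. A minimal standalone sketch of the idiom (hypothetical names, not driver code):

	#include <stdio.h>

	#define COLOR_STATES {\
		C(COLOR_RED),\
		C(COLOR_GREEN),\
		C(COLOR_BLUE),\
		}
	#undef C
	#define C(a) MY_##a
	enum my_colors COLOR_STATES;		/* MY_COLOR_RED, MY_COLOR_GREEN, ... */
	#undef C
	#define C(a) (#a)
	static const char * const my_color_names[] = COLOR_STATES;
	#undef C

	int main(void)
	{
		/* the two expansions stay in sync by construction */
		printf("%s\n", my_color_names[MY_COLOR_GREEN]);	/* prints COLOR_GREEN */
		return 0;
	}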
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 748e8339d1ec..3a9463481f38 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -60,18 +60,15 @@
#include "scu_event_codes.h"
#include "scu_task_context.h"
+#undef C
+#define C(a) (#a)
+const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
+{
+ static const char * const strings[] = RNC_STATES;
-/**
- *
- * @sci_rnc: The RNC for which the is posted request is being made.
- *
- * This method will return true if the RNC is not in the initial state. In all
- * other states the RNC is considered active and this will return true. The
- * destroy request of the state machine drives the RNC back to the initial
- * state. If the state machine changes then this routine will also have to be
- * changed. bool true if the state machine is not in the initial state false if
- * the state machine is in the initial state
- */
+ return strings[state];
+}
+#undef C
/**
*
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index 41580ad12520..a241e0f4c865 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -85,61 +85,50 @@ struct sci_remote_node_context;
typedef void (*scics_sds_remote_node_context_callback)(void *);
/**
- * This is the enumeration of the remote node context states.
+ * enum scis_sds_remote_node_context_states
+ * @SCI_RNC_INITIAL: initial state for a remote node context. On a resume
+ * request the remote node context will transition to the posting state.
+ *
+ * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once
+ * the RNC is posted the remote node context will be made ready.
+ *
+ * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to
+ * the hardware. Once the invalidate is complete the remote node context will
+ * transition to the posting state.
+ *
+ * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the
+ * hardware. Once the event notification of resume complete is received the
+ * remote node context will transition to the ready state.
+ *
+ * @SCI_RNC_READY: state that the remote node context must be in to accept io
+ * request operations.
+ *
+ * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
+ * it gets a TX suspend notification from the hardware.
+ *
+ * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
+ * when it gets a TX RX suspend notification from the hardware.
+ *
+ * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
+ * for a suspend notification from the hardware. This state is entered when
+ * either there is a request to suspend the remote node context or there is
+ * a TC completion where the remote node will be suspended by the hardware.
*/
-enum scis_sds_remote_node_context_states {
- /**
- * This state is the initial state for a remote node context. On a resume
- * request the remote node context will transition to the posting state.
- */
- SCI_RNC_INITIAL,
-
- /**
- * This is a transition state that posts the RNi to the hardware. Once the RNC
- * is posted the remote node context will be made ready.
- */
- SCI_RNC_POSTING,
-
- /**
- * This is a transition state that will post an RNC invalidate to the
- * hardware. Once the invalidate is complete the remote node context will
- * transition to the posting state.
- */
- SCI_RNC_INVALIDATING,
-
- /**
- * This is a transition state that will post an RNC resume to the hardare.
- * Once the event notification of resume complete is received the remote node
- * context will transition to the ready state.
- */
- SCI_RNC_RESUMING,
-
- /**
- * This is the state that the remote node context must be in to accept io
- * request operations.
- */
- SCI_RNC_READY,
-
- /**
- * This is the state that the remote node context transitions to when it gets
- * a TX suspend notification from the hardware.
- */
- SCI_RNC_TX_SUSPENDED,
-
- /**
- * This is the state that the remote node context transitions to when it gets
- * a TX RX suspend notification from the hardware.
- */
- SCI_RNC_TX_RX_SUSPENDED,
-
- /**
- * This state is a wait state for the remote node context that waits for a
- * suspend notification from the hardware. This state is entered when either
- * there is a request to supend the remote node context or when there is a TC
- * completion where the remote node will be suspended by the hardware.
- */
- SCI_RNC_AWAIT_SUSPENSION
-};
+#define RNC_STATES {\
+ C(RNC_INITIAL),\
+ C(RNC_POSTING),\
+ C(RNC_INVALIDATING),\
+ C(RNC_RESUMING),\
+ C(RNC_READY),\
+ C(RNC_TX_SUSPENDED),\
+ C(RNC_TX_RX_SUSPENDED),\
+ C(RNC_AWAIT_SUSPENSION),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum scis_sds_remote_node_context_states RNC_STATES;
+#undef C
+const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
/**
*
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 192cb48d849a..2def1e3960f6 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -53,6 +53,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
@@ -60,6 +61,16 @@
#include "scu_event_codes.h"
#include "sas.h"
+#undef C
+#define C(a) (#a)
+const char *req_state_name(enum sci_base_request_states state)
+{
+ static const char * const strings[] = REQUEST_STATES;
+
+ return strings[state];
+}
+#undef C
+
static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
int idx)
{
@@ -264,6 +275,141 @@ static void scu_ssp_reqeust_construct_task_context(
task_context->response_iu_lower = lower_32_bits(dma_addr);
}
+static u8 scu_bg_blk_size(struct scsi_device *sdp)
+{
+ switch (sdp->sector_size) {
+ case 512:
+ return 0;
+ case 1024:
+ return 1;
+ case 4096:
+ return 3;
+ default:
+ return 0xff;
+ }
+}
+
+static u32 scu_dif_bytes(u32 len, u32 sector_size)
+{
+ return (len >> ilog2(sector_size)) * 8;
+}
+
+static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
+{
+ struct scu_task_context *tc = ireq->tc;
+ struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
+ u8 blk_sz = scu_bg_blk_size(scmd->device);
+
+ tc->block_guard_enable = 1;
+ tc->blk_prot_en = 1;
+ tc->blk_sz = blk_sz;
+ /* DIF write insert */
+ tc->blk_prot_func = 0x2;
+
+ tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
+ scmd->device->sector_size);
+
+ /* always init to 0, used by hw */
+ tc->interm_crc_val = 0;
+
+ tc->init_crc_seed = 0;
+ tc->app_tag_verify = 0;
+ tc->app_tag_gen = 0;
+ tc->ref_tag_seed_verify = 0;
+
+ /* always init to same as bg_blk_sz */
+ tc->UD_bytes_immed_val = scmd->device->sector_size;
+
+ tc->reserved_DC_0 = 0;
+
+ /* always init to 8 */
+ tc->DIF_bytes_immed_val = 8;
+
+ tc->reserved_DC_1 = 0;
+ tc->bgc_blk_sz = scmd->device->sector_size;
+ tc->reserved_E0_0 = 0;
+ tc->app_tag_gen_mask = 0;
+
+ /** setup block guard control **/
+ tc->bgctl = 0;
+
+ /* DIF write insert */
+ tc->bgctl_f.op = 0x2;
+
+ tc->app_tag_verify_mask = 0;
+
+ /* must init to 0 for hw */
+ tc->blk_guard_err = 0;
+
+ tc->reserved_E8_0 = 0;
+
+ if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
+ tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
+ else if (type & SCSI_PROT_DIF_TYPE3)
+ tc->ref_tag_seed_gen = 0;
+}
+
+static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
+{
+ struct scu_task_context *tc = ireq->tc;
+ struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
+ u8 blk_sz = scu_bg_blk_size(scmd->device);
+
+ tc->block_guard_enable = 1;
+ tc->blk_prot_en = 1;
+ tc->blk_sz = blk_sz;
+ /* DIF read strip */
+ tc->blk_prot_func = 0x1;
+
+ tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
+ scmd->device->sector_size);
+
+ /* always init to 0, used by hw */
+ tc->interm_crc_val = 0;
+
+ tc->init_crc_seed = 0;
+ tc->app_tag_verify = 0;
+ tc->app_tag_gen = 0;
+
+ if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
+ tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
+ else if (type & SCSI_PROT_DIF_TYPE3)
+ tc->ref_tag_seed_verify = 0;
+
+ /* always init to same as bg_blk_sz */
+ tc->UD_bytes_immed_val = scmd->device->sector_size;
+
+ tc->reserved_DC_0 = 0;
+
+ /* always init to 8 */
+ tc->DIF_bytes_immed_val = 8;
+
+ tc->reserved_DC_1 = 0;
+ tc->bgc_blk_sz = scmd->device->sector_size;
+ tc->reserved_E0_0 = 0;
+ tc->app_tag_gen_mask = 0;
+
+ /** setup block guard control **/
+ tc->bgctl = 0;
+
+ /* DIF read strip */
+ tc->bgctl_f.crc_verify = 1;
+ tc->bgctl_f.op = 0x1;
+ if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
+ tc->bgctl_f.ref_tag_chk = 1;
+ tc->bgctl_f.app_f_detect = 1;
+ } else if (type & SCSI_PROT_DIF_TYPE3)
+ tc->bgctl_f.app_ref_f_detect = 1;
+
+ tc->app_tag_verify_mask = 0;
+
+ /* must init to 0 for hw */
+ tc->blk_guard_err = 0;
+
+ tc->reserved_E8_0 = 0;
+ tc->ref_tag_seed_gen = 0;
+}
+
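For scale: scu_dif_bytes() above accounts for one 8-byte protection tuple per logical sector, so the wire transfer length grows by (len / sector_size) * 8 bytes. A quick illustrative check (values are hypothetical):

	/* 4 KiB of 512-byte sectors -> 8 sectors -> 64 bytes of PI */
	u32 pi_bytes = scu_dif_bytes(4096, 512);	/* (4096 >> 9) * 8 == 64 */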
/**
* This method is will fill in the SCU Task Context for a SSP IO request.
* @sci_req:
@@ -274,6 +420,10 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
u32 len)
{
struct scu_task_context *task_context = ireq->tc;
+ struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
+ struct scsi_cmnd *scmd = sas_task->uldd_task;
+ u8 prot_type = scsi_get_prot_type(scmd);
+ u8 prot_op = scsi_get_prot_op(scmd);
scu_ssp_reqeust_construct_task_context(ireq, task_context);
@@ -296,6 +446,13 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
if (task_context->transfer_length_bytes > 0)
sci_request_build_sgl(ireq);
+
+ if (prot_type != SCSI_PROT_DIF_TYPE0) {
+ if (prot_op == SCSI_PROT_READ_STRIP)
+ scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
+ else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
+ }
}
/**
@@ -519,18 +676,12 @@ sci_io_request_construct_sata(struct isci_request *ireq,
if (test_bit(IREQ_TMF, &ireq->flags)) {
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
- if (tmf->tmf_code == isci_tmf_sata_srst_high ||
- tmf->tmf_code == isci_tmf_sata_srst_low) {
- scu_stp_raw_request_construct_task_context(ireq);
- return SCI_SUCCESS;
- } else {
- dev_err(&ireq->owning_controller->pdev->dev,
- "%s: Request 0x%p received un-handled SAT "
- "management protocol 0x%x.\n",
- __func__, ireq, tmf->tmf_code);
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Request 0x%p received un-handled SAT "
+ "management protocol 0x%x.\n",
+ __func__, ireq, tmf->tmf_code);
- return SCI_FAILURE;
- }
+ return SCI_FAILURE;
}
if (!sas_protocol_ata(task->task_proto)) {
@@ -627,34 +778,6 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
return status;
}
-enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
-{
- enum sci_status status = SCI_SUCCESS;
-
- /* check for management protocols */
- if (test_bit(IREQ_TMF, &ireq->flags)) {
- struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-
- if (tmf->tmf_code == isci_tmf_sata_srst_high ||
- tmf->tmf_code == isci_tmf_sata_srst_low) {
- scu_stp_raw_request_construct_task_context(ireq);
- } else {
- dev_err(&ireq->owning_controller->pdev->dev,
- "%s: Request 0x%p received un-handled SAT "
- "Protocol 0x%x.\n",
- __func__, ireq, tmf->tmf_code);
-
- return SCI_FAILURE;
- }
- }
-
- if (status != SCI_SUCCESS)
- return status;
- sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
-
- return status;
-}
-
/**
* sci_req_tx_bytes - bytes transferred when reply underruns request
* @ireq: request that was terminated early
@@ -756,9 +879,6 @@ sci_io_request_terminate(struct isci_request *ireq)
case SCI_REQ_STP_PIO_WAIT_FRAME:
case SCI_REQ_STP_PIO_DATA_IN:
case SCI_REQ_STP_PIO_DATA_OUT:
- case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
- case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
- case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
case SCI_REQ_ATAPI_WAIT_H2D:
case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
case SCI_REQ_ATAPI_WAIT_D2H:
@@ -800,7 +920,8 @@ enum sci_status sci_request_complete(struct isci_request *ireq)
state = ireq->sm.current_state_id;
if (WARN_ONCE(state != SCI_REQ_COMPLETED,
- "isci: request completion from wrong state (%d)\n", state))
+ "isci: request completion from wrong state (%s)\n",
+ req_state_name(state)))
return SCI_FAILURE_INVALID_STATE;
if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
@@ -821,8 +942,8 @@ enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
state = ireq->sm.current_state_id;
if (state != SCI_REQ_STP_PIO_DATA_IN) {
- dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
- __func__, event_code, state);
+ dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
+ __func__, event_code, req_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@@ -1304,9 +1425,9 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
struct page *page = sg_page(sg);
copy_len = min_t(int, total_len, sg_dma_len(sg));
- kaddr = kmap_atomic(page, KM_IRQ0);
+ kaddr = kmap_atomic(page);
memcpy(kaddr + sg->offset, src_addr, copy_len);
- kunmap_atomic(kaddr, KM_IRQ0);
+ kunmap_atomic(kaddr);
total_len -= copy_len;
src_addr += copy_len;
sg = sg_next(sg);
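All of the kmap conversions in this patch have the same shape: the deprecated km_type slot argument (KM_IRQ0, KM_SOFTIRQ0) is gone, and the stack-based kmap_atomic() pairs with a one-argument kunmap_atomic() that must be released in LIFO order. A minimal sketch of the new pairing (page, offset, src, and len stand in for the caller's context):

	void *kaddr = kmap_atomic(page);	/* map; slot chosen implicitly */
	memcpy(kaddr + offset, src, len);	/* touch the mapping */
	kunmap_atomic(kaddr);			/* unmap, LIFO w.r.t. other maps */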
@@ -1654,7 +1775,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
&frame_header);
- kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ kaddr = kmap_atomic(sg_page(sg));
rsp = kaddr + sg->offset;
sci_swab32_cpy(rsp, frame_header, 1);
@@ -1691,7 +1812,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}
- kunmap_atomic(kaddr, KM_IRQ0);
+ kunmap_atomic(kaddr);
sci_controller_release_frame(ihost, frame_index);
@@ -1938,59 +2059,6 @@ sci_io_request_frame_handler(struct isci_request *ireq,
return status;
}
- case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
- struct dev_to_host_fis *frame_header;
- u32 *frame_buffer;
-
- status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
- frame_index,
- (void **)&frame_header);
- if (status != SCI_SUCCESS) {
- dev_err(&ihost->pdev->dev,
- "%s: SCIC IO Request 0x%p could not get frame "
- "header for frame index %d, status %x\n",
- __func__,
- stp_req,
- frame_index,
- status);
- return status;
- }
-
- switch (frame_header->fis_type) {
- case FIS_REGD2H:
- sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
- frame_index,
- (void **)&frame_buffer);
-
- sci_controller_copy_sata_response(&ireq->stp.rsp,
- frame_header,
- frame_buffer);
-
- /* The command has completed with error */
- ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
- ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
- break;
-
- default:
- dev_warn(&ihost->pdev->dev,
- "%s: IO Request:0x%p Frame Id:%d protocol "
- "violation occurred\n",
- __func__,
- stp_req,
- frame_index);
-
- ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
- ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
- break;
- }
-
- sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
-
- /* Frame has been decoded return it to the controller */
- sci_controller_release_frame(ihost, frame_index);
-
- return status;
- }
case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
struct sas_task *task = isci_request_access_task(ireq);
@@ -2088,57 +2156,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
return status;
}
-static enum sci_status
-stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
- u32 completion_code)
-{
- switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- ireq->scu_status = SCU_TASK_DONE_GOOD;
- ireq->sci_status = SCI_SUCCESS;
- sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
- break;
-
- default:
- /*
- * All other completion status cause the IO to be complete.
- * If a NAK was received, then it is up to the user to retry
- * the request.
- */
- ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
- ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
- sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
- break;
- }
-
- return SCI_SUCCESS;
-}
-
-static enum sci_status
-stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
- u32 completion_code)
-{
- switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- ireq->scu_status = SCU_TASK_DONE_GOOD;
- ireq->sci_status = SCI_SUCCESS;
- sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
- break;
-
- default:
- /* All other completion status cause the IO to be complete. If
- * a NAK was received, then it is up to the user to retry the
- * request.
- */
- ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
- ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
- sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
- break;
- }
-
- return SCI_SUCCESS;
-}
-
static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
enum sci_base_request_states next)
{
@@ -2284,14 +2301,6 @@ sci_io_request_tc_completion(struct isci_request *ireq,
case SCI_REQ_STP_PIO_DATA_OUT:
return pio_data_out_tx_done_tc_event(ireq, completion_code);
- case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
- return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
- completion_code);
-
- case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
- return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
- completion_code);
-
case SCI_REQ_ABORTING:
return request_aborting_state_tc_event(ireq,
completion_code);
@@ -2308,12 +2317,8 @@ sci_io_request_tc_completion(struct isci_request *ireq,
return atapi_data_tc_completion_handler(ireq, completion_code);
default:
- dev_warn(&ihost->pdev->dev,
- "%s: SCIC IO Request given task completion "
- "notification %x while in wrong state %d\n",
- __func__,
- completion_code,
- state);
+ dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
+ __func__, completion_code, req_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@@ -3023,10 +3028,10 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
/* need to swab it back in case the command buffer is re-used */
- kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ kaddr = kmap_atomic(sg_page(sg));
smp_req = kaddr + sg->offset;
sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
- kunmap_atomic(kaddr, KM_IRQ0);
+ kunmap_atomic(kaddr);
break;
}
default:
@@ -3065,10 +3070,6 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
*/
if (!task && dev->dev_type == SAS_END_DEV) {
state = SCI_REQ_TASK_WAIT_TC_COMP;
- } else if (!task &&
- (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
- isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
- state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
state = SCI_REQ_SMP_WAIT_RESP;
} else if (task && sas_protocol_ata(task->task_proto) &&
@@ -3125,31 +3126,6 @@ static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_ba
ireq->target_device->working_request = ireq;
}
-static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
-{
- struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
-
- ireq->target_device->working_request = ireq;
-}
-
-static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
-{
- struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
- struct scu_task_context *tc = ireq->tc;
- struct host_to_dev_fis *h2d_fis;
- enum sci_status status;
-
- /* Clear the SRST bit */
- h2d_fis = &ireq->stp.cmd;
- h2d_fis->control = 0;
-
- /* Clear the TC control bit */
- tc->control_frame = 0;
-
- status = sci_controller_continue_io(ireq);
- WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
-}
-
static const struct sci_base_state sci_request_state_table[] = {
[SCI_REQ_INIT] = { },
[SCI_REQ_CONSTRUCTED] = { },
@@ -3168,13 +3144,6 @@ static const struct sci_base_state sci_request_state_table[] = {
[SCI_REQ_STP_PIO_DATA_OUT] = { },
[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
- [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
- .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
- },
- [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
- .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
- },
- [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
[SCI_REQ_TASK_WAIT_TC_COMP] = { },
[SCI_REQ_TASK_WAIT_TC_RESP] = { },
[SCI_REQ_SMP_WAIT_RESP] = { },
@@ -3311,7 +3280,7 @@ sci_io_request_construct_smp(struct device *dev,
u8 req_len;
u32 cmd;
- kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ kaddr = kmap_atomic(sg_page(sg));
smp_req = kaddr + sg->offset;
/*
* Look at the SMP requests' header fields; for certain SAS 1.x SMP
@@ -3337,7 +3306,7 @@ sci_io_request_construct_smp(struct device *dev,
req_len = smp_req->req_len;
sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
cmd = *(u32 *) smp_req;
- kunmap_atomic(kaddr, KM_IRQ0);
+ kunmap_atomic(kaddr);
if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
return SCI_FAILURE;
@@ -3649,8 +3618,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
/* Cause this task to be scheduled in the SCSI error
* handler thread.
*/
- isci_execpath_callback(ihost, task,
- sas_task_abort);
+ sas_task_abort(task);
/* Change the status, since we are holding
* the I/O until it is managed by the SCSI
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index be38933dd6df..057f2378452d 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -182,138 +182,103 @@ static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
}
/**
- * enum sci_base_request_states - This enumeration depicts all the states for
- * the common request state machine.
+ * enum sci_base_request_states - request state machine states
*
+ * @SCI_REQ_INIT: Simply the initial state for the base request state machine.
*
+ * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been
+ * constructed. This state is entered from the INITIAL state.
+ *
+ * @SCI_REQ_STARTED: This state indicates that the request has been started.
+ * This state is entered from the CONSTRUCTED state.
+ *
+ * @SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ * @SCI_REQ_STP_UDMA_WAIT_D2H:
+ * @SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ * @SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ *
+ * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is
+ * waiting for the TC completion notification for the H2D Register FIS
+ *
+ * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is
+ * waiting for either a PIO Setup FIS or a D2H register FIS. The type of frame
+ * received is based on the result of the prior frame and line conditions.
+ *
+ * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is
+ * waiting for a DATA frame from the device.
+ *
+ * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is
+ * waiting to transmit the next data frame to the device.
+ *
+ * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is
+ * waiting for the TC completion notification for the H2D Register FIS
+ *
+ * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is
+ * waiting for either a PIO Setup.
+ *
+ * @SCI_REQ_ATAPI_WAIT_D2H: Non-data IO transitions to this state after
+ * receiving a TC completion. While in this state the IO request object is
+ * waiting for the D2H status frame as a UF.
+ *
+ * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports
+ * task context completion after every frame submission, so in the
+ * non-accelerated case we need to expect the completion for the "cdb" frame.
+ *
+ * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
+ * the started raw task management request is waiting for the transmission of
+ * the initial frame (i.e. command, task, etc.).
+ *
+ * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task
+ * management request is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ *
+ * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started SMP
+ * request is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ *
+ * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
+ * the started SMP request is waiting for the transmission of the initial frame
+ * (i.e. command, task, etc.).
+ *
+ * @SCI_REQ_COMPLETED: This state indicates that the request has completed.
+ * This state is entered from the STARTED state. This state is entered from the
+ * ABORTING state.
+ *
+ * @SCI_REQ_ABORTING: This state indicates that the request is in the process
+ * of being terminated/aborted. This state is entered from the CONSTRUCTED
+ * state. This state is entered from the STARTED state.
+ *
+ * @SCI_REQ_FINAL: Simply the final state for the base request state machine.
*/
-enum sci_base_request_states {
- /*
- * Simply the initial state for the base request state machine.
- */
- SCI_REQ_INIT,
-
- /*
- * This state indicates that the request has been constructed.
- * This state is entered from the INITIAL state.
- */
- SCI_REQ_CONSTRUCTED,
-
- /*
- * This state indicates that the request has been started. This state
- * is entered from the CONSTRUCTED state.
- */
- SCI_REQ_STARTED,
-
- SCI_REQ_STP_UDMA_WAIT_TC_COMP,
- SCI_REQ_STP_UDMA_WAIT_D2H,
-
- SCI_REQ_STP_NON_DATA_WAIT_H2D,
- SCI_REQ_STP_NON_DATA_WAIT_D2H,
-
- SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
- SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
- SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
-
- /*
- * While in this state the IO request object is waiting for the TC
- * completion notification for the H2D Register FIS
- */
- SCI_REQ_STP_PIO_WAIT_H2D,
-
- /*
- * While in this state the IO request object is waiting for either a
- * PIO Setup FIS or a D2H register FIS. The type of frame received is
- * based on the result of the prior frame and line conditions.
- */
- SCI_REQ_STP_PIO_WAIT_FRAME,
-
- /*
- * While in this state the IO request object is waiting for a DATA
- * frame from the device.
- */
- SCI_REQ_STP_PIO_DATA_IN,
-
- /*
- * While in this state the IO request object is waiting to transmit
- * the next data frame to the device.
- */
- SCI_REQ_STP_PIO_DATA_OUT,
-
- /*
- * While in this state the IO request object is waiting for the TC
- * completion notification for the H2D Register FIS
- */
- SCI_REQ_ATAPI_WAIT_H2D,
-
- /*
- * While in this state the IO request object is waiting for either a
- * PIO Setup.
- */
- SCI_REQ_ATAPI_WAIT_PIO_SETUP,
-
- /*
- * The non-data IO transit to this state in this state after receiving
- * TC completion. While in this state IO request object is waiting for
- * D2H status frame as UF.
- */
- SCI_REQ_ATAPI_WAIT_D2H,
-
- /*
- * When transmitting raw frames hardware reports task context completion
- * after every frame submission, so in the non-accelerated case we need
- * to expect the completion for the "cdb" frame.
- */
- SCI_REQ_ATAPI_WAIT_TC_COMP,
-
- /*
- * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
- * task management request is waiting for the transmission of the
- * initial frame (i.e. command, task, etc.).
- */
- SCI_REQ_TASK_WAIT_TC_COMP,
-
- /*
- * This sub-state indicates that the started task management request
- * is waiting for the reception of an unsolicited frame
- * (i.e. response IU).
- */
- SCI_REQ_TASK_WAIT_TC_RESP,
-
- /*
- * This sub-state indicates that the started task management request
- * is waiting for the reception of an unsolicited frame
- * (i.e. response IU).
- */
- SCI_REQ_SMP_WAIT_RESP,
-
- /*
- * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
- * request is waiting for the transmission of the initial frame
- * (i.e. command, task, etc.).
- */
- SCI_REQ_SMP_WAIT_TC_COMP,
-
- /*
- * This state indicates that the request has completed.
- * This state is entered from the STARTED state. This state is entered
- * from the ABORTING state.
- */
- SCI_REQ_COMPLETED,
-
- /*
- * This state indicates that the request is in the process of being
- * terminated/aborted.
- * This state is entered from the CONSTRUCTED state.
- * This state is entered from the STARTED state.
- */
- SCI_REQ_ABORTING,
-
- /*
- * Simply the final state for the base request state machine.
- */
- SCI_REQ_FINAL,
-};
+#define REQUEST_STATES {\
+ C(REQ_INIT),\
+ C(REQ_CONSTRUCTED),\
+ C(REQ_STARTED),\
+ C(REQ_STP_UDMA_WAIT_TC_COMP),\
+ C(REQ_STP_UDMA_WAIT_D2H),\
+ C(REQ_STP_NON_DATA_WAIT_H2D),\
+ C(REQ_STP_NON_DATA_WAIT_D2H),\
+ C(REQ_STP_PIO_WAIT_H2D),\
+ C(REQ_STP_PIO_WAIT_FRAME),\
+ C(REQ_STP_PIO_DATA_IN),\
+ C(REQ_STP_PIO_DATA_OUT),\
+ C(REQ_ATAPI_WAIT_H2D),\
+ C(REQ_ATAPI_WAIT_PIO_SETUP),\
+ C(REQ_ATAPI_WAIT_D2H),\
+ C(REQ_ATAPI_WAIT_TC_COMP),\
+ C(REQ_TASK_WAIT_TC_COMP),\
+ C(REQ_TASK_WAIT_TC_RESP),\
+ C(REQ_SMP_WAIT_RESP),\
+ C(REQ_SMP_WAIT_TC_COMP),\
+ C(REQ_COMPLETED),\
+ C(REQ_ABORTING),\
+ C(REQ_FINAL),\
+ }
+#undef C
+#define C(a) SCI_##a
+enum sci_base_request_states REQUEST_STATES;
+#undef C
+const char *req_state_name(enum sci_base_request_states state);
enum sci_status sci_request_start(struct isci_request *ireq);
enum sci_status sci_io_request_terminate(struct isci_request *ireq);
@@ -446,10 +411,7 @@ sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 io_tag,
struct isci_request *ireq);
-enum sci_status
-sci_task_request_construct_ssp(struct isci_request *ireq);
-enum sci_status
-sci_task_request_construct_sata(struct isci_request *ireq);
+enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
void sci_smp_request_copy_response(struct isci_request *ireq);
static inline int isci_task_is_ncq_recovery(struct sas_task *task)
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
index 7df87d923285..869a979eb5b2 100644
--- a/drivers/scsi/isci/scu_task_context.h
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -866,9 +866,9 @@ struct scu_task_context {
struct transport_snapshot snapshot; /* read only set to 0 */
/* OFFSET 0x5C */
- u32 block_protection_enable:1;
- u32 block_size:2;
- u32 block_protection_function:2;
+ u32 blk_prot_en:1;
+ u32 blk_sz:2;
+ u32 blk_prot_func:2;
u32 reserved_5C_0:9;
u32 active_sgl_element:2; /* read only set to 0 */
u32 sgl_exhausted:1; /* read only set to 0 */
@@ -896,33 +896,56 @@ struct scu_task_context {
u32 reserved_C4_CC[3];
/* OFFSET 0xD0 */
- u32 intermediate_crc_value:16;
- u32 initial_crc_seed:16;
+ u32 interm_crc_val:16;
+ u32 init_crc_seed:16;
/* OFFSET 0xD4 */
- u32 application_tag_for_verify:16;
- u32 application_tag_for_generate:16;
+ u32 app_tag_verify:16;
+ u32 app_tag_gen:16;
/* OFFSET 0xD8 */
- u32 reference_tag_seed_for_verify_function;
+ u32 ref_tag_seed_verify;
/* OFFSET 0xDC */
- u32 reserved_DC;
+ u32 UD_bytes_immed_val:13;
+ u32 reserved_DC_0:3;
+ u32 DIF_bytes_immed_val:4;
+ u32 reserved_DC_1:12;
/* OFFSET 0xE0 */
- u32 reserved_E0_0:16;
- u32 application_tag_mask_for_generate:16;
+ u32 bgc_blk_sz:13;
+ u32 reserved_E0_0:3;
+ u32 app_tag_gen_mask:16;
/* OFFSET 0xE4 */
- u32 block_protection_control:16;
- u32 application_tag_mask_for_verify:16;
+ union {
+ u16 bgctl;
+ struct {
+ u16 crc_verify:1;
+ u16 app_tag_chk:1;
+ u16 ref_tag_chk:1;
+ u16 op:2;
+ u16 legacy:1;
+ u16 invert_crc_seed:1;
+ u16 ref_tag_gen:1;
+ u16 fixed_ref_tag:1;
+ u16 invert_crc:1;
+ u16 app_ref_f_detect:1;
+ u16 uninit_dif_check_err:1;
+ u16 uninit_dif_bypass:1;
+ u16 app_f_detect:1;
+ u16 reserved_0:2;
+ } bgctl_f;
+ };
+
+ u16 app_tag_verify_mask;
/* OFFSET 0xE8 */
- u32 block_protection_error:8;
+ u32 blk_guard_err:8;
u32 reserved_E8_0:24;
/* OFFSET 0xEC */
- u32 reference_tag_seed_for_verify;
+ u32 ref_tag_seed_gen;
/* OFFSET 0xF0 */
u32 intermediate_crc_valid_snapshot:16;
@@ -937,6 +960,6 @@ struct scu_task_context {
/* OFFSET 0xFC */
u32 reference_tag_seed_for_generate_function_snapshot;
-};
+} __packed;
#endif /* _SCU_TASK_CONTEXT_H_ */
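The new bgctl union is the usual register overlay: software sets individual fields through bgctl_f while the raw u16 is what reaches the hardware, and __packed pins the task context to its fixed device layout. Bitfield ordering is compiler/ABI dependent, which is why such structs are only safe for one target ABI. A minimal sketch of the overlay idiom with hypothetical names:

	typedef unsigned short u16;	/* kernel type, stubbed for this sketch */

	union bg_control {
		u16 raw;		/* value handed to hardware */
		struct {
			u16 crc_verify:1;
			u16 op:2;	/* 0x1 read strip, 0x2 write insert */
			u16 reserved:13;
		} f;
	};

	static u16 bg_control_for_insert(void)
	{
		union bg_control bgc = { .raw = 0 };	/* clear all fields */

		bgc.f.op = 0x2;				/* DIF write insert */
		return bgc.raw;
	}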
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index f5a3f7d2bdab..374254ede9d4 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -96,8 +96,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
__func__, task, response, status);
task->lldd_task = NULL;
-
- isci_execpath_callback(ihost, task, task->task_done);
+ task->task_done(task);
break;
case isci_perform_aborted_io_completion:
@@ -117,8 +116,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
"%s: Error - task = %p, response=%d, "
"status=%d\n",
__func__, task, response, status);
-
- isci_execpath_callback(ihost, task, sas_task_abort);
+ sas_task_abort(task);
break;
default:
@@ -249,46 +247,6 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
return 0;
}
-static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
-{
- struct isci_tmf *isci_tmf;
- enum sci_status status;
-
- if (!test_bit(IREQ_TMF, &ireq->flags))
- return SCI_FAILURE;
-
- isci_tmf = isci_request_access_tmf(ireq);
-
- switch (isci_tmf->tmf_code) {
-
- case isci_tmf_sata_srst_high:
- case isci_tmf_sata_srst_low: {
- struct host_to_dev_fis *fis = &ireq->stp.cmd;
-
- memset(fis, 0, sizeof(*fis));
-
- fis->fis_type = 0x27;
- fis->flags &= ~0x80;
- fis->flags &= 0xF0;
- if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
- fis->control |= ATA_SRST;
- else
- fis->control &= ~ATA_SRST;
- break;
- }
- /* other management commnd go here... */
- default:
- return SCI_FAILURE;
- }
-
- /* core builds the protocol specific request
- * based on the h2d fis.
- */
- status = sci_task_request_construct_sata(ireq);
-
- return status;
-}
-
static struct isci_request *isci_task_request_build(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 tag, struct isci_tmf *isci_tmf)
@@ -328,13 +286,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
return NULL;
}
- if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
- isci_tmf->proto = SAS_PROTOCOL_SATA;
- status = isci_sata_management_task_request_build(ireq);
-
- if (status != SCI_SUCCESS)
- return NULL;
- }
return ireq;
}
@@ -873,53 +824,20 @@ static int isci_task_send_lu_reset_sas(
return ret;
}
-static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
- struct isci_remote_device *idev, u8 *lun)
-{
- int ret = TMF_RESP_FUNC_FAILED;
- struct isci_tmf tmf;
-
- /* Send the soft reset to the target */
- #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
- isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
-
- ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
-
- if (ret != TMF_RESP_FUNC_COMPLETE) {
- dev_dbg(&ihost->pdev->dev,
- "%s: Assert SRST failed (%p) = %x",
- __func__, idev, ret);
-
- /* Return the failure so that the LUN reset is escalated
- * to a target reset.
- */
- }
- return ret;
-}
-
-/**
- * isci_task_lu_reset() - This function is one of the SAS Domain Template
- * functions. This is one of the Task Management functoins called by libsas,
- * to reset the given lun. Note the assumption that while this call is
- * executing, no I/O will be sent by the host to the device.
- * @lun: This parameter specifies the lun to be reset.
- *
- * status, zero indicates success.
- */
-int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
+int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
{
- struct isci_host *isci_host = dev_to_ihost(domain_device);
+ struct isci_host *isci_host = dev_to_ihost(dev);
struct isci_remote_device *isci_device;
unsigned long flags;
int ret;
spin_lock_irqsave(&isci_host->scic_lock, flags);
- isci_device = isci_lookup_device(domain_device);
+ isci_device = isci_lookup_device(dev);
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
dev_dbg(&isci_host->pdev->dev,
"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
- __func__, domain_device, isci_host, isci_device);
+ __func__, dev, isci_host, isci_device);
if (!isci_device) {
/* If the device is gone, stop the escalations. */
@@ -928,11 +846,11 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
ret = TMF_RESP_FUNC_COMPLETE;
goto out;
}
- set_bit(IDEV_EH, &isci_device->flags);
/* Send the task management part of the reset. */
- if (sas_protocol_ata(domain_device->tproto)) {
- ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
+ if (dev_is_sata(dev)) {
+ sas_ata_schedule_reset(dev);
+ ret = TMF_RESP_FUNC_COMPLETE;
} else
ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
@@ -1062,9 +980,6 @@ int isci_task_abort_task(struct sas_task *task)
"%s: dev = %p, task = %p, old_request == %p\n",
__func__, isci_device, task, old_request);
- if (isci_device)
- set_bit(IDEV_EH, &isci_device->flags);
-
/* Device reset conditions signalled in task_state_flags are the
* responsbility of libsas to observe at the start of the error
* handler thread.
@@ -1332,29 +1247,35 @@ isci_task_request_complete(struct isci_host *ihost,
}
static int isci_reset_device(struct isci_host *ihost,
+ struct domain_device *dev,
struct isci_remote_device *idev)
{
- struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
- enum sci_status status;
- unsigned long flags;
int rc;
+ unsigned long flags;
+ enum sci_status status;
+ struct sas_phy *phy = sas_get_local_phy(dev);
+ struct isci_port *iport = dev->port->lldd_port;
dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
spin_lock_irqsave(&ihost->scic_lock, flags);
status = sci_remote_device_reset(idev);
- if (status != SCI_SUCCESS) {
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: sci_remote_device_reset(%p) returned %d!\n",
__func__, idev, status);
-
- return TMF_RESP_FUNC_FAILED;
+ rc = TMF_RESP_FUNC_FAILED;
+ goto out;
}
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
- rc = sas_phy_reset(phy, true);
+ if (scsi_is_sas_phy_local(phy)) {
+ struct isci_phy *iphy = &ihost->phys[phy->number];
+
+ rc = isci_port_perform_hard_reset(ihost, iport, iphy);
+ } else
+ rc = sas_phy_reset(phy, !dev_is_sata(dev));
/* Terminate in-progress I/O now. */
isci_remote_device_nuke_requests(ihost, idev);
@@ -1371,7 +1292,8 @@ static int isci_reset_device(struct isci_host *ihost,
}
dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
-
+ out:
+ sas_put_local_phy(phy);
return rc;
}
@@ -1386,35 +1308,15 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
idev = isci_lookup_device(dev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
- if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
- ret = TMF_RESP_FUNC_COMPLETE;
- goto out;
- }
-
- ret = isci_reset_device(ihost, idev);
- out:
- isci_put_device(idev);
- return ret;
-}
-
-int isci_bus_reset_handler(struct scsi_cmnd *cmd)
-{
- struct domain_device *dev = sdev_to_domain_dev(cmd->device);
- struct isci_host *ihost = dev_to_ihost(dev);
- struct isci_remote_device *idev;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
- idev = isci_lookup_device(dev);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
if (!idev) {
+ /* XXX: need to cleanup any ireqs targeting this
+ * domain_device
+ */
ret = TMF_RESP_FUNC_COMPLETE;
goto out;
}
- ret = isci_reset_device(ihost, idev);
+ ret = isci_reset_device(ihost, dev, idev);
out:
isci_put_device(idev);
return ret;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 1b27b3797c6c..7b6d0e32fd9b 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -86,8 +86,6 @@ enum isci_tmf_function_codes {
isci_tmf_func_none = 0,
isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
isci_tmf_ssp_lun_reset = TMF_LU_RESET,
- isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
- isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
};
/**
* struct isci_tmf - This class represents the task management object which
@@ -210,8 +208,6 @@ int isci_queuecommand(
struct scsi_cmnd *scsi_cmd,
void (*donefunc)(struct scsi_cmnd *));
-int isci_bus_reset_handler(struct scsi_cmnd *cmd);
-
/**
* enum isci_completion_selection - This enum defines the possible actions to
* take with respect to a given request's notification back to libsas.
@@ -321,40 +317,4 @@ isci_task_set_completion_status(
return task_notification_selection;
}
-/**
-* isci_execpath_callback() - This function is called from the task
-* execute path when the task needs to callback libsas about the submit-time
-* task failure. The callback occurs either through the task's done function
-* or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O
-* requests, libsas takes the host lock before calling execute task. Therefore
-* in this situation the host lock must be managed before calling the func.
-*
-* @ihost: This parameter is the controller to which the I/O request was sent.
-* @task: This parameter is the I/O request.
-* @func: This parameter is the function to call in the correct context.
-* @status: This parameter is the status code for the completed task.
-*
-*/
-static inline void isci_execpath_callback(struct isci_host *ihost,
- struct sas_task *task,
- void (*func)(struct sas_task *))
-{
- struct domain_device *dev = task->dev;
-
- if (dev_is_sata(dev) && task->uldd_task) {
- unsigned long flags;
-
- /* Since we are still in the submit path, and since
- * libsas takes the host lock on behalf of SATA
- * devices before I/O starts (in the non-discovery case),
- * we need to unlock before we can call the callback function.
- */
- raw_local_irq_save(flags);
- spin_unlock(dev->sata_dev.ap->lock);
- func(task);
- spin_lock(dev->sata_dev.ap->lock);
- raw_local_irq_restore(flags);
- } else
- func(task);
-}
#endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index db47158e0dde..453a740fa68e 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -684,10 +684,8 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
int buflen)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- int value;
switch(param) {
case ISCSI_PARAM_HDRDGST_EN:
@@ -699,16 +697,7 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
break;
case ISCSI_PARAM_MAX_R2T:
- sscanf(buf, "%d", &value);
- if (value <= 0 || !is_power_of_2(value))
- return -EINVAL;
- if (session->max_r2t == value)
- break;
- iscsi_tcp_r2tpool_free(session);
- iscsi_set_param(cls_conn, param, buf, buflen);
- if (iscsi_tcp_r2tpool_alloc(session))
- return -ENOMEM;
- break;
+ return iscsi_tcp_set_max_r2t(conn, buf);
default:
return iscsi_set_param(cls_conn, param, buf, buflen);
}
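The open-coded MAX_R2T handling moves into a shared libiscsi_tcp helper so other iSCSI transports can reuse it. A sketch of what such a helper has to do, mirroring the logic removed above (the actual body lives in libiscsi_tcp and may differ):

	static int example_set_max_r2t(struct iscsi_conn *conn, char *buf)
	{
		struct iscsi_session *session = conn->session;
		int value;

		if (sscanf(buf, "%d", &value) != 1)
			return -EINVAL;
		if (value <= 0 || !is_power_of_2(value))
			return -EINVAL;			/* must be a positive power of two */
		if (session->max_r2t == value)
			return 0;			/* nothing to resize */
		iscsi_tcp_r2tpool_free(session);	/* drop the old R2T pool */
		session->max_r2t = value;
		if (iscsi_tcp_r2tpool_alloc(session))	/* rebuild for the new depth */
			return -ENOMEM;
		return 0;
	}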
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 1d1b0c9da29b..8e561e6a557c 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -337,6 +337,13 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
schedule_delayed_work(&disc->disc_work, delay);
} else
fc_disc_done(disc, DISC_EV_FAILED);
+ } else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
+ /*
+ * if discovery fails due to lport reset, clear
+ * pending flag so that subsequent discovery can
+ * continue
+ */
+ disc->pending = 0;
}
}
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index e17a28d324d0..c2384d501470 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -56,8 +56,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
else {
/* CT requests */
- rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type);
- did = FC_FID_DIR_SERV;
+ rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
}
if (rc) {
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 4d70d96fa5dc..aceffadb21c7 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1642,9 +1642,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
case FC_RCTL_ACK_0:
break;
default:
- FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
- fh->fh_r_ctl,
- fc_exch_rctl_name(fh->fh_r_ctl));
+ if (ep)
+ FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+ fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
break;
}
fc_frame_free(fp);
@@ -2262,7 +2263,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
mp->class = class;
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
- mp->max_xid = max_xid;
+
+ /* reduce range so each per-cpu exch pool fits into one PCPU_MIN_UNIT_SIZE allocation */
+ pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
+ sizeof(struct fc_exch *);
+ if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
+ mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
+ min_xid - 1;
+ } else {
+ mp->max_xid = max_xid;
+ pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
+ (fc_cpu_mask + 1);
+ }
mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
if (!mp->ep_pool)
@@ -2273,7 +2285,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
* divided across all cpus. The exch pointers array memory is
* allocated for exch range per pool.
*/
- pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
mp->pool_max_index = pool_exch_range - 1;
/*
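To see the clamp with concrete numbers (a userspace illustration; it assumes the common 32 KiB PCPU_MIN_UNIT_SIZE, 8-byte exchange pointers, and a small pool header, none of which the patch itself fixes):

	#include <stdio.h>

	int main(void)
	{
		unsigned min_xid = 0, max_xid = 0xffef, cpus = 4;
		unsigned long range = (32768 - 64) / 8;	/* ~4088 exchanges per pool */

		if ((max_xid - min_xid + 1) / cpus > range)
			max_xid = range * cpus + min_xid - 1;
		printf("capped max_xid = %#x\n", max_xid);	/* 0x3fdf */
		return 0;
	}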
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index f607314810ac..f7357308655a 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -485,11 +485,11 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
- &offset, KM_SOFTIRQ0, NULL);
+ &offset, NULL);
} else {
crc = crc32(~0, (u8 *) fh, sizeof(*fh));
copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
- &offset, KM_SOFTIRQ0, &crc);
+ &offset, &crc);
buf = fc_frame_payload_get(fp, 0);
if (len % 4)
crc = crc32(crc, buf + len, 4 - (len % 4));
@@ -650,10 +650,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
* The scatterlist item may be bigger than PAGE_SIZE,
* but we must not cross pages inside the kmap.
*/
- page_addr = kmap_atomic(page, KM_SOFTIRQ0);
+ page_addr = kmap_atomic(page);
memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
sg_bytes);
- kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ kunmap_atomic(page_addr);
data += sg_bytes;
}
offset += sg_bytes;
@@ -1074,8 +1074,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
- int_to_scsilun(fsp->cmd->device->lun,
- (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+ int_to_scsilun(fsp->cmd->device->lun, &fsp->cdb_cmd.fc_lun);
memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
@@ -1257,7 +1256,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
- int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+ int_to_scsilun(lun, &fsp->cdb_cmd.fc_lun);
fsp->wait_for_comp = 1;
init_completion(&fsp->tm_done);
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 1bf9841ef154..8d65a51a7598 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -105,14 +105,13 @@ module_exit(libfc_exit);
* @sg: pointer to the pointer of the SG list.
* @nents: pointer to the remaining number of entries in the SG list.
* @offset: pointer to the current offset in the SG list.
- * @km_type: dedicated page table slot type for kmap_atomic.
* @crc: pointer to the 32-bit crc value.
* If crc is NULL, CRC is not calculated.
*/
u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
struct scatterlist *sg,
u32 *nents, size_t *offset,
- enum km_type km_type, u32 *crc)
+ u32 *crc)
{
size_t remaining = len;
u32 copy_len = 0;
@@ -142,12 +141,11 @@ u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
off = *offset + sg->offset;
sg_bytes = min(sg_bytes,
(size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
- page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
- km_type);
+ page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
if (crc)
*crc = crc32(*crc, buf, sg_bytes);
memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
- kunmap_atomic(page_addr, km_type);
+ kunmap_atomic(page_addr);
buf += sg_bytes;
*offset += sg_bytes;
remaining -= sg_bytes;
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index c7d071289af5..c2830cc66d6a 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -134,6 +134,6 @@ extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
struct scatterlist *sg,
u32 *nents, size_t *offset,
- enum km_type km_type, u32 *crc);
+ u32 *crc);
#endif /* _FC_LIBFC_H_ */
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 83750ebb527f..ef9560dff295 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -116,6 +116,8 @@ static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);
+static void fc_lport_enter_fdmi(struct fc_lport *lport);
+static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
static const char *fc_lport_state_names[] = {
[LPORT_ST_DISABLED] = "disabled",
@@ -126,6 +128,11 @@ static const char *fc_lport_state_names[] = {
[LPORT_ST_RSPN_ID] = "RSPN_ID",
[LPORT_ST_RFT_ID] = "RFT_ID",
[LPORT_ST_RFF_ID] = "RFF_ID",
+ [LPORT_ST_FDMI] = "FDMI",
+ [LPORT_ST_RHBA] = "RHBA",
+ [LPORT_ST_RPA] = "RPA",
+ [LPORT_ST_DHBA] = "DHBA",
+ [LPORT_ST_DPRT] = "DPRT",
[LPORT_ST_SCR] = "SCR",
[LPORT_ST_READY] = "Ready",
[LPORT_ST_LOGO] = "LOGO",
@@ -183,11 +190,14 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
if (lport->state == LPORT_ST_DNS) {
lport->dns_rdata = rdata;
fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
+ } else if (lport->state == LPORT_ST_FDMI) {
+ lport->ms_rdata = rdata;
+ fc_lport_enter_ms(lport, LPORT_ST_DHBA);
} else {
FC_LPORT_DBG(lport, "Received an READY event "
"on port (%6.6x) for the directory "
"server, but the lport is not "
- "in the DNS state, it's in the "
+ "in the DNS or FDMI state, it's in the "
"%d state", rdata->ids.port_id,
lport->state);
lport->tt.rport_logoff(rdata);
@@ -196,7 +206,10 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
case RPORT_EV_LOGO:
case RPORT_EV_FAILED:
case RPORT_EV_STOP:
- lport->dns_rdata = NULL;
+ if (rdata->ids.port_id == FC_FID_DIR_SERV)
+ lport->dns_rdata = NULL;
+ else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
+ lport->ms_rdata = NULL;
break;
case RPORT_EV_NONE:
break;
@@ -1148,7 +1161,10 @@ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
break;
case LPORT_ST_RFF_ID:
- fc_lport_enter_scr(lport);
+ if (lport->fdmi_enabled)
+ fc_lport_enter_fdmi(lport);
+ else
+ fc_lport_enter_scr(lport);
break;
default:
/* should have already been caught by state checks */
@@ -1163,6 +1179,85 @@ err:
}
/**
+ * fc_lport_ms_resp() - Handle response to a management server exchange
+ * @sp: current sequence in exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+
+ FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
+ FC_LPORT_DBG(lport, "Received a management server response, "
+ "but in state %s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+ if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+ ct->ct_fs_type == FC_FST_MGMT &&
+ ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
+ FC_LPORT_DBG(lport, "Received a management server response, "
+ "reason=%d explain=%d\n",
+ ct->ct_reason,
+ ct->ct_explan);
+
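+ /*
+ * FDMI registration walks DHBA -> DPRT -> RHBA -> RPA, then
+ * enters SCR once the management server has been updated.
+ */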
+ switch (lport->state) {
+ case LPORT_ST_RHBA:
+ if (ntohs(ct->ct_cmd) == FC_FS_ACC)
+ fc_lport_enter_ms(lport, LPORT_ST_RPA);
+ else /* error: skip RPA and go straight to SCR */
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_RPA:
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_DPRT:
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+ break;
+ case LPORT_ST_DHBA:
+ fc_lport_enter_ms(lport, LPORT_ST_DPRT);
+ break;
+ default:
+ /* should have already been caught by state checks */
+ break;
+ }
+ } else {
+ /* Invalid Frame? */
+ fc_lport_error(lport, fp);
+ }
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
* fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
* @sp: current sequence in SCR exchange
* @fp: response frame
@@ -1339,6 +1434,123 @@ err:
}
/**
+ * fc_lport_enter_ms() - management server commands
+ * @lport: Fibre Channel local port to register
+ * @state: management server request state to enter (DHBA, DPRT, RHBA, RPA)
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
+{
+ struct fc_frame *fp;
+ enum fc_fdmi_req cmd;
+ int size = sizeof(struct fc_ct_hdr);
+ size_t len;
+ int numattrs;
+
+ FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
+ fc_lport_state_names[state],
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, state);
+
+ switch (state) {
+ case LPORT_ST_RHBA:
+ cmd = FC_FDMI_RHBA;
+ /* Number of HBA Attributes */
+ numattrs = 10;
+ len = sizeof(struct fc_fdmi_rhba);
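+ /* the rhba frame embeds one attr entry; drop it and count
+ * each attribute's header and payload explicitly instead
+ */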
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+
+ size += len;
+ break;
+ case LPORT_ST_RPA:
+ cmd = FC_FDMI_RPA;
+ /* Number of Port Attributes */
+ numattrs = 6;
+ len = sizeof(struct fc_fdmi_rpa);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+
+ size += len;
+ break;
+ case LPORT_ST_DPRT:
+ cmd = FC_FDMI_DPRT;
+ len = sizeof(struct fc_fdmi_dprt);
+ size += len;
+ break;
+ case LPORT_ST_DHBA:
+ cmd = FC_FDMI_DHBA;
+ len = sizeof(struct fc_fdmi_dhba);
+ size += len;
+ break;
+ default:
+ fc_lport_error(lport, NULL);
+ return;
+ }
+
+ FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
+ cmd, (int)len, size);
+ fp = fc_frame_alloc(lport, size);
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
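+ /* CT request to the management server, using the same 3 * R_A_TOV
+ * retry interval as the name server requests
+ */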
+ if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
+ fc_lport_ms_resp,
+ lport, 3 * lport->r_a_tov))
+ fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_enter_fdmi() - Create a fc_rport for the management server
+ * @lport: The local port requesting a remote port for the management server
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_fdmi(struct fc_lport *lport)
+{
+ struct fc_rport_priv *rdata;
+
+ FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_FDMI);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (!rdata)
+ goto err;
+
+ rdata->ops = &fc_lport_rport_ops;
+ lport->tt.rport_login(rdata);
+ return;
+
+err:
+ fc_lport_error(lport, NULL);
+}
+
+/**
* fc_lport_timeout() - Handler for the retry_work timer
* @work: The work struct of the local port
*/
@@ -1371,6 +1583,15 @@ static void fc_lport_timeout(struct work_struct *work)
case LPORT_ST_RFF_ID:
fc_lport_enter_ns(lport, lport->state);
break;
+ case LPORT_ST_FDMI:
+ fc_lport_enter_fdmi(lport);
+ break;
+ case LPORT_ST_RHBA:
+ case LPORT_ST_RPA:
+ case LPORT_ST_DHBA:
+ case LPORT_ST_DPRT:
+ fc_lport_enter_ms(lport, lport->state);
+ break;
case LPORT_ST_SCR:
fc_lport_enter_scr(lport);
break;
@@ -1522,8 +1743,16 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
mfs = ntohs(flp->fl_csp.sp_bb_data) &
FC_SP_BB_DATA_MASK;
if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
- mfs < lport->mfs)
+ mfs <= lport->mfs) {
lport->mfs = mfs;
+ fc_host_maxframe_size(lport->host) = mfs;
+ } else {
+ FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+ "lport->mfs:%hu\n", mfs, lport->mfs);
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
csp_flags = ntohs(flp->fl_csp.sp_features);
r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
@@ -1698,7 +1927,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
job->reply->reply_payload_rcv_len +=
fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
- &info->offset, KM_BIO_SRC_IRQ, NULL);
+ &info->offset, NULL);
if (fr_eof(fp) == FC_EOF_T &&
(ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 143bbe448bec..82c3fd4bc938 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1909,6 +1909,16 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
spin_lock(&session->lock);
+ task = (struct iscsi_task *)sc->SCp.ptr;
+ if (!task) {
+ /*
+ * Raced with completion. Blk layer has taken ownership
+ * so let timeout code complete it now.
+ */
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
* We are probably in the middle of iscsi recovery so let
@@ -1925,16 +1935,6 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
goto done;
}
- task = (struct iscsi_task *)sc->SCp.ptr;
- if (!task) {
- /*
- * Raced with completion. Just reset timer, and let it
- * complete normally
- */
- rc = BLK_EH_RESET_TIMER;
- goto done;
- }
-
/*
* If we have sent (at least queued to the network layer) a pdu or
* recvd one for the task since the last timeout ask for
@@ -2807,6 +2807,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->username);
kfree(session->username_in);
kfree(session->targetname);
+ kfree(session->targetalias);
kfree(session->initiatorname);
kfree(session->ifacename);
@@ -3200,7 +3201,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
sscanf(buf, "%d", &session->initial_r2t_en);
break;
case ISCSI_PARAM_MAX_R2T:
- sscanf(buf, "%d", &session->max_r2t);
+ sscanf(buf, "%hu", &session->max_r2t);
break;
case ISCSI_PARAM_IMM_DATA_EN:
sscanf(buf, "%d", &session->imm_data_en);
@@ -3233,6 +3234,8 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
return iscsi_switch_str_param(&session->password_in, buf);
case ISCSI_PARAM_TARGET_NAME:
return iscsi_switch_str_param(&session->targetname, buf);
+ case ISCSI_PARAM_TARGET_ALIAS:
+ return iscsi_switch_str_param(&session->targetalias, buf);
case ISCSI_PARAM_TPGT:
sscanf(buf, "%d", &session->tpgt);
break;
@@ -3299,6 +3302,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_TARGET_NAME:
len = sprintf(buf, "%s\n", session->targetname);
break;
+ case ISCSI_PARAM_TARGET_ALIAS:
+ len = sprintf(buf, "%s\n", session->targetalias);
+ break;
case ISCSI_PARAM_TPGT:
len = sprintf(buf, "%d\n", session->tpgt);
break;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 5715a3d0a3d3..552e8a2b6f5f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -135,7 +135,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
if (recv) {
segment->atomic_mapped = true;
- segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+ segment->sg_mapped = kmap_atomic(sg_page(sg));
} else {
segment->atomic_mapped = false;
/* the xmit path can sleep with the page mapped so use kmap */
@@ -149,7 +149,7 @@ void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
{
if (segment->sg_mapped) {
if (segment->atomic_mapped)
- kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+ kunmap_atomic(segment->sg_mapped);
else
kunmap(sg_page(segment->sg));
segment->sg_mapped = NULL;
@@ -1170,6 +1170,24 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
}
EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
+int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf)
+{
+ struct iscsi_session *session = conn->session;
+ unsigned short r2ts = 0;
+
+ sscanf(buf, "%hu", &r2ts);
+ if (session->max_r2t == r2ts)
+ return 0;
+
+ if (!r2ts || !is_power_of_2(r2ts))
+ return -EINVAL;
+
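+ /* resize by freeing the old r2t pool and reallocating it
+ * at the new depth
+ */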
+ session->max_r2t = r2ts;
+ iscsi_tcp_r2tpool_free(session);
+ return iscsi_tcp_r2tpool_alloc(session);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t);
+
void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index db9238f2ecb8..bc0cecc6ad62 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -23,6 +23,8 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/async.h>
+#include <linux/export.h>
#include <scsi/sas_ata.h>
#include "sas_internal.h"
@@ -93,22 +95,47 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
static void sas_ata_task_done(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
- struct domain_device *dev;
+ struct domain_device *dev = task->dev;
struct task_status_struct *stat = &task->task_status;
struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
- struct sas_ha_struct *sas_ha;
+ struct sas_ha_struct *sas_ha = dev->port->ha;
enum ata_completion_errors ac;
unsigned long flags;
struct ata_link *link;
+ struct ata_port *ap;
+
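+ /* under done_lock either leave the task to libsas-eh (ha frozen)
+ * or detach it from the scsi_cmnd so eh can no longer claim it
+ */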
+ spin_lock_irqsave(&dev->done_lock, flags);
+ if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
+ task = NULL;
+ else if (qc && qc->scsicmd)
+ ASSIGN_SAS_TASK(qc->scsicmd, NULL);
+ spin_unlock_irqrestore(&dev->done_lock, flags);
+
+ /* check if libsas-eh got to the task before us */
+ if (unlikely(!task))
+ return;
if (!qc)
goto qc_already_gone;
- dev = qc->ap->private_data;
- sas_ha = dev->port->ha;
- link = &dev->sata_dev.ap->link;
+ ap = qc->ap;
+ link = &ap->link;
+
+ spin_lock_irqsave(ap->lock, flags);
+ /* check if we lost the race with libata/sas_ata_post_internal() */
+ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ if (qc->scsicmd)
+ goto qc_already_gone;
+ else {
+ /* if eh is not involved and the port is frozen then the
+ * ata internal abort process has taken responsibility
+ * for this sas_task
+ */
+ return;
+ }
+ }
- spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
@@ -121,10 +148,6 @@ static void sas_ata_task_done(struct sas_task *task)
if (unlikely(link->eh_info.err_mask))
qc->flags |= ATA_QCFLAG_FAILED;
}
-
- dev->sata_dev.sstatus = resp->sstatus;
- dev->sata_dev.serror = resp->serror;
- dev->sata_dev.scontrol = resp->scontrol;
} else {
ac = sas_to_ata_err(stat);
if (ac) {
@@ -144,24 +167,8 @@ static void sas_ata_task_done(struct sas_task *task)
}
qc->lldd_task = NULL;
- if (qc->scsicmd)
- ASSIGN_SAS_TASK(qc->scsicmd, NULL);
ata_qc_complete(qc);
- spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
-
- /*
- * If the sas_task has an ata qc, a scsi_cmnd and the aborted
- * flag is set, then we must have come in via the libsas EH
- * functions. When we exit this function, we need to put the
- * scsi_cmnd on the list of finished errors. The ata_qc_complete
- * call cleans up the libata side of things but we're protected
- * from the scsi_cmnd going away because the scsi_cmnd is owned
- * by the EH, making libata's call to scsi_done a NOP.
- */
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (qc->scsicmd && task->task_state_flags & SAS_TASK_STATE_ABORTED)
- scsi_eh_finish_cmd(qc->scsicmd, &sas_ha->eh_done_q);
- spin_unlock_irqrestore(&task->task_state_lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
qc_already_gone:
list_del_init(&task->list);
@@ -170,23 +177,30 @@ qc_already_gone:
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
- int res;
+ unsigned long flags;
struct sas_task *task;
- struct domain_device *dev = qc->ap->private_data;
+ struct scatterlist *sg;
+ int ret = AC_ERR_SYSTEM;
+ unsigned int si, xfer = 0;
+ struct ata_port *ap = qc->ap;
+ struct domain_device *dev = ap->private_data;
struct sas_ha_struct *sas_ha = dev->port->ha;
struct Scsi_Host *host = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(host->transportt);
- struct scatterlist *sg;
- unsigned int xfer = 0;
- unsigned int si;
+
+ /* TODO: audit callers to ensure they are ready for qc_issue to
+ * unconditionally re-enable interrupts
+ */
+ local_irq_save(flags);
+ spin_unlock(ap->lock);
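+ /* qc_issue is entered with ap->lock held and irqs off; drop the
+ * lock so the lldd can take its own locks, then restore both
+ * before returning
+ */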
/* If the device fell off, no sense in issuing commands */
- if (dev->gone)
- return AC_ERR_SYSTEM;
+ if (test_bit(SAS_DEV_GONE, &dev->state))
+ goto out;
task = sas_alloc_task(GFP_ATOMIC);
if (!task)
- return AC_ERR_SYSTEM;
+ goto out;
task->dev = dev;
task->task_proto = SAS_PROTOCOL_STP;
task->task_done = sas_ata_task_done;
@@ -231,21 +245,24 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
ASSIGN_SAS_TASK(qc->scsicmd, task);
if (sas_ha->lldd_max_execute_num < 2)
- res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
+ ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
else
- res = sas_queue_up(task);
+ ret = sas_queue_up(task);
/* Examine */
- if (res) {
- SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
+ if (ret) {
+ SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, NULL);
sas_free_task(task);
- return AC_ERR_SYSTEM;
+ ret = AC_ERR_SYSTEM;
}
- return 0;
+ out:
+ spin_lock(ap->lock);
+ local_irq_restore(flags);
+ return ret;
}
static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
@@ -256,83 +273,222 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
return true;
}
-static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
+{
+ return to_sas_internal(dev->port->ha->core.shost->transportt);
+}
+
+static void sas_get_ata_command_set(struct domain_device *dev);
+
+int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
+ if (phy->attached_tproto & SAS_PROTOCOL_STP)
+ dev->tproto = phy->attached_tproto;
+ if (phy->attached_sata_dev)
+ dev->tproto |= SATA_DEV;
+
+ if (phy->attached_dev_type == SATA_PENDING)
+ dev->dev_type = SATA_PENDING;
+ else {
+ int res;
+
+ dev->dev_type = SATA_DEV;
+ res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
+ &dev->sata_dev.rps_resp);
+ if (res) {
+ SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
+ "0x%x\n", SAS_ADDR(dev->parent->sas_addr),
+ phy->phy_id, res);
+ return res;
+ }
+ memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
+ sizeof(struct dev_to_host_fis));
+ /* TODO switch to ata_dev_classify() */
+ sas_get_ata_command_set(dev);
+ }
+ return 0;
+}
+
+static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
+{
+ int res;
+
+ /* we weren't pending, so successfully end the reset sequence now */
+ if (dev->dev_type != SATA_PENDING)
+ return 1;
+
+ /* hmmm, if this succeeds do we need to repost the domain_device to the
+ * lldd so it can pick up new parameters?
+ */
+ res = sas_get_ata_info(dev, phy);
+ if (res)
+ return 0; /* retry */
+ else
+ return 1;
+}
+
+static int smp_ata_check_ready(struct ata_link *link)
+{
+ int res;
struct ata_port *ap = link->ap;
struct domain_device *dev = ap->private_data;
- struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
- int res = TMF_RESP_FUNC_FAILED;
- int ret = 0;
+ struct domain_device *ex_dev = dev->parent;
+ struct sas_phy *phy = sas_get_local_phy(dev);
+ struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];
- if (i->dft->lldd_I_T_nexus_reset)
- res = i->dft->lldd_I_T_nexus_reset(dev);
+ res = sas_ex_phy_discover(ex_dev, phy->number);
+ sas_put_local_phy(phy);
- if (res != TMF_RESP_FUNC_COMPLETE) {
- SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
- ret = -EAGAIN;
+ /* break the wait early if the expander is unreachable,
+ * otherwise keep polling
+ */
+ if (res == -ECOMM)
+ return res;
+ if (res != SMP_RESP_FUNC_ACC)
+ return 0;
+
+ switch (ex_phy->attached_dev_type) {
+ case SATA_PENDING:
+ return 0;
+ case SAS_END_DEV:
+ if (ex_phy->attached_sata_dev)
+ return sas_ata_clear_pending(dev, ex_phy);
+ default:
+ return -ENODEV;
}
+}
- switch (dev->sata_dev.command_set) {
- case ATA_COMMAND_SET:
- SAS_DPRINTK("%s: Found ATA device.\n", __func__);
- *class = ATA_DEV_ATA;
- break;
- case ATAPI_COMMAND_SET:
- SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
- *class = ATA_DEV_ATAPI;
- break;
- default:
- SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
- __func__,
- dev->sata_dev.command_set);
- *class = ATA_DEV_UNKNOWN;
- break;
+static int local_ata_check_ready(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i = dev_to_sas_internal(dev);
+
+ if (i->dft->lldd_ata_check_ready)
+ return i->dft->lldd_ata_check_ready(dev);
+ else {
+ /* lldds that don't implement 'ready' checking get the
+ * old default behavior of not coordinating reset
+ * recovery with libata
+ */
+ return 1;
}
+}
- ap->cbl = ATA_CBL_SATA;
- return ret;
+static int sas_ata_printk(const char *level, const struct domain_device *ddev,
+ const char *fmt, ...)
+{
+ struct ata_port *ap = ddev->sata_dev.ap;
+ struct device *dev = &ddev->rphy->dev;
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ r = printk("%ssas: ata%u: %s: %pV",
+ level, ap->print_id, dev_name(dev), &vaf);
+
+ va_end(args);
+
+ return r;
}
-static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
+ int ret = 0, res;
+ struct sas_phy *phy;
struct ata_port *ap = link->ap;
+ int (*check_ready)(struct ata_link *link);
struct domain_device *dev = ap->private_data;
- struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
- int res = TMF_RESP_FUNC_FAILED;
- int ret = 0;
+ struct sas_internal *i = dev_to_sas_internal(dev);
- if (i->dft->lldd_ata_soft_reset)
- res = i->dft->lldd_ata_soft_reset(dev);
+ res = i->dft->lldd_I_T_nexus_reset(dev);
+ if (res == -ENODEV)
+ return res;
- if (res != TMF_RESP_FUNC_COMPLETE) {
- SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
- ret = -EAGAIN;
- }
+ if (res != TMF_RESP_FUNC_COMPLETE)
+ sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+
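+ /* direct-attached links poll the lldd for readiness;
+ * expander-attached links poll via SMP DISCOVER
+ */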
+ phy = sas_get_local_phy(dev);
+ if (scsi_is_sas_phy_local(phy))
+ check_ready = local_ata_check_ready;
+ else
+ check_ready = smp_ata_check_ready;
+ sas_put_local_phy(phy);
+
+ ret = ata_wait_after_reset(link, deadline, check_ready);
+ if (ret && ret != -EAGAIN)
+ sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
+ /* XXX: if the class changes during the reset the upper layer
+ * should be informed, if the device has gone away we assume
+ * libsas will eventually delete it
+ */
switch (dev->sata_dev.command_set) {
case ATA_COMMAND_SET:
- SAS_DPRINTK("%s: Found ATA device.\n", __func__);
*class = ATA_DEV_ATA;
break;
case ATAPI_COMMAND_SET:
- SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
*class = ATA_DEV_ATAPI;
break;
- default:
- SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
- __func__, dev->sata_dev.command_set);
- *class = ATA_DEV_UNKNOWN;
- break;
}
ap->cbl = ATA_CBL_SATA;
return ret;
}
+/*
+ * notify the lldd to forget the sas_task for this internal ata command
+ * that bypasses scsi-eh
+ */
+static void sas_ata_internal_abort(struct sas_task *task)
+{
+ struct sas_internal *si = dev_to_sas_internal(task->dev);
+ unsigned long flags;
+ int res;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
+ task->task_state_flags & SAS_TASK_STATE_DONE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
+ task);
+ goto out;
+ }
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ res = si->dft->lldd_abort_task(task);
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE ||
+ res == TMF_RESP_FUNC_COMPLETE) {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ goto out;
+ }
+
+ /* XXX we are not prepared to deal with ->lldd_abort_task()
+ * failures. TODO: lldds need to unconditionally forget about
+ * aborted ata tasks, otherwise we (likely) leak the sas task
+ * here
+ */
+ SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task);
+
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ return;
+ out:
+ list_del_init(&task->list);
+ sas_free_task(task);
+}
+
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_FAILED)
@@ -340,30 +496,35 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
if (qc->err_mask) {
/*
- * Find the sas_task and kill it. By this point,
- * libata has decided to kill the qc, so we needn't
- * bother with sas_ata_task_done. But we still
- * ought to abort the task.
+ * Find the sas_task and kill it. By this point, libata
+ * has decided to kill the qc and has frozen the port.
+ * In this state sas_ata_task_done() will no longer free
+ * the sas_task, so we need to notify the lldd (via
+ * ->lldd_abort_task) that the task is dead and free it
+ * ourselves.
*/
struct sas_task *task = qc->lldd_task;
- unsigned long flags;
qc->lldd_task = NULL;
- if (task) {
- /* Should this be a AT(API) device reset? */
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- task->uldd_task = NULL;
- __sas_task_abort(task);
- }
+ if (!task)
+ return;
+ task->uldd_task = NULL;
+ sas_ata_internal_abort(task);
}
}
+
+static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
+{
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i = dev_to_sas_internal(dev);
+
+ if (i->dft->lldd_ata_set_dmamode)
+ i->dft->lldd_ata_set_dmamode(dev);
+}
+
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
- .softreset = sas_ata_soft_reset,
.hardreset = sas_ata_hard_reset,
.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
@@ -374,6 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
.qc_fill_rtf = sas_ata_qc_fill_rtf,
.port_start = ata_sas_port_start,
.port_stop = ata_sas_port_stop,
+ .set_dmamode = sas_ata_set_dmamode,
};
static struct ata_port_info sata_port_info = {
@@ -384,11 +546,10 @@ static struct ata_port_info sata_port_info = {
.port_ops = &sas_sata_ops
};
-int sas_ata_init_host_and_port(struct domain_device *found_dev,
- struct scsi_target *starget)
+int sas_ata_init_host_and_port(struct domain_device *found_dev)
{
- struct Scsi_Host *shost = dev_to_shost(&starget->dev);
- struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct sas_ha_struct *ha = found_dev->port->ha;
+ struct Scsi_Host *shost = ha->core.shost;
struct ata_port *ap;
ata_host_init(&found_dev->sata_dev.ata_host,
@@ -406,6 +567,8 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
+ /* publish initialized ata port */
+ smp_wmb();
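+ /* paired with the smp_rmb() in sas_ata_dev_eh_valid() */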
found_dev->sata_dev.ap = ap;
return 0;
@@ -436,168 +599,14 @@ void sas_ata_task_abort(struct sas_task *task)
complete(waiting);
}
-static void sas_task_timedout(unsigned long _task)
-{
- struct sas_task *task = (void *) _task;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- complete(&task->completion);
-}
-
-static void sas_disc_task_done(struct sas_task *task)
-{
- if (!del_timer(&task->timer))
- return;
- complete(&task->completion);
-}
-
-#define SAS_DEV_TIMEOUT 10
-
-/**
- * sas_execute_task -- Basic task processing for discovery
- * @task: the task to be executed
- * @buffer: pointer to buffer to do I/O
- * @size: size of @buffer
- * @dma_dir: DMA direction. DMA_xxx
- */
-static int sas_execute_task(struct sas_task *task, void *buffer, int size,
- enum dma_data_direction dma_dir)
-{
- int res = 0;
- struct scatterlist *scatter = NULL;
- struct task_status_struct *ts = &task->task_status;
- int num_scatter = 0;
- int retries = 0;
- struct sas_internal *i =
- to_sas_internal(task->dev->port->ha->core.shost->transportt);
-
- if (dma_dir != DMA_NONE) {
- scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
- if (!scatter)
- goto out;
-
- sg_init_one(scatter, buffer, size);
- num_scatter = 1;
- }
-
- task->task_proto = task->dev->tproto;
- task->scatter = scatter;
- task->num_scatter = num_scatter;
- task->total_xfer_len = size;
- task->data_dir = dma_dir;
- task->task_done = sas_disc_task_done;
- if (dma_dir != DMA_NONE &&
- sas_protocol_ata(task->task_proto)) {
- task->num_scatter = dma_map_sg(task->dev->port->ha->dev,
- task->scatter,
- task->num_scatter,
- task->data_dir);
- }
-
- for (retries = 0; retries < 5; retries++) {
- task->task_state_flags = SAS_TASK_STATE_PENDING;
- init_completion(&task->completion);
-
- task->timer.data = (unsigned long) task;
- task->timer.function = sas_task_timedout;
- task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
- add_timer(&task->timer);
-
- res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
- if (res) {
- del_timer(&task->timer);
- SAS_DPRINTK("executing SAS discovery task failed:%d\n",
- res);
- goto ex_err;
- }
- wait_for_completion(&task->completion);
- res = -ECOMM;
- if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
- int res2;
- SAS_DPRINTK("task aborted, flags:0x%x\n",
- task->task_state_flags);
- res2 = i->dft->lldd_abort_task(task);
- SAS_DPRINTK("came back from abort task\n");
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- if (res2 == TMF_RESP_FUNC_COMPLETE)
- continue; /* Retry the task */
- else
- goto ex_err;
- }
- }
- if (task->task_status.stat == SAM_STAT_BUSY ||
- task->task_status.stat == SAM_STAT_TASK_SET_FULL ||
- task->task_status.stat == SAS_QUEUE_FULL) {
- SAS_DPRINTK("task: q busy, sleeping...\n");
- schedule_timeout_interruptible(HZ);
- } else if (task->task_status.stat == SAM_STAT_CHECK_CONDITION) {
- struct scsi_sense_hdr shdr;
-
- if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
- &shdr)) {
- SAS_DPRINTK("couldn't normalize sense\n");
- continue;
- }
- if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
- (shdr.sense_key == 2 && shdr.asc == 4 &&
- shdr.ascq == 1)) {
- SAS_DPRINTK("device %016llx LUN: %016llx "
- "powering up or not ready yet, "
- "sleeping...\n",
- SAS_ADDR(task->dev->sas_addr),
- SAS_ADDR(task->ssp_task.LUN));
-
- schedule_timeout_interruptible(5*HZ);
- } else if (shdr.sense_key == 1) {
- res = 0;
- break;
- } else if (shdr.sense_key == 5) {
- break;
- } else {
- SAS_DPRINTK("dev %016llx LUN: %016llx "
- "sense key:0x%x ASC:0x%x ASCQ:0x%x"
- "\n",
- SAS_ADDR(task->dev->sas_addr),
- SAS_ADDR(task->ssp_task.LUN),
- shdr.sense_key,
- shdr.asc, shdr.ascq);
- }
- } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
- task->task_status.stat != SAM_STAT_GOOD) {
- SAS_DPRINTK("task finished with resp:0x%x, "
- "stat:0x%x\n",
- task->task_status.resp,
- task->task_status.stat);
- goto ex_err;
- } else {
- res = 0;
- break;
- }
- }
-ex_err:
- if (dma_dir != DMA_NONE) {
- if (sas_protocol_ata(task->task_proto))
- dma_unmap_sg(task->dev->port->ha->dev,
- task->scatter, task->num_scatter,
- task->data_dir);
- kfree(scatter);
- }
-out:
- return res;
-}
-
-/* ---------- SATA ---------- */
-
static void sas_get_ata_command_set(struct domain_device *dev)
{
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
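+ /* a SATA_PENDING device has not yet delivered its D2H signature FIS */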
+ if (dev->dev_type == SATA_PENDING)
+ return;
+
if ((fis->sector_count == 1 && /* ATA */
fis->lbal == 1 &&
fis->lbam == 0 &&
@@ -636,224 +645,152 @@ static void sas_get_ata_command_set(struct domain_device *dev)
dev->sata_dev.command_set = ATAPI_COMMAND_SET;
}
-/**
- * sas_issue_ata_cmd -- Basic SATA command processing for discovery
- * @dev: the device to send the command to
- * @command: the command register
- * @features: the features register
- * @buffer: pointer to buffer to do I/O
- * @size: size of @buffer
- * @dma_dir: DMA direction. DMA_xxx
- */
-static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
- u8 features, void *buffer, int size,
- enum dma_data_direction dma_dir)
-{
- int res = 0;
- struct sas_task *task;
- struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
- &dev->frame_rcvd[0];
-
- res = -ENOMEM;
- task = sas_alloc_task(GFP_KERNEL);
- if (!task)
- goto out;
-
- task->dev = dev;
-
- task->ata_task.fis.fis_type = 0x27;
- task->ata_task.fis.command = command;
- task->ata_task.fis.features = features;
- task->ata_task.fis.device = d2h_fis->device;
- task->ata_task.retry_count = 1;
-
- res = sas_execute_task(task, buffer, size, dma_dir);
-
- sas_free_task(task);
-out:
- return res;
-}
-
-#define ATA_IDENTIFY_DEV 0xEC
-#define ATA_IDENTIFY_PACKET_DEV 0xA1
-#define ATA_SET_FEATURES 0xEF
-#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
-
-/**
- * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
- * @dev: STP/SATA device of interest (ATA/ATAPI)
- *
- * The LLDD has already been notified of this device, so that we can
- * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
- * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
- * performance for this device.
- */
-static int sas_discover_sata_dev(struct domain_device *dev)
+void sas_probe_sata(struct asd_sas_port *port)
{
- int res;
- __le16 *identify_x;
- u8 command;
+ struct domain_device *dev, *n;
+ int err;
- identify_x = kzalloc(512, GFP_KERNEL);
- if (!identify_x)
- return -ENOMEM;
-
- if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
- dev->sata_dev.identify_device = identify_x;
- command = ATA_IDENTIFY_DEV;
- } else {
- dev->sata_dev.identify_packet_device = identify_x;
- command = ATA_IDENTIFY_PACKET_DEV;
- }
+ mutex_lock(&port->ha->disco_mutex);
+ list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+ if (!dev_is_sata(dev))
+ continue;
- res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
- DMA_FROM_DEVICE);
- if (res)
- goto out_err;
-
- /* lives on the media? */
- if (le16_to_cpu(identify_x[0]) & 4) {
- /* incomplete response */
- SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
- "dev %llx\n", SAS_ADDR(dev->sas_addr));
- if (!(identify_x[83] & cpu_to_le16(1<<6)))
- goto cont1;
- res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
- ATA_FEATURE_PUP_STBY_SPIN_UP,
- NULL, 0, DMA_NONE);
- if (res)
- goto cont1;
-
- schedule_timeout_interruptible(5*HZ); /* More time? */
- res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
- DMA_FROM_DEVICE);
- if (res)
- goto out_err;
+ err = sas_ata_init_host_and_port(dev);
+ if (err)
+ sas_fail_probe(dev, __func__, err);
+ else
+ ata_sas_async_port_init(dev->sata_dev.ap);
}
-cont1:
- /* XXX Hint: register this SATA device with SATL.
- When this returns, dev->sata_dev->lu is alive and
- present.
- sas_satl_register_dev(dev);
- */
+ mutex_unlock(&port->ha->disco_mutex);
- sas_fill_in_rphy(dev, dev->rphy);
+ list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+ if (!dev_is_sata(dev))
+ continue;
- return 0;
-out_err:
- dev->sata_dev.identify_packet_device = NULL;
- dev->sata_dev.identify_device = NULL;
- kfree(identify_x);
- return res;
-}
+ sas_ata_wait_eh(dev);
-static int sas_discover_sata_pm(struct domain_device *dev)
-{
- return -ENODEV;
+ /* if libata could not bring the link up, don't surface
+ * the device
+ */
+ if (ata_dev_disabled(sas_to_ata_dev(dev)))
+ sas_fail_probe(dev, __func__, -ENODEV);
+ }
}
/**
* sas_discover_sata -- discover an STP/SATA domain device
* @dev: pointer to struct domain_device of interest
*
- * First we notify the LLDD of this device, so we can send frames to
- * it. Then depending on the type of device we call the appropriate
- * discover functions. Once device discover is done, we notify the
- * LLDD so that it can fine-tune its parameters for the device, by
- * removing it and then adding it. That is, the second time around,
- * the driver would have certain fields, that it is looking at, set.
- * Finally we initialize the kobj so that the device can be added to
- * the system at registration time. Devices directly attached to a HA
- * port, have no parents. All other devices do, and should have their
- * "parent" pointer set appropriately before calling this function.
+ * Devices directly attached to a HA port, have no parents. All other
+ * devices do, and should have their "parent" pointer set appropriately
+ * before calling this function.
*/
int sas_discover_sata(struct domain_device *dev)
{
int res;
+ if (dev->dev_type == SATA_PM)
+ return -ENODEV;
+
sas_get_ata_command_set(dev);
+ sas_fill_in_rphy(dev, dev->rphy);
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
- switch (dev->dev_type) {
- case SATA_DEV:
- res = sas_discover_sata_dev(dev);
- break;
- case SATA_PM:
- res = sas_discover_sata_pm(dev);
- break;
- default:
- break;
- }
- sas_notify_lldd_dev_gone(dev);
- if (!res) {
- sas_notify_lldd_dev_found(dev);
- res = sas_rphy_add(dev->rphy);
- }
-
- return res;
+ sas_discover_event(dev->port, DISCE_PROBE);
+ return 0;
}
-void sas_ata_strategy_handler(struct Scsi_Host *shost)
+static void async_sas_ata_eh(void *data, async_cookie_t cookie)
{
- struct scsi_device *sdev;
+ struct domain_device *dev = data;
+ struct ata_port *ap = dev->sata_dev.ap;
+ struct sas_ha_struct *ha = dev->port->ha;
- shost_for_each_device(sdev, shost) {
- struct domain_device *ddev = sdev_to_domain_dev(sdev);
- struct ata_port *ap = ddev->sata_dev.ap;
+ /* hold a reference over eh since we may be racing with final
+ * remove once all commands are completed
+ */
+ kref_get(&dev->kref);
+ sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
+ ata_scsi_port_error_handler(ha->core.shost, ap);
+ sas_put_device(dev);
+}
- if (!dev_is_sata(ddev))
- continue;
+static bool sas_ata_dev_eh_valid(struct domain_device *dev)
+{
+ struct ata_port *ap;
- ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
- ata_scsi_port_error_handler(shost, ap);
- }
+ if (!dev_is_sata(dev))
+ return false;
+ ap = dev->sata_dev.ap;
+ /* consume fully initialized ata ports */
+ smp_rmb();
+ return !!ap;
}
-int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task,
- enum blk_eh_timer_return *rtn)
+void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
- struct domain_device *ddev = cmd_to_domain_dev(cmd);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ LIST_HEAD(async);
+ int i;
+
+ /* it's ok to defer revalidation events during ata eh, these
+ * disks are in one of three states:
+ * 1/ present for initial domain discovery, and these
+ * resets will cause bcn flutters
+ * 2/ hot removed, we'll discover that after eh fails
+ * 3/ hot added after initial discovery, lost the race, and need
+ * to catch the next train.
+ */
+ sas_disable_revalidation(sas_ha);
- if (!dev_is_sata(ddev) || task)
- return 0;
+ spin_lock_irq(&sas_ha->phy_port_lock);
+ for (i = 0; i < sas_ha->num_phys; i++) {
+ struct asd_sas_port *port = sas_ha->sas_port[i];
+ struct domain_device *dev;
- /* we're a sata device with no task, so this must be a libata
- * eh timeout. Ideally should hook into libata timeout
- * handling, but there's no point, it just wants to activate
- * the eh thread */
- *rtn = BLK_EH_NOT_HANDLED;
- return 1;
+ spin_lock(&port->dev_list_lock);
+ list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+ if (!sas_ata_dev_eh_valid(dev))
+ continue;
+ async_schedule_domain(async_sas_ata_eh, dev, &async);
+ }
+ spin_unlock(&port->dev_list_lock);
+ }
+ spin_unlock_irq(&sas_ha->phy_port_lock);
+
+ async_synchronize_full_domain(&async);
+
+ sas_enable_revalidation(sas_ha);
}
-int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q)
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+ struct list_head *done_q)
{
- int rtn = 0;
struct scsi_cmnd *cmd, *n;
- struct ata_port *ap;
+ struct domain_device *eh_dev;
do {
LIST_HEAD(sata_q);
-
- ap = NULL;
+ eh_dev = NULL;
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct domain_device *ddev = cmd_to_domain_dev(cmd);
if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
continue;
- if (ap && ap != ddev->sata_dev.ap)
+ if (eh_dev && eh_dev != ddev)
continue;
- ap = ddev->sata_dev.ap;
- rtn = 1;
+ eh_dev = ddev;
list_move(&cmd->eh_entry, &sata_q);
}
if (!list_empty(&sata_q)) {
- ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
+ struct ata_port *ap = eh_dev->sata_dev.ap;
+
+ sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
ata_scsi_cmd_error_handler(shost, ap, &sata_q);
/*
* ata's error handler may leave the cmd on the list
@@ -869,7 +806,36 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
while (!list_empty(&sata_q))
list_del_init(sata_q.next);
}
- } while (ap);
+ } while (eh_dev);
+}
+
+void sas_ata_schedule_reset(struct domain_device *dev)
+{
+ struct ata_eh_info *ehi;
+ struct ata_port *ap;
+ unsigned long flags;
+
+ if (!dev_is_sata(dev))
+ return;
+
+ ap = dev->sata_dev.ap;
+ ehi = &ap->link.eh_info;
+
+ spin_lock_irqsave(ap->lock, flags);
+ ehi->err_mask |= AC_ERR_TIMEOUT;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_schedule_eh(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);
+
+void sas_ata_wait_eh(struct domain_device *dev)
+{
+ struct ata_port *ap;
+
+ if (!dev_is_sata(dev))
+ return;
- return rtn;
+ ap = dev->sata_dev.ap;
+ ata_port_wait_eh(ap);
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 54a5199ceb56..364679675602 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -30,29 +30,30 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
+#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"
/* ---------- Basic task processing for discovery purposes ---------- */
void sas_init_dev(struct domain_device *dev)
{
- INIT_LIST_HEAD(&dev->siblings);
- INIT_LIST_HEAD(&dev->dev_list_node);
- switch (dev->dev_type) {
- case SAS_END_DEV:
- break;
- case EDGE_DEV:
- case FANOUT_DEV:
- INIT_LIST_HEAD(&dev->ex_dev.children);
- break;
- case SATA_DEV:
- case SATA_PM:
- case SATA_PM_PORT:
- INIT_LIST_HEAD(&dev->sata_dev.children);
- break;
- default:
- break;
- }
+ switch (dev->dev_type) {
+ case SAS_END_DEV:
+ break;
+ case EDGE_DEV:
+ case FANOUT_DEV:
+ INIT_LIST_HEAD(&dev->ex_dev.children);
+ mutex_init(&dev->ex_dev.cmd_mutex);
+ break;
+ case SATA_DEV:
+ case SATA_PM:
+ case SATA_PM_PORT:
+ case SATA_PENDING:
+ INIT_LIST_HEAD(&dev->sata_dev.children);
+ break;
+ default:
+ break;
+ }
}
/* ---------- Domain device discovery ---------- */
@@ -68,19 +69,18 @@ void sas_init_dev(struct domain_device *dev)
*/
static int sas_get_port_device(struct asd_sas_port *port)
{
- unsigned long flags;
struct asd_sas_phy *phy;
struct sas_rphy *rphy;
struct domain_device *dev;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = sas_alloc_device();
if (!dev)
return -ENOMEM;
- spin_lock_irqsave(&port->phy_list_lock, flags);
+ spin_lock_irq(&port->phy_list_lock);
if (list_empty(&port->phy_list)) {
- spin_unlock_irqrestore(&port->phy_list_lock, flags);
- kfree(dev);
+ spin_unlock_irq(&port->phy_list_lock);
+ sas_put_device(dev);
return -ENODEV;
}
phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
@@ -88,7 +88,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
(size_t)phy->frame_rcvd_size));
spin_unlock(&phy->frame_rcvd_lock);
- spin_unlock_irqrestore(&port->phy_list_lock, flags);
+ spin_unlock_irq(&port->phy_list_lock);
if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
struct dev_to_host_fis *fis =
@@ -130,9 +130,14 @@ static int sas_get_port_device(struct asd_sas_port *port)
}
if (!rphy) {
- kfree(dev);
+ sas_put_device(dev);
return -ENODEV;
}
+
+ spin_lock_irq(&port->phy_list_lock);
+ list_for_each_entry(phy, &port->phy_list, port_phy_el)
+ sas_phy_set_target(phy, dev);
+ spin_unlock_irq(&port->phy_list_lock);
rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
sas_fill_in_rphy(dev, rphy);
@@ -147,11 +152,17 @@ static int sas_get_port_device(struct asd_sas_port *port)
memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
port->disc.max_level = 0;
+ sas_device_set_phy(dev, port->port);
dev->rphy = rphy;
- spin_lock_irq(&port->dev_list_lock);
- list_add_tail(&dev->dev_list_node, &port->dev_list);
- spin_unlock_irq(&port->dev_list_lock);
+
+ if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
+ list_add_tail(&dev->disco_list_node, &port->disco_list);
+ else {
+ spin_lock_irq(&port->dev_list_lock);
+ list_add_tail(&dev->dev_list_node, &port->dev_list);
+ spin_unlock_irq(&port->dev_list_lock);
+ }
return 0;
}
@@ -173,6 +184,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
}
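+ /* the lldd now holds a device reference; it is dropped
+ * again in sas_notify_lldd_dev_gone()
+ */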
+ kref_get(&dev->kref);
}
return res;
}
@@ -184,12 +196,40 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
- if (i->dft->lldd_dev_gone)
+ if (i->dft->lldd_dev_gone) {
i->dft->lldd_dev_gone(dev);
+ sas_put_device(dev);
+ }
}
-/* ---------- Common/dispatchers ---------- */
+static void sas_probe_devices(struct work_struct *work)
+{
+ struct domain_device *dev, *n;
+ struct sas_discovery_event *ev =
+ container_of(work, struct sas_discovery_event, work);
+ struct asd_sas_port *port = ev->port;
+
+ clear_bit(DISCE_PROBE, &port->disc.pending);
+ /* devices must be domain members before link recovery and probe */
+ list_for_each_entry(dev, &port->disco_list, disco_list_node) {
+ spin_lock_irq(&port->dev_list_lock);
+ list_add_tail(&dev->dev_list_node, &port->dev_list);
+ spin_unlock_irq(&port->dev_list_lock);
+ }
+
+ sas_probe_sata(port);
+
+ list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+ int err;
+
+ err = sas_rphy_add(dev->rphy);
+ if (err)
+ sas_fail_probe(dev, __func__, err);
+ else
+ list_del_init(&dev->disco_list_node);
+ }
+}
/**
* sas_discover_end_dev -- discover an end device (SSP, etc)
@@ -203,22 +243,36 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev);
if (res)
- goto out_err2;
-
- res = sas_rphy_add(dev->rphy);
- if (res)
- goto out_err;
+ return res;
+ sas_discover_event(dev->port, DISCE_PROBE);
return 0;
-
-out_err:
- sas_notify_lldd_dev_gone(dev);
-out_err2:
- return res;
}
/* ---------- Device registration and unregistration ---------- */
+void sas_free_device(struct kref *kref)
+{
+ struct domain_device *dev = container_of(kref, typeof(*dev), kref);
+
+ if (dev->parent)
+ sas_put_device(dev->parent);
+
+ sas_port_put_phy(dev->phy);
+ dev->phy = NULL;
+
+ /* remove the phys and ports, everything else should be gone */
+ if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
+ kfree(dev->ex_dev.ex_phy);
+
+ if (dev_is_sata(dev) && dev->sata_dev.ap) {
+ ata_sas_port_destroy(dev->sata_dev.ap);
+ dev->sata_dev.ap = NULL;
+ }
+
+ kfree(dev);
+}
+
static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
{
sas_notify_lldd_dev_gone(dev);
@@ -230,34 +284,84 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
spin_unlock_irq(&port->dev_list_lock);
+
+ sas_put_device(dev);
}
-void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
+static void sas_destruct_devices(struct work_struct *work)
{
- if (dev->rphy) {
+ struct domain_device *dev, *n;
+ struct sas_discovery_event *ev =
+ container_of(work, struct sas_discovery_event, work);
+ struct asd_sas_port *port = ev->port;
+
+ clear_bit(DISCE_DESTRUCT, &port->disc.pending);
+
+ list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
+ list_del_init(&dev->disco_list_node);
+
sas_remove_children(&dev->rphy->dev);
sas_rphy_delete(dev->rphy);
dev->rphy = NULL;
+ sas_unregister_common_dev(port, dev);
}
- if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
- /* remove the phys and ports, everything else should be gone */
- kfree(dev->ex_dev.ex_phy);
- dev->ex_dev.ex_phy = NULL;
+}
+
+void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
+{
+ if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
+ !list_empty(&dev->disco_list_node)) {
+ /* this rphy never saw sas_rphy_add */
+ list_del_init(&dev->disco_list_node);
+ sas_rphy_free(dev->rphy);
+ dev->rphy = NULL;
+ sas_unregister_common_dev(port, dev);
+ }
+
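+ /* otherwise unlink the rphy now and defer the actual teardown
+ * to the DISCE_DESTRUCT discovery work
+ */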
+ if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
+ sas_rphy_unlink(dev->rphy);
+ list_move_tail(&dev->disco_list_node, &port->destroy_list);
+ sas_discover_event(dev->port, DISCE_DESTRUCT);
}
- sas_unregister_common_dev(port, dev);
}
-void sas_unregister_domain_devices(struct asd_sas_port *port)
+void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
{
struct domain_device *dev, *n;
- list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node)
+ list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) {
+ if (gone)
+ set_bit(SAS_DEV_GONE, &dev->state);
+ sas_unregister_dev(port, dev);
+ }
+
+ list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node)
sas_unregister_dev(port, dev);
port->port->rphy = NULL;
}
+void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
+{
+ struct sas_ha_struct *ha;
+ struct sas_phy *new_phy;
+
+ if (!dev)
+ return;
+
+ ha = dev->port->ha;
+ new_phy = sas_port_get_phy(port);
+
+ /* pin and record last seen phy */
+ spin_lock_irq(&ha->phy_port_lock);
+ if (new_phy) {
+ sas_port_put_phy(dev->phy);
+ dev->phy = new_phy;
+ }
+ spin_unlock_irq(&ha->phy_port_lock);
+}
+
/* ---------- Discovery and Revalidation ---------- */
/**
@@ -277,8 +381,7 @@ static void sas_discover_domain(struct work_struct *work)
container_of(work, struct sas_discovery_event, work);
struct asd_sas_port *port = ev->port;
- sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
- &port->disc.pending);
+ clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
if (port->port_dev)
return;
@@ -318,11 +421,12 @@ static void sas_discover_domain(struct work_struct *work)
sas_rphy_free(dev->rphy);
dev->rphy = NULL;
+ list_del_init(&dev->disco_list_node);
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
spin_unlock_irq(&port->dev_list_lock);
- kfree(dev); /* not kobject_register-ed yet */
+ sas_put_device(dev);
port->port_dev = NULL;
}
@@ -336,21 +440,51 @@ static void sas_revalidate_domain(struct work_struct *work)
struct sas_discovery_event *ev =
container_of(work, struct sas_discovery_event, work);
struct asd_sas_port *port = ev->port;
+ struct sas_ha_struct *ha = port->ha;
+
+ /* prevent revalidation from finding sata links in recovery */
+ mutex_lock(&ha->disco_mutex);
+ if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
+ SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
+ port->id, task_pid_nr(current));
+ goto out;
+ }
- sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
- &port->disc.pending);
+ clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
task_pid_nr(current));
+
if (port->port_dev)
res = sas_ex_revalidate_domain(port->port_dev);
SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
port->id, task_pid_nr(current), res);
+ out:
+ mutex_unlock(&ha->disco_mutex);
}
/* ---------- Events ---------- */
+static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work)
+{
+ /* chained work is not subject to SAS_HA_DRAINING or SAS_HA_REGISTERED */
+ scsi_queue_work(ha->core.shost, work);
+}
+
+static void sas_chain_event(int event, unsigned long *pending,
+ struct work_struct *work,
+ struct sas_ha_struct *ha)
+{
+ if (!test_and_set_bit(event, pending)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->state_lock, flags);
+ sas_chain_work(ha, work);
+ spin_unlock_irqrestore(&ha->state_lock, flags);
+ }
+}
+
int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
struct sas_discovery *disc;
@@ -361,8 +495,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
BUG_ON(ev >= DISC_NUM_EVENTS);
- sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
- &disc->disc_work[ev].work, port->ha);
+ sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
return 0;
}
@@ -380,9 +513,10 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
+ [DISCE_PROBE] = sas_probe_devices,
+ [DISCE_DESTRUCT] = sas_destruct_devices,
};
- spin_lock_init(&disc->disc_event_lock);
disc->pending = 0;
for (i = 0; i < DISC_NUM_EVENTS; i++) {
INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 9db30fb5caf2..16639bbae629 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -22,15 +22,103 @@
*
*/
+#include <linux/export.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"
#include "sas_dump.h"
+void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
+{
+ if (!test_bit(SAS_HA_REGISTERED, &ha->state))
+ return;
+
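+ /* while draining, park new work on defer_q; __sas_drain_work()
+ * requeues it once the workqueue is empty
+ */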
+ if (test_bit(SAS_HA_DRAINING, &ha->state))
+ list_add(&work->entry, &ha->defer_q);
+ else
+ scsi_queue_work(ha->core.shost, work);
+}
+
+static void sas_queue_event(int event, unsigned long *pending,
+ struct work_struct *work,
+ struct sas_ha_struct *ha)
+{
+ if (!test_and_set_bit(event, pending)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->state_lock, flags);
+ sas_queue_work(ha, work);
+ spin_unlock_irqrestore(&ha->state_lock, flags);
+ }
+}
+
+
+void __sas_drain_work(struct sas_ha_struct *ha)
+{
+ struct workqueue_struct *wq = ha->core.shost->work_q;
+ struct work_struct *w, *_w;
+
+ set_bit(SAS_HA_DRAINING, &ha->state);
+ /* flush submitters */
+ spin_lock_irq(&ha->state_lock);
+ spin_unlock_irq(&ha->state_lock);
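+ /* the empty lock/unlock pair is a barrier: any submitter that saw
+ * SAS_HA_DRAINING clear has now left sas_queue_event(), so its
+ * work is queued before drain_workqueue() runs
+ */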
+
+ drain_workqueue(wq);
+
+ spin_lock_irq(&ha->state_lock);
+ clear_bit(SAS_HA_DRAINING, &ha->state);
+ list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
+ list_del_init(&w->entry);
+ sas_queue_work(ha, w);
+ }
+ spin_unlock_irq(&ha->state_lock);
+}
+
+int sas_drain_work(struct sas_ha_struct *ha)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ha->drain_mutex);
+ if (err)
+ return err;
+ if (test_bit(SAS_HA_REGISTERED, &ha->state))
+ __sas_drain_work(ha);
+ mutex_unlock(&ha->drain_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sas_drain_work);
+
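__sas_drain_work() relies on a simple protocol: SAS_HA_DRAINING diverts racing submitters onto defer_q, the empty lock/unlock of state_lock flushes anyone already inside sas_queue_work(), and the deferred entries are requeued once the workqueue is empty. A single-threaded userspace sketch of the defer/requeue half (names invented for the demo; the lock/unlock barrier has no analogue here):

#include <stdbool.h>
#include <stdio.h>

#define MAXQ 8
static const char *queue[MAXQ]; static int qlen;
static const char *defer[MAXQ]; static int dlen;
static bool draining;                   /* stands in for SAS_HA_DRAINING */

static void queue_work(const char *w)
{
    if (draining)
        defer[dlen++] = w;              /* list_add(&work->entry, &ha->defer_q) */
    else
        queue[qlen++] = w;              /* scsi_queue_work() */
}

static void drain_work(void)
{
    draining = true;
    queue_work("late-event");           /* a submitter racing the drain: deferred */
    while (qlen)                        /* stands in for drain_workqueue(wq) */
        printf("ran %s\n", queue[--qlen]);
    draining = false;
    while (dlen)                        /* requeue whatever arrived mid-drain */
        queue_work(defer[--dlen]);
}

int main(void)
{
    queue_work("discover");
    drain_work();                       /* runs "discover", requeues "late-event" */
    printf("%d item(s) pending after drain\n", qlen);
    return 0;
}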
+void sas_disable_revalidation(struct sas_ha_struct *ha)
+{
+ mutex_lock(&ha->disco_mutex);
+ set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
+ mutex_unlock(&ha->disco_mutex);
+}
+
+void sas_enable_revalidation(struct sas_ha_struct *ha)
+{
+ int i;
+
+ mutex_lock(&ha->disco_mutex);
+ clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+ const int ev = DISCE_REVALIDATE_DOMAIN;
+ struct sas_discovery *d = &port->disc;
+
+ if (!test_and_clear_bit(ev, &d->pending))
+ continue;
+
+ sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+ }
+ mutex_unlock(&ha->disco_mutex);
+}
+
static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
{
BUG_ON(event >= HA_NUM_EVENTS);
- sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
+ sas_queue_event(event, &sas_ha->pending,
&sas_ha->ha_events[event].work, sas_ha);
}
@@ -40,7 +128,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
BUG_ON(event >= PORT_NUM_EVENTS);
- sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
+ sas_queue_event(event, &phy->port_events_pending,
&phy->port_events[event].work, ha);
}
@@ -50,7 +138,7 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
BUG_ON(event >= PHY_NUM_EVENTS);
- sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
+ sas_queue_event(event, &phy->phy_events_pending,
&phy->phy_events[event].work, ha);
}
@@ -62,8 +150,6 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
int i;
- spin_lock_init(&sas_ha->event_lock);
-
for (i = 0; i < HA_NUM_EVENTS; i++) {
INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
sas_ha->ha_events[i].ha = sas_ha;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 1b831c55ec6e..05acd9e35fc4 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -28,6 +28,7 @@
#include "sas_internal.h"
+#include <scsi/sas_ata.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"
@@ -71,11 +72,18 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
+ mutex_lock(&dev->ex_dev.cmd_mutex);
for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
+ if (test_bit(SAS_DEV_GONE, &dev->state)) {
+ res = -ECOMM;
+ break;
+ }
+ task = sas_alloc_task(GFP_KERNEL);
+ if (!task) {
+ res = -ENOMEM;
+ break;
+ }
task->dev = dev;
task->task_proto = dev->tproto;
sg_init_one(&task->smp_task.smp_req, req, req_size);
@@ -93,7 +101,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
if (res) {
del_timer(&task->timer);
SAS_DPRINTK("executing SMP task failed:%d\n", res);
- goto ex_err;
+ break;
}
wait_for_completion(&task->completion);
@@ -103,24 +111,30 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
i->dft->lldd_abort_task(task);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
SAS_DPRINTK("SMP task aborted and not done\n");
- goto ex_err;
+ break;
}
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAM_STAT_GOOD) {
res = 0;
break;
- } if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
/* no error, but return the number of bytes of
* underrun */
res = task->task_status.residual;
break;
- } if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
res = -EMSGSIZE;
break;
- } else {
+ }
+ if (task->task_status.resp == SAS_TASK_UNDELIVERED &&
+ task->task_status.stat == SAS_DEVICE_UNKNOWN)
+ break;
+ else {
SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
"status 0x%x\n", __func__,
SAS_ADDR(dev->sas_addr),
@@ -130,11 +144,10 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
task = NULL;
}
}
-ex_err:
+ mutex_unlock(&dev->ex_dev.cmd_mutex);
+
BUG_ON(retry == 3 && task != NULL);
- if (task != NULL) {
- sas_free_task(task);
- }
+ sas_free_task(task);
return res;
}
@@ -153,19 +166,49 @@ static inline void *alloc_smp_resp(int size)
return kzalloc(size, GFP_KERNEL);
}
-/* ---------- Expander configuration ---------- */
+static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
+{
+ switch (phy->routing_attr) {
+ case TABLE_ROUTING:
+ if (dev->ex_dev.t2t_supp)
+ return 'U';
+ else
+ return 'T';
+ case DIRECT_ROUTING:
+ return 'D';
+ case SUBTRACTIVE_ROUTING:
+ return 'S';
+ default:
+ return '?';
+ }
+}
+
+static enum sas_dev_type to_dev_type(struct discover_resp *dr)
+{
+ /* This is detecting a failure to transmit initial dev to host
+ * FIS as described in section J.5 of sas-2 r16
+ */
+ if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
+ dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
+ return SATA_PENDING;
+ else
+ return dr->attached_dev_type;
+}
-static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
- void *disc_resp)
+static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
{
+ enum sas_dev_type dev_type;
+ enum sas_linkrate linkrate;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ struct smp_resp *resp = rsp;
+ struct discover_resp *dr = &resp->disc;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
- struct smp_resp *resp = disc_resp;
- struct discover_resp *dr = &resp->disc;
struct sas_rphy *rphy = dev->rphy;
- int rediscover = (phy->phy != NULL);
+ bool new_phy = !phy->phy;
+ char *type;
- if (!rediscover) {
+ if (new_phy) {
phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
/* FIXME: error_handling */
@@ -184,8 +227,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
break;
}
+ /* check if anything important changed to squelch debug */
+ dev_type = phy->attached_dev_type;
+ linkrate = phy->linkrate;
+ memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
+
+ phy->attached_dev_type = to_dev_type(dr);
phy->phy_id = phy_id;
- phy->attached_dev_type = dr->attached_dev_type;
phy->linkrate = dr->linkrate;
phy->attached_sata_host = dr->attached_sata_host;
phy->attached_sata_dev = dr->attached_sata_dev;
@@ -200,9 +248,11 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
phy->last_da_index = -1;
phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
- phy->phy->identify.device_type = phy->attached_dev_type;
+ phy->phy->identify.device_type = dr->attached_dev_type;
phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
phy->phy->identify.target_port_protocols = phy->attached_tproto;
+ if (!phy->attached_tproto && dr->attached_sata_dev)
+ phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
phy->phy->identify.phy_identifier = phy_id;
phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
@@ -210,20 +260,76 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
- if (!rediscover)
+ if (new_phy)
if (sas_phy_add(phy->phy)) {
sas_phy_free(phy->phy);
return;
}
- SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n",
+ switch (phy->attached_dev_type) {
+ case SATA_PENDING:
+ type = "stp pending";
+ break;
+ case NO_DEVICE:
+ type = "no device";
+ break;
+ case SAS_END_DEV:
+ if (phy->attached_iproto) {
+ if (phy->attached_tproto)
+ type = "host+target";
+ else
+ type = "host";
+ } else {
+ if (dr->attached_sata_dev)
+ type = "stp";
+ else
+ type = "ssp";
+ }
+ break;
+ case EDGE_DEV:
+ case FANOUT_DEV:
+ type = "smp";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ /* this routine is polled by libata error recovery so filter
+ * unimportant messages
+ */
+ if (new_phy || phy->attached_dev_type != dev_type ||
+ phy->linkrate != linkrate ||
+ SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr))
+ /* pass */;
+ else
+ return;
+
+ SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
SAS_ADDR(dev->sas_addr), phy->phy_id,
- phy->routing_attr == TABLE_ROUTING ? 'T' :
- phy->routing_attr == DIRECT_ROUTING ? 'D' :
- phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?',
- SAS_ADDR(phy->attached_sas_addr));
+ sas_route_char(dev, phy), phy->linkrate,
+ SAS_ADDR(phy->attached_sas_addr), type);
+}
+
+/* check if we have an existing attached ata device on this expander phy */
+struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
+{
+ struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id];
+ struct domain_device *dev;
+ struct sas_rphy *rphy;
+
+ if (!ex_phy->port)
+ return NULL;
- return;
+ rphy = ex_phy->port->rphy;
+ if (!rphy)
+ return NULL;
+
+ dev = sas_find_dev_by_rphy(rphy);
+
+ if (dev && dev_is_sata(dev))
+ return dev;
+
+ return NULL;
}
#define DISCOVER_REQ_SIZE 16
@@ -232,39 +338,25 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
u8 *disc_resp, int single)
{
- int i, res;
+ struct discover_resp *dr;
+ int res;
disc_req[9] = single;
- for (i = 1 ; i < 3; i++) {
- struct discover_resp *dr;
- res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
- disc_resp, DISCOVER_RESP_SIZE);
- if (res)
- return res;
- /* This is detecting a failure to transmit initial
- * dev to host FIS as described in section G.5 of
- * sas-2 r 04b */
- dr = &((struct smp_resp *)disc_resp)->disc;
- if (memcmp(dev->sas_addr, dr->attached_sas_addr,
- SAS_ADDR_SIZE) == 0) {
- sas_printk("Found loopback topology, just ignore it!\n");
- return 0;
- }
- if (!(dr->attached_dev_type == 0 &&
- dr->attached_sata_dev))
- break;
- /* In order to generate the dev to host FIS, we
- * send a link reset to the expander port */
- sas_smp_phy_control(dev, single, PHY_FUNC_LINK_RESET, NULL);
- /* Wait for the reset to trigger the negotiation */
- msleep(500);
+ res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
+ disc_resp, DISCOVER_RESP_SIZE);
+ if (res)
+ return res;
+ dr = &((struct smp_resp *)disc_resp)->disc;
+ if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
+ sas_printk("Found loopback topology, just ignore it!\n");
+ return 0;
}
sas_set_ex_phy(dev, single, disc_resp);
return 0;
}
-static int sas_ex_phy_discover(struct domain_device *dev, int single)
+int sas_ex_phy_discover(struct domain_device *dev, int single)
{
struct expander_device *ex = &dev->ex_dev;
int res = 0;
@@ -569,9 +661,8 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
#define RPS_REQ_SIZE 16
#define RPS_RESP_SIZE 60
-static int sas_get_report_phy_sata(struct domain_device *dev,
- int phy_id,
- struct smp_resp *rps_resp)
+int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
+ struct smp_resp *rps_resp)
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -657,10 +748,11 @@ static struct domain_device *sas_ex_discover_end_dev(
if (phy->attached_sata_host || phy->attached_sata_ps)
return NULL;
- child = kzalloc(sizeof(*child), GFP_KERNEL);
+ child = sas_alloc_device();
if (!child)
return NULL;
+ kref_get(&parent->kref);
child->parent = parent;
child->port = parent->port;
child->iproto = phy->attached_iproto;
@@ -676,24 +768,13 @@ static struct domain_device *sas_ex_discover_end_dev(
}
}
sas_ex_get_linkrate(parent, child, phy);
+ sas_device_set_phy(child, phy->port);
#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
- child->dev_type = SATA_DEV;
- if (phy->attached_tproto & SAS_PROTOCOL_STP)
- child->tproto = phy->attached_tproto;
- if (phy->attached_sata_dev)
- child->tproto |= SATA_DEV;
- res = sas_get_report_phy_sata(parent, phy_id,
- &child->sata_dev.rps_resp);
- if (res) {
- SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
- "0x%x\n", SAS_ADDR(parent->sas_addr),
- phy_id, res);
+ res = sas_get_ata_info(child, phy);
+ if (res)
goto out_free;
- }
- memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
- sizeof(struct dev_to_host_fis));
rphy = sas_end_device_alloc(phy->port);
if (unlikely(!rphy))
@@ -703,9 +784,7 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
- spin_lock_irq(&parent->port->dev_list_lock);
- list_add_tail(&child->dev_list_node, &parent->port->dev_list);
- spin_unlock_irq(&parent->port->dev_list_lock);
+ list_add_tail(&child->disco_list_node, &parent->port->disco_list);
res = sas_discover_sata(child);
if (res) {
@@ -729,9 +808,7 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
sas_fill_in_rphy(child, rphy);
- spin_lock_irq(&parent->port->dev_list_lock);
- list_add_tail(&child->dev_list_node, &parent->port->dev_list);
- spin_unlock_irq(&parent->port->dev_list_lock);
+ list_add_tail(&child->disco_list_node, &parent->port->disco_list);
res = sas_discover_end_dev(child);
if (res) {
@@ -755,6 +832,7 @@ static struct domain_device *sas_ex_discover_end_dev(
sas_rphy_free(child->rphy);
child->rphy = NULL;
+ list_del(&child->disco_list_node);
spin_lock_irq(&parent->port->dev_list_lock);
list_del(&child->dev_list_node);
spin_unlock_irq(&parent->port->dev_list_lock);
@@ -762,7 +840,7 @@ static struct domain_device *sas_ex_discover_end_dev(
sas_port_delete(phy->port);
out_err:
phy->port = NULL;
- kfree(child);
+ sas_put_device(child);
return NULL;
}
@@ -809,7 +887,7 @@ static struct domain_device *sas_ex_discover_expander(
phy->attached_phy_id);
return NULL;
}
- child = kzalloc(sizeof(*child), GFP_KERNEL);
+ child = sas_alloc_device();
if (!child)
return NULL;
@@ -835,6 +913,7 @@ static struct domain_device *sas_ex_discover_expander(
child->rphy = rphy;
edev = rphy_to_expander_device(rphy);
child->dev_type = phy->attached_dev_type;
+ kref_get(&parent->kref);
child->parent = parent;
child->port = port;
child->iproto = phy->attached_iproto;
@@ -858,7 +937,7 @@ static struct domain_device *sas_ex_discover_expander(
spin_lock_irq(&parent->port->dev_list_lock);
list_del(&child->dev_list_node);
spin_unlock_irq(&parent->port->dev_list_lock);
- kfree(child);
+ sas_put_device(child);
return NULL;
}
list_add_tail(&child->siblings, &parent->ex_dev.children);
@@ -908,7 +987,8 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
if (ex_phy->attached_dev_type != SAS_END_DEV &&
ex_phy->attached_dev_type != FANOUT_DEV &&
- ex_phy->attached_dev_type != EDGE_DEV) {
+ ex_phy->attached_dev_type != EDGE_DEV &&
+ ex_phy->attached_dev_type != SATA_PENDING) {
SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
"phy 0x%x\n", ex_phy->attached_dev_type,
SAS_ADDR(dev->sas_addr),
@@ -934,6 +1014,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
switch (ex_phy->attached_dev_type) {
case SAS_END_DEV:
+ case SATA_PENDING:
child = sas_ex_discover_end_dev(dev, phy_id);
break;
case FANOUT_DEV:
@@ -1128,32 +1209,25 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
struct ex_phy *parent_phy,
struct ex_phy *child_phy)
{
- static const char ra_char[] = {
- [DIRECT_ROUTING] = 'D',
- [SUBTRACTIVE_ROUTING] = 'S',
- [TABLE_ROUTING] = 'T',
- };
static const char *ex_type[] = {
[EDGE_DEV] = "edge",
[FANOUT_DEV] = "fanout",
};
struct domain_device *parent = child->parent;
- sas_printk("%s ex %016llx (T2T supp:%d) phy 0x%x <--> %s ex %016llx "
- "(T2T supp:%d) phy 0x%x has %c:%c routing link!\n",
+ sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx "
+ "phy 0x%x has %c:%c routing link!\n",
ex_type[parent->dev_type],
SAS_ADDR(parent->sas_addr),
- parent->ex_dev.t2t_supp,
parent_phy->phy_id,
ex_type[child->dev_type],
SAS_ADDR(child->sas_addr),
- child->ex_dev.t2t_supp,
child_phy->phy_id,
- ra_char[parent_phy->routing_attr],
- ra_char[child_phy->routing_attr]);
+ sas_route_char(parent, parent_phy),
+ sas_route_char(child, child_phy));
}
static int sas_check_eeds(struct domain_device *child,
@@ -1610,8 +1684,8 @@ static int sas_get_phy_change_count(struct domain_device *dev,
return res;
}
-static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
- int phy_id, u8 *attached_sas_addr)
+static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
+ u8 *sas_addr, enum sas_dev_type *type)
{
int res;
struct smp_resp *disc_resp;
@@ -1623,10 +1697,11 @@ static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
dr = &disc_resp->disc;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
- if (!res) {
- memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8);
- if (dr->attached_dev_type == 0)
- memset(attached_sas_addr, 0, 8);
+ if (res == 0) {
+ memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8);
+ *type = to_dev_type(dr);
+ if (*type == 0)
+ memset(sas_addr, 0, 8);
}
kfree(disc_resp);
return res;
@@ -1748,7 +1823,7 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
struct domain_device *child, *n;
list_for_each_entry_safe(child, n, &ex->children, siblings) {
- child->gone = 1;
+ set_bit(SAS_DEV_GONE, &child->state);
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
sas_unregister_ex_tree(port, child);
@@ -1763,27 +1838,28 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
{
struct expander_device *ex_dev = &parent->ex_dev;
struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
- struct domain_device *child, *n;
+ struct domain_device *child, *n, *found = NULL;
if (last) {
list_for_each_entry_safe(child, n,
&ex_dev->children, siblings) {
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(phy->attached_sas_addr)) {
- child->gone = 1;
+ set_bit(SAS_DEV_GONE, &child->state);
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
sas_unregister_ex_tree(parent->port, child);
else
sas_unregister_dev(parent->port, child);
+ found = child;
break;
}
}
- parent->gone = 1;
sas_disable_routing(parent, phy->attached_sas_addr);
}
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
if (phy->port) {
sas_port_delete_phy(phy->port, phy->phy);
+ sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
sas_port_delete(phy->port);
phy->port = NULL;
@@ -1874,39 +1950,71 @@ out:
return res;
}
+static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
+{
+ if (old == new)
+ return true;
+
+ /* treat device directed resets as flutter, if we went
+ * SAS_END_DEV to SATA_PENDING the link needs recovery
+ */
+ if ((old == SATA_PENDING && new == SAS_END_DEV) ||
+ (old == SAS_END_DEV && new == SATA_PENDING))
+ return true;
+
+ return false;
+}
+
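dev_type_flutter() is easiest to sanity-check against its three cases; here is a small userspace re-implementation with asserts (enum values are stand-ins for the kernel's sas_dev_type):

#include <assert.h>
#include <stdbool.h>

enum dev_type { NO_DEVICE, SAS_END_DEV, SATA_PENDING, EDGE_DEV };

static bool flutter(enum dev_type new, enum dev_type old)
{
    if (old == new)
        return true;
    /* a device-directed reset bounces a SATA link between these two */
    return (old == SATA_PENDING && new == SAS_END_DEV) ||
           (old == SAS_END_DEV && new == SATA_PENDING);
}

int main(void)
{
    assert(flutter(SAS_END_DEV, SAS_END_DEV));   /* same type: flutter */
    assert(flutter(SAS_END_DEV, SATA_PENDING));  /* reset recovery: flutter */
    assert(!flutter(EDGE_DEV, SAS_END_DEV));     /* real topology change */
    return 0;
}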
static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
{
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
- u8 attached_sas_addr[8];
+ enum sas_dev_type type = NO_DEVICE;
+ u8 sas_addr[8];
int res;
- res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr);
+ res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
switch (res) {
case SMP_RESP_NO_PHY:
phy->phy_state = PHY_NOT_PRESENT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- goto out; break;
+ return res;
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- goto out; break;
+ return res;
case SMP_RESP_FUNC_ACC:
break;
}
- if (SAS_ADDR(attached_sas_addr) == 0) {
+ if (SAS_ADDR(sas_addr) == 0) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- } else if (SAS_ADDR(attached_sas_addr) ==
- SAS_ADDR(phy->attached_sas_addr)) {
- SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n",
- SAS_ADDR(dev->sas_addr), phy_id);
+ return res;
+ } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
+ dev_type_flutter(type, phy->attached_dev_type)) {
+ struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
+ char *action = "";
+
sas_ex_phy_discover(dev, phy_id);
- } else
- res = sas_discover_new(dev, phy_id);
-out:
- return res;
+
+ if (ata_dev && phy->attached_dev_type == SATA_PENDING)
+ action = ", needs recovery";
+ SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
+ SAS_ADDR(dev->sas_addr), phy_id, action);
+ return res;
+ }
+
+ /* delete the old link */
+ if (SAS_ADDR(phy->attached_sas_addr) &&
+ SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
+ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+ SAS_ADDR(dev->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr));
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+ }
+
+ return sas_discover_new(dev, phy_id);
}
/**
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index bb8f49269a68..d24792575169 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -187,11 +187,14 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
struct sas_phy_linkrates rates;
+ struct asd_sas_phy *asd_phy;
if (phy_id >= sas_ha->num_phys) {
resp_data[2] = SMP_RESP_NO_PHY;
return;
}
+
+ asd_phy = sas_ha->sas_phy[phy_id];
switch (phy_op) {
case PHY_FUNC_NOP:
case PHY_FUNC_LINK_RESET:
@@ -210,7 +213,13 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
rates.minimum_linkrate = min;
rates.maximum_linkrate = max;
- if (i->dft->lldd_control_phy(sas_ha->sas_phy[phy_id], phy_op, &rates))
+ /* filter reset requests through libata eh */
+ if (phy_op == PHY_FUNC_LINK_RESET && sas_try_ata_reset(asd_phy) == 0) {
+ resp_data[2] = SMP_RESP_FUNC_ACC;
+ return;
+ }
+
+ if (i->dft->lldd_control_phy(asd_phy, phy_op, &rates))
resp_data[2] = SMP_RESP_FUNC_FAILED;
else
resp_data[2] = SMP_RESP_FUNC_ACC;
@@ -246,9 +255,9 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
}
local_irq_disable();
- buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
+ buf = kmap_atomic(bio_page(req->bio));
memcpy(req_data, buf, blk_rq_bytes(req));
- kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
+ kunmap_atomic(buf - bio_offset(req->bio));
local_irq_enable();
if (req_data[0] != SMP_REQUEST)
@@ -361,10 +370,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
}
local_irq_disable();
- buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
+ buf = kmap_atomic(bio_page(rsp->bio));
memcpy(buf, resp_data, blk_rq_bytes(rsp));
flush_kernel_dcache_page(bio_page(rsp->bio));
- kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
+ kunmap_atomic(buf - bio_offset(rsp->bio));
local_irq_enable();
out:
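The two hunks above track the kmap_atomic() API change that dropped the KM_USER0 slot argument: map and unmap now pair purely on the returned address. A kernel-context sketch of the new pairing (illustrative only; page, dst, off, and len are placeholders):

/* Kernel-context sketch, not standalone-compilable: kunmap_atomic()
 * must be handed the address kmap_atomic() returned, which is why the
 * hunks above pass "buf - bio_offset(...)" when buf was offset at map
 * time. */
void *buf = kmap_atomic(page);      /* implies pagefault/preempt disable */
memcpy(dst, buf + off, len);        /* do the access while mapped */
kunmap_atomic(buf);                 /* unmap the address that was mapped */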
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index d81c3b1989f7..120bff64be30 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
+#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
@@ -97,14 +98,14 @@ void sas_hae_reset(struct work_struct *work)
container_of(work, struct sas_ha_event, work);
struct sas_ha_struct *ha = ev->ha;
- sas_begin_event(HAE_RESET, &ha->event_lock,
- &ha->pending);
+ clear_bit(HAE_RESET, &ha->pending);
}
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
int error = 0;
+ mutex_init(&sas_ha->disco_mutex);
spin_lock_init(&sas_ha->phy_port_lock);
sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
@@ -113,8 +114,10 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
else if (sas_ha->lldd_queue_size == -1)
sas_ha->lldd_queue_size = 128; /* Sanity */
- sas_ha->state = SAS_HA_REGISTERED;
+ set_bit(SAS_HA_REGISTERED, &sas_ha->state);
spin_lock_init(&sas_ha->state_lock);
+ mutex_init(&sas_ha->drain_mutex);
+ INIT_LIST_HEAD(&sas_ha->defer_q);
error = sas_register_phys(sas_ha);
if (error) {
@@ -144,6 +147,7 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
}
INIT_LIST_HEAD(&sas_ha->eh_done_q);
+ INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
@@ -156,17 +160,23 @@ Undo_phys:
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
- unsigned long flags;
-
- /* Set the state to unregistered to avoid further
- * events to be queued */
- spin_lock_irqsave(&sas_ha->state_lock, flags);
- sas_ha->state = SAS_HA_UNREGISTERED;
- spin_unlock_irqrestore(&sas_ha->state_lock, flags);
- scsi_flush_work(sas_ha->core.shost);
+ /* Set the state to unregistered to prevent further unchained
+ * events from being queued, and flush any in-progress drainers
+ */
+ mutex_lock(&sas_ha->drain_mutex);
+ spin_lock_irq(&sas_ha->state_lock);
+ clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
+ spin_unlock_irq(&sas_ha->state_lock);
+ __sas_drain_work(sas_ha);
+ mutex_unlock(&sas_ha->drain_mutex);
sas_unregister_ports(sas_ha);
+ /* flush unregistration work */
+ mutex_lock(&sas_ha->drain_mutex);
+ __sas_drain_work(sas_ha);
+ mutex_unlock(&sas_ha->drain_mutex);
+
if (sas_ha->lldd_max_execute_num > 1) {
sas_shutdown_queue(sas_ha);
sas_ha->lldd_max_execute_num = 1;
@@ -190,15 +200,75 @@ static int sas_get_linkerrors(struct sas_phy *phy)
return sas_smp_get_phy_events(phy);
}
-int sas_phy_enable(struct sas_phy *phy, int enable)
+int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
+{
+ struct domain_device *dev = NULL;
+
+ /* try to route user requested link resets through libata */
+ if (asd_phy->port)
+ dev = asd_phy->port->port_dev;
+
+ /* validate that dev has been probed */
+ if (dev)
+ dev = sas_find_dev_by_rphy(dev->rphy);
+
+ if (dev && dev_is_sata(dev)) {
+ sas_ata_schedule_reset(dev);
+ sas_ata_wait_eh(dev);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * transport_sas_phy_reset - reset a phy and permit libata to manage the link
+ *
+ * Runs a phy reset request from sysfs in host workqueue context, so we
+ * know we can block on eh and safely traverse the domain_device topology
+ */
+static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ enum phy_func reset_type;
+
+ if (hard_reset)
+ reset_type = PHY_FUNC_HARD_RESET;
+ else
+ reset_type = PHY_FUNC_LINK_RESET;
+
+ if (scsi_is_sas_phy_local(phy)) {
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
+ return 0;
+ return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
+ } else {
+ struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
+ struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
+ struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);
+
+ if (ata_dev && !hard_reset) {
+ sas_ata_schedule_reset(ata_dev);
+ sas_ata_wait_eh(ata_dev);
+ return 0;
+ } else
+ return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
+ }
+}
+
+static int sas_phy_enable(struct sas_phy *phy, int enable)
{
int ret;
- enum phy_func command;
+ enum phy_func cmd;
if (enable)
- command = PHY_FUNC_LINK_RESET;
+ cmd = PHY_FUNC_LINK_RESET;
else
- command = PHY_FUNC_DISABLE;
+ cmd = PHY_FUNC_DISABLE;
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
@@ -207,15 +277,18 @@ int sas_phy_enable(struct sas_phy *phy, int enable)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- if (!enable) {
- sas_phy_disconnected(asd_phy);
- sas_ha->notify_phy_event(asd_phy, PHYE_LOSS_OF_SIGNAL);
- }
- ret = i->dft->lldd_control_phy(asd_phy, command, NULL);
+ if (enable)
+ ret = transport_sas_phy_reset(phy, 0);
+ else
+ ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
} else {
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
- ret = sas_smp_phy_control(ddev, phy->number, command, NULL);
+
+ if (enable)
+ ret = transport_sas_phy_reset(phy, 0);
+ else
+ ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
}
return ret;
}
@@ -225,6 +298,9 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
int ret;
enum phy_func reset_type;
+ if (!phy->enabled)
+ return -ENODEV;
+
if (hard_reset)
reset_type = PHY_FUNC_HARD_RESET;
else
@@ -285,9 +361,101 @@ int sas_set_phy_speed(struct sas_phy *phy,
return ret;
}
+static void sas_phy_release(struct sas_phy *phy)
+{
+ kfree(phy->hostdata);
+ phy->hostdata = NULL;
+}
+
+static void phy_reset_work(struct work_struct *work)
+{
+ struct sas_phy_data *d = container_of(work, typeof(*d), reset_work);
+
+ d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
+}
+
+static void phy_enable_work(struct work_struct *work)
+{
+ struct sas_phy_data *d = container_of(work, typeof(*d), enable_work);
+
+ d->enable_result = sas_phy_enable(d->phy, d->enable);
+}
+
+static int sas_phy_setup(struct sas_phy *phy)
+{
+ struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
+
+ if (!d)
+ return -ENOMEM;
+
+ mutex_init(&d->event_lock);
+ INIT_WORK(&d->reset_work, phy_reset_work);
+ INIT_WORK(&d->enable_work, phy_enable_work);
+ d->phy = phy;
+ phy->hostdata = d;
+
+ return 0;
+}
+
+static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct sas_phy_data *d = phy->hostdata;
+ int rc;
+
+ if (!d)
+ return -ENOMEM;
+
+ /* libsas workqueue coordinates ata-eh reset with discovery */
+ mutex_lock(&d->event_lock);
+ d->reset_result = 0;
+ d->hard_reset = hard_reset;
+
+ spin_lock_irq(&ha->state_lock);
+ sas_queue_work(ha, &d->reset_work);
+ spin_unlock_irq(&ha->state_lock);
+
+ rc = sas_drain_work(ha);
+ if (rc == 0)
+ rc = d->reset_result;
+ mutex_unlock(&d->event_lock);
+
+ return rc;
+}
+
+static int queue_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct sas_phy_data *d = phy->hostdata;
+ int rc;
+
+ if (!d)
+ return -ENOMEM;
+
+ /* libsas workqueue coordinates ata-eh reset with discovery */
+ mutex_lock(&d->event_lock);
+ d->enable_result = 0;
+ d->enable = enable;
+
+ spin_lock_irq(&ha->state_lock);
+ sas_queue_work(ha, &d->enable_work);
+ spin_unlock_irq(&ha->state_lock);
+
+ rc = sas_drain_work(ha);
+ if (rc == 0)
+ rc = d->enable_result;
+ mutex_unlock(&d->event_lock);
+
+ return rc;
+}
+
static struct sas_function_template sft = {
- .phy_enable = sas_phy_enable,
- .phy_reset = sas_phy_reset,
+ .phy_enable = queue_phy_enable,
+ .phy_reset = queue_phy_reset,
+ .phy_setup = sas_phy_setup,
+ .phy_release = sas_phy_release,
.set_phy_speed = sas_set_phy_speed,
.get_linkerrors = sas_get_linkerrors,
.smp_handler = sas_smp_handler,
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 14e21b5fb8ba..f05c63879949 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -30,6 +30,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>
+#include <scsi/sas_ata.h>
#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
@@ -38,6 +39,18 @@
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
+struct sas_phy_data {
+ /* let reset be performed in sas_queue_work() context */
+ struct sas_phy *phy;
+ struct mutex event_lock;
+ int hard_reset;
+ int reset_result;
+ struct work_struct reset_work;
+ int enable;
+ int enable_result;
+ struct work_struct enable_work;
+};
+
void sas_scsi_recover_host(struct Scsi_Host *shost);
int sas_show_class(enum sas_class class, char *buf);
@@ -56,6 +69,9 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
+void sas_disable_revalidation(struct sas_ha_struct *ha);
+void sas_enable_revalidation(struct sas_ha_struct *ha);
+void __sas_drain_work(struct sas_ha_struct *ha);
void sas_deform_port(struct asd_sas_phy *phy, int gone);
@@ -64,6 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
+void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -72,10 +89,17 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
+void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
-
+struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
+int sas_ex_phy_discover(struct domain_device *dev, int single);
+int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
+ struct smp_resp *rps_resp);
+int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
+void sas_free_device(struct kref *kref);
+
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
struct request *rsp);
@@ -90,36 +114,13 @@ static inline int sas_smp_host_handler(struct Scsi_Host *shost,
}
#endif
-static inline void sas_queue_event(int event, spinlock_t *lock,
- unsigned long *pending,
- struct work_struct *work,
- struct sas_ha_struct *sas_ha)
+static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
{
- unsigned long flags;
-
- spin_lock_irqsave(lock, flags);
- if (test_bit(event, pending)) {
- spin_unlock_irqrestore(lock, flags);
- return;
- }
- __set_bit(event, pending);
- spin_unlock_irqrestore(lock, flags);
-
- spin_lock_irqsave(&sas_ha->state_lock, flags);
- if (sas_ha->state != SAS_HA_UNREGISTERED) {
- scsi_queue_work(sas_ha->core.shost, work);
- }
- spin_unlock_irqrestore(&sas_ha->state_lock, flags);
-}
-
-static inline void sas_begin_event(int event, spinlock_t *lock,
- unsigned long *pending)
-{
- unsigned long flags;
-
- spin_lock_irqsave(lock, flags);
- __clear_bit(event, pending);
- spin_unlock_irqrestore(lock, flags);
+ SAS_DPRINTK("%s: for %s device %16llx returned %d\n",
+ func, dev->parent ? "exp-attached" :
+ "direct-attached",
+ SAS_ADDR(dev->sas_addr), err);
+ sas_unregister_dev(dev->port, dev);
}
static inline void sas_fill_in_rphy(struct domain_device *dev,
@@ -132,6 +133,7 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
case SATA_DEV:
/* FIXME: need sata device type */
case SAS_END_DEV:
+ case SATA_PENDING:
rphy->identify.device_type = SAS_END_DEVICE;
break;
case EDGE_DEV:
@@ -146,6 +148,22 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
}
}
+static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_device *dev)
+{
+ struct sas_phy *phy = p->phy;
+
+ if (dev) {
+ if (dev_is_sata(dev))
+ phy->identify.device_type = SAS_END_DEVICE;
+ else
+ phy->identify.device_type = dev->dev_type;
+ phy->identify.target_port_protocols = dev->tproto;
+ } else {
+ phy->identify.device_type = SAS_PHY_UNUSED;
+ phy->identify.target_port_protocols = 0;
+ }
+}
+
static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
{
struct expander_device *ex = &dev->ex_dev;
@@ -161,4 +179,23 @@ static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
sas_port_add_phy(ex->parent_port, ex_phy->phy);
}
+static inline struct domain_device *sas_alloc_device(void)
+{
+ struct domain_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (dev) {
+ INIT_LIST_HEAD(&dev->siblings);
+ INIT_LIST_HEAD(&dev->dev_list_node);
+ INIT_LIST_HEAD(&dev->disco_list_node);
+ kref_init(&dev->kref);
+ spin_lock_init(&dev->done_lock);
+ }
+ return dev;
+}
+
+static inline void sas_put_device(struct domain_device *dev)
+{
+ kref_put(&dev->kref, sas_free_device);
+}
+
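sas_alloc_device()/sas_put_device() move domain_device lifetime onto a kref: allocation hands the caller one reference, kref_get() pins the device (e.g. for child->parent), and the final kref_put() invokes sas_free_device(). A userspace analogue using C11 atomics (demo names only):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_dev { atomic_int ref; };

static struct demo_dev *dev_alloc(void)
{
    struct demo_dev *d = calloc(1, sizeof(*d));
    if (d)
        atomic_init(&d->ref, 1);    /* kref_init(): caller owns one ref */
    return d;
}

static void dev_get(struct demo_dev *d) { atomic_fetch_add(&d->ref, 1); }

static void dev_put(struct demo_dev *d)
{
    if (atomic_fetch_sub(&d->ref, 1) == 1) { /* kref_put() release path */
        printf("freeing device\n");
        free(d);
    }
}

int main(void)
{
    struct demo_dev *d = dev_alloc();
    dev_get(d);     /* e.g. a child pins its parent */
    dev_put(d);     /* the child drops it again */
    dev_put(d);     /* last reference: freed here, exactly once */
    return 0;
}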
#endif /* _SAS_INTERNAL_H_ */
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index e0f5018e9071..dcfd4a9105c5 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -36,8 +36,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
- sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
- &phy->phy_events_pending);
+ clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -48,8 +47,7 @@ static void sas_phye_oob_done(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
- sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
- &phy->phy_events_pending);
+ clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
phy->error = 0;
}
@@ -63,8 +61,7 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
- &phy->phy_events_pending);
+ clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
sas_deform_port(phy, 1);
@@ -95,8 +92,7 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock,
- &phy->phy_events_pending);
+ clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 42fd1f25b664..eb19c016d500 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -104,13 +104,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
/* add the phy to the port */
list_add_tail(&phy->port_phy_el, &port->phy_list);
+ sas_phy_set_target(phy, port->port_dev);
phy->port = port;
port->num_phys++;
port->phy_mask |= (1U << phy->id);
- if (!port->phy)
- port->phy = phy->phy;
-
if (*(u64 *)port->attached_sas_addr == 0) {
port->class = phy->class;
memcpy(port->attached_sas_addr, phy->attached_sas_addr,
@@ -125,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) {
- port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
+ port->port = sas_port_alloc(phy->phy->dev.parent, phy->id);
BUG_ON(!port->port);
sas_port_add(port->port);
}
@@ -170,13 +168,13 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
dev->pathways--;
if (port->num_phys == 1) {
- if (dev && gone)
- dev->gone = 1;
- sas_unregister_domain_devices(port);
+ sas_unregister_domain_devices(port, gone);
sas_port_delete(port->port);
port->port = NULL;
- } else
+ } else {
sas_port_delete_phy(port->port, phy->phy);
+ sas_device_set_phy(dev, port->port);
+ }
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
@@ -185,6 +183,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
spin_lock(&port->phy_list_lock);
list_del_init(&phy->port_phy_el);
+ sas_phy_set_target(phy, NULL);
phy->port = NULL;
port->num_phys--;
port->phy_mask &= ~(1U << phy->id);
@@ -213,8 +212,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
- sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
- &phy->port_events_pending);
+ clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
sas_form_port(phy);
}
@@ -227,8 +225,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
unsigned long flags;
u32 prim;
- sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
- &phy->port_events_pending);
+ clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
@@ -244,8 +241,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
- sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
- &phy->port_events_pending);
+ clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
@@ -256,8 +252,7 @@ void sas_porte_timer_event(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
- sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
- &phy->port_events_pending);
+ clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
@@ -268,8 +263,7 @@ void sas_porte_hard_reset(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
- sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
- &phy->port_events_pending);
+ clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
@@ -282,6 +276,8 @@ static void sas_init_port(struct asd_sas_port *port,
memset(port, 0, sizeof(*port));
port->id = i;
INIT_LIST_HEAD(&port->dev_list);
+ INIT_LIST_HEAD(&port->disco_list);
+ INIT_LIST_HEAD(&port->destroy_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b6e233d9a0a1..f0b9b7bf1882 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -49,27 +49,12 @@
#include <linux/scatterlist.h>
#include <linux/libata.h>
-/* ---------- SCSI Host glue ---------- */
-
-static void sas_scsi_task_done(struct sas_task *task)
+/* record final status and free the task */
+static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
- struct scsi_cmnd *sc = task->uldd_task;
int hs = 0, stat = 0;
- if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- /* Aborted tasks will be completed by the error handler */
- SAS_DPRINTK("task done but aborted\n");
- return;
- }
-
- if (unlikely(!sc)) {
- SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
- list_del_init(&task->list);
- sas_free_task(task);
- return;
- }
-
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
hs = DID_NO_CONNECT;
@@ -124,10 +109,41 @@ static void sas_scsi_task_done(struct sas_task *task)
break;
}
}
- ASSIGN_SAS_TASK(sc, NULL);
+
sc->result = (hs << 16) | stat;
+ ASSIGN_SAS_TASK(sc, NULL);
list_del_init(&task->list);
sas_free_task(task);
+}
+
+static void sas_scsi_task_done(struct sas_task *task)
+{
+ struct scsi_cmnd *sc = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct sas_ha_struct *ha = dev->port->ha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->done_lock, flags);
+ if (test_bit(SAS_HA_FROZEN, &ha->state))
+ task = NULL;
+ else
+ ASSIGN_SAS_TASK(sc, NULL);
+ spin_unlock_irqrestore(&dev->done_lock, flags);
+
+ if (unlikely(!task)) {
+ /* task will be completed by the error handler */
+ SAS_DPRINTK("task done but aborted\n");
+ return;
+ }
+
+ if (unlikely(!sc)) {
+ SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
+ list_del_init(&task->list);
+ sas_free_task(task);
+ return;
+ }
+
+ sas_end_task(sc, task);
sc->scsi_done(sc);
}
@@ -192,17 +208,15 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int res = 0;
/* If the device fell off, no sense in issuing commands */
- if (dev->gone) {
+ if (test_bit(SAS_DEV_GONE, &dev->state)) {
cmd->result = DID_BAD_TARGET << 16;
goto out_done;
}
if (dev_is_sata(dev)) {
- unsigned long flags;
-
- spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+ spin_lock_irq(dev->sata_dev.ap->lock);
res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
- spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+ spin_unlock_irq(dev->sata_dev.ap->lock);
return res;
}
@@ -235,24 +249,38 @@ out_done:
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
- struct sas_task *task = TO_SAS_TASK(cmd);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+ struct sas_task *task = TO_SAS_TASK(cmd);
+
+ /* At this point, we only get called following an actual abort
+ * of the task, so we should be guaranteed not to be racing with
+ * any completions from the LLD. Task is freed after this.
+ */
+ sas_end_task(cmd, task);
- /* remove the aborted task flag to allow the task to be
- * completed now. At this point, we only get called following
- * an actual abort of the task, so we should be guaranteed not
- * to be racing with any completions from the LLD (hence we
- * don't need the task state lock to clear the flag) */
- task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
- /* Now call task_done. However, task will be free'd after
- * this */
- task->task_done(task);
/* now finish the command and move it on to the error
* handler done list, this also takes it off the
- * error handler pending list */
+ * error handler pending list.
+ */
scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
+static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
+{
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_ha_struct *ha = dev->port->ha;
+ struct sas_task *task = TO_SAS_TASK(cmd);
+
+ if (!dev_is_sata(dev)) {
+ sas_eh_finish_cmd(cmd);
+ return;
+ }
+
+ /* report the timeout to libata */
+ sas_end_task(cmd, task);
+ list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
+}
+
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
struct scsi_cmnd *cmd, *n;
@@ -260,7 +288,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
cmd->device->lun == my_cmd->device->lun)
- sas_eh_finish_cmd(cmd);
+ sas_eh_defer_cmd(cmd);
}
}
@@ -295,6 +323,7 @@ enum task_disposition {
TASK_IS_DONE,
TASK_IS_ABORTED,
TASK_IS_AT_LU,
+ TASK_IS_NOT_AT_HA,
TASK_IS_NOT_AT_LU,
TASK_ABORT_FAILED,
};
@@ -311,19 +340,18 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
struct scsi_core *core = &ha->core;
struct sas_task *t, *n;
+ mutex_lock(&core->task_queue_flush);
spin_lock_irqsave(&core->task_queue_lock, flags);
- list_for_each_entry_safe(t, n, &core->task_queue, list) {
+ list_for_each_entry_safe(t, n, &core->task_queue, list)
if (task == t) {
list_del_init(&t->list);
- spin_unlock_irqrestore(&core->task_queue_lock,
- flags);
- SAS_DPRINTK("%s: task 0x%p aborted from "
- "task_queue\n",
- __func__, task);
- return TASK_IS_ABORTED;
+ break;
}
- }
spin_unlock_irqrestore(&core->task_queue_lock, flags);
+ mutex_unlock(&core->task_queue_flush);
+
+ if (task == t)
+ return TASK_IS_NOT_AT_HA;
}
for (i = 0; i < 5; i++) {
@@ -411,30 +439,26 @@ static int sas_recover_I_T(struct domain_device *dev)
return res;
}
-/* Find the sas_phy that's attached to this device */
-struct sas_phy *sas_find_local_phy(struct domain_device *dev)
+/* take a reference on the last known good phy for this device */
+struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
- struct domain_device *pdev = dev->parent;
- struct ex_phy *exphy = NULL;
- int i;
+ struct sas_ha_struct *ha = dev->port->ha;
+ struct sas_phy *phy;
+ unsigned long flags;
- /* Directly attached device */
- if (!pdev)
- return dev->port->phy;
+ /* a published domain device always has a valid phy, it may be
+ * stale, but it is never NULL
+ */
+ BUG_ON(!dev->phy);
- /* Otherwise look in the expander */
- for (i = 0; i < pdev->ex_dev.num_phys; i++)
- if (!memcmp(dev->sas_addr,
- pdev->ex_dev.ex_phy[i].attached_sas_addr,
- SAS_ADDR_SIZE)) {
- exphy = &pdev->ex_dev.ex_phy[i];
- break;
- }
+ spin_lock_irqsave(&ha->phy_port_lock, flags);
+ phy = dev->phy;
+ get_device(&phy->dev);
+ spin_unlock_irqrestore(&ha->phy_port_lock, flags);
- BUG_ON(!exphy);
- return exphy->phy;
+ return phy;
}
-EXPORT_SYMBOL_GPL(sas_find_local_phy);
+EXPORT_SYMBOL_GPL(sas_get_local_phy);
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
@@ -461,7 +485,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
- struct sas_phy *phy = sas_find_local_phy(dev);
+ struct sas_phy *phy = sas_get_local_phy(dev);
int res;
res = sas_phy_reset(phy, 1);
@@ -469,6 +493,8 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
kobject_name(&phy->dev.kobj),
res);
+ sas_put_local_phy(phy);
+
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
@@ -495,9 +521,7 @@ try_bus_reset:
return FAILED;
}
-static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
- struct list_head *work_q,
- struct list_head *done_q)
+static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
enum task_disposition res = TASK_IS_DONE;
@@ -505,13 +529,28 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
struct sas_internal *i = to_sas_internal(shost->transportt);
unsigned long flags;
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ LIST_HEAD(done);
-Again:
+ /* clean out any commands that won the completion vs eh race */
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
- struct sas_task *task = TO_SAS_TASK(cmd);
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_task *task;
+
+ spin_lock_irqsave(&dev->done_lock, flags);
+ /* by this point the lldd has either observed
+ * SAS_HA_FROZEN and is leaving the task alone, or has
+ * won the race with eh and decided to complete it
+ */
+ task = TO_SAS_TASK(cmd);
+ spin_unlock_irqrestore(&dev->done_lock, flags);
if (!task)
- continue;
+ list_move_tail(&cmd->eh_entry, &done);
+ }
+
+ Again:
+ list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
+ struct sas_task *task = TO_SAS_TASK(cmd);
list_del_init(&cmd->eh_entry);
@@ -531,15 +570,23 @@ Again:
cmd->eh_eflags = 0;
switch (res) {
+ case TASK_IS_NOT_AT_HA:
+ SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
+ __func__, task,
+ cmd->retries ? "retry" : "aborted");
+ if (cmd->retries)
+ cmd->retries--;
+ sas_eh_finish_cmd(cmd);
+ continue;
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
- sas_eh_finish_cmd(cmd);
+ sas_eh_defer_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
- sas_eh_finish_cmd(cmd);
+ sas_eh_defer_cmd(cmd);
continue;
case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -550,7 +597,7 @@ Again:
"recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
- sas_eh_finish_cmd(cmd);
+ sas_eh_defer_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
@@ -560,7 +607,8 @@ Again:
SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
task);
tmf_resp = sas_recover_I_T(task->dev);
- if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
+ if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
+ tmf_resp == -ENODEV) {
struct domain_device *dev = task->dev;
SAS_DPRINTK("I_T %016llx recovered\n",
SAS_ADDR(task->dev->sas_addr));
@@ -607,13 +655,16 @@ Again:
goto clear_q;
}
}
- return list_empty(work_q);
-clear_q:
+ out:
+ list_splice_tail(&done, work_q);
+ list_splice_tail_init(&ha->eh_ata_q, work_q);
+ return;
+
+ clear_q:
SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
-
- return list_empty(work_q);
+ goto out;
}
void sas_scsi_recover_host(struct Scsi_Host *shost)
@@ -627,12 +678,17 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
shost->host_eh_scheduled = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
- SAS_DPRINTK("Enter %s\n", __func__);
+ SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
+ __func__, shost->host_busy, shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
- * complete via the normal sas_task completion mechanism)
+ * complete via the normal sas_task completion mechanism),
+ * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
*/
- if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q))
+ set_bit(SAS_HA_FROZEN, &ha->state);
+ sas_eh_handle_sas_errors(shost, &eh_work_q);
+ clear_bit(SAS_HA_FROZEN, &ha->state);
+ if (list_empty(&eh_work_q))
goto out;
/*
@@ -641,59 +697,26 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
- if (!sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q))
- if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
- scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
+ sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
+ if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
+ scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
out:
+ if (ha->lldd_max_execute_num > 1)
+ wake_up_process(ha->core.queue_thread);
+
/* now link into libata eh --- if we have any ata devices */
sas_ata_strategy_handler(shost);
scsi_eh_flush_done_q(&ha->eh_done_q);
- SAS_DPRINTK("--- Exit %s\n", __func__);
- return;
+ SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n",
+ __func__, shost->host_busy, shost->host_failed);
}
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
- struct sas_task *task = TO_SAS_TASK(cmd);
- unsigned long flags;
- enum blk_eh_timer_return rtn;
-
- if (sas_ata_timed_out(cmd, task, &rtn))
- return rtn;
-
- if (!task) {
- cmd->request->timeout /= 2;
- SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
- cmd, task, (cmd->request->timeout ?
- "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
- if (!cmd->request->timeout)
- return BLK_EH_NOT_HANDLED;
- return BLK_EH_RESET_TIMER;
- }
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
- if (task->task_state_flags & SAS_TASK_STATE_DONE) {
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
- "BLK_EH_HANDLED\n", cmd, task);
- return BLK_EH_HANDLED;
- }
- if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
- "BLK_EH_RESET_TIMER\n",
- cmd, task);
- return BLK_EH_RESET_TIMER;
- }
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
- cmd, task);
+ scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
return BLK_EH_NOT_HANDLED;
}
@@ -737,27 +760,15 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
return found_dev;
}
-static inline struct domain_device *sas_find_target(struct scsi_target *starget)
-{
- struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
-
- return sas_find_dev_by_rphy(rphy);
-}
-
int sas_target_alloc(struct scsi_target *starget)
{
- struct domain_device *found_dev = sas_find_target(starget);
- int res;
+ struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
+ struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
if (!found_dev)
return -ENODEV;
- if (dev_is_sata(found_dev)) {
- res = sas_ata_init_host_and_port(found_dev, starget);
- if (res)
- return res;
- }
-
+ kref_get(&found_dev->kref);
starget->hostdata = found_dev;
return 0;
}
@@ -797,14 +808,6 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
return 0;
}
-void sas_slave_destroy(struct scsi_device *scsi_dev)
-{
- struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
-
- if (dev_is_sata(dev))
- sas_to_ata_dev(dev)->class = ATA_DEV_NONE;
-}
-
int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
@@ -871,9 +874,11 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
int res;
struct sas_internal *i = to_sas_internal(core->shost->transportt);
+ mutex_lock(&core->task_queue_flush);
spin_lock_irqsave(&core->task_queue_lock, flags);
while (!kthread_should_stop() &&
- !list_empty(&core->task_queue)) {
+ !list_empty(&core->task_queue) &&
+ !test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
if (can_queue >= 0) {
@@ -909,6 +914,7 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
}
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
+ mutex_unlock(&core->task_queue_flush);
}
/**
@@ -935,6 +941,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha)
struct scsi_core *core = &sas_ha->core;
spin_lock_init(&core->task_queue_lock);
+ mutex_init(&core->task_queue_flush);
core->task_queue_size = 0;
INIT_LIST_HEAD(&core->task_queue);
@@ -972,49 +979,6 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
}
/*
- * Call the LLDD task abort routine directly. This function is intended for
- * use by upper layers that need to tell the LLDD to abort a task.
- */
-int __sas_task_abort(struct sas_task *task)
-{
- struct sas_internal *si =
- to_sas_internal(task->dev->port->ha->core.shost->transportt);
- unsigned long flags;
- int res;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
- task->task_state_flags & SAS_TASK_STATE_DONE) {
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
- task);
- return 0;
- }
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- if (!si->dft->lldd_abort_task)
- return -ENODEV;
-
- res = si->dft->lldd_abort_task(task);
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
- (res == TMF_RESP_FUNC_COMPLETE))
- {
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- task->task_done(task);
- return 0;
- }
-
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
- task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- return -EAGAIN;
-}
-
-/*
* Tell an upper layer that it needs to initiate an abort for a given task.
* This should only ever be called by an LLDD.
*/
@@ -1043,27 +1007,15 @@ void sas_task_abort(struct sas_task *task)
}
}
-int sas_slave_alloc(struct scsi_device *scsi_dev)
-{
- struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
-
- if (dev_is_sata(dev))
- return ata_sas_port_init(dev->sata_dev.ap);
-
- return 0;
-}
-
void sas_target_destroy(struct scsi_target *starget)
{
- struct domain_device *found_dev = sas_find_target(starget);
+ struct domain_device *found_dev = starget->hostdata;
if (!found_dev)
return;
- if (dev_is_sata(found_dev))
- ata_sas_port_destroy(found_dev->sata_dev.ap);
-
- return;
+ starget->hostdata = NULL;
+ sas_put_device(found_dev);
}
static void sas_parse_addr(u8 *sas_addr, const char *p)
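The hunks above replace per-target SATA setup with plain reference counting: sas_target_alloc() takes a kref on the domain device it caches in starget->hostdata, and sas_target_destroy() drops it. A minimal userspace sketch of that get/put pairing, with all names (demo_dev, dev_get, dev_put) purely illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_dev {
            int refcount;                   /* stand-in for struct kref */
    };

    static struct demo_dev *dev_get(struct demo_dev *d)
    {
            d->refcount++;                  /* kref_get() in sas_target_alloc() */
            return d;
    }

    static void dev_put(struct demo_dev *d)
    {
            if (--d->refcount == 0)         /* sas_put_device() frees on last ref */
                    free(d);
    }

    int main(void)
    {
            struct demo_dev *d = calloc(1, sizeof(*d));

            d->refcount = 1;                /* reference held by discovery */
            dev_get(d);                     /* sas_target_alloc(): starget->hostdata */
            dev_put(d);                     /* sas_target_destroy() */
            dev_put(d);                     /* discovery drops its reference */
            return 0;
    }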
@@ -1108,16 +1060,12 @@ EXPORT_SYMBOL_GPL(sas_request_addr);
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
-EXPORT_SYMBOL_GPL(sas_slave_destroy);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
-EXPORT_SYMBOL_GPL(__sas_task_abort);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
-EXPORT_SYMBOL_GPL(sas_phy_enable);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
-EXPORT_SYMBOL_GPL(sas_slave_alloc);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 88928f00aa2d..fe5d396aca73 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.emulex.com *
# * *
@@ -22,6 +22,8 @@
ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
ccflags-$(GCOV) += -O0
+ccflags-y += -Werror
+
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 825f9307417a..3a1ffdd6d831 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -534,6 +534,7 @@ struct lpfc_hba {
void (*lpfc_scsi_prep_cmnd)
(struct lpfc_vport *, struct lpfc_scsi_buf *,
struct lpfc_nodelist *);
+
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
(struct lpfc_hba *, uint32_t,
@@ -541,8 +542,6 @@ struct lpfc_hba {
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
-
-
IOCB_t * (*lpfc_get_iocb_from_iocbq)
(struct lpfc_iocbq *);
void (*lpfc_scsi_cmd_iocb_cmpl)
@@ -551,10 +550,12 @@ struct lpfc_hba {
/* MBOX interface function jump table entries */
int (*lpfc_sli_issue_mbox)
(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
/* Slow-path IOCB process function jump table entries */
void (*lpfc_sli_handle_slow_ring_event)
(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t mask);
+
/* INIT device interface function jump table entries */
int (*lpfc_sli_hbq_to_firmware)
(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
@@ -573,6 +574,10 @@ struct lpfc_hba {
int (*lpfc_selective_reset)
(struct lpfc_hba *);
+ int (*lpfc_bg_scsi_prep_dma_buf)
+ (struct lpfc_hba *, struct lpfc_scsi_buf *);
+ /* Add new entries here */
+
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
@@ -835,9 +840,12 @@ struct lpfc_hba {
struct dentry *debug_dumpData; /* BlockGuard BPL */
struct dentry *debug_dumpDif; /* BlockGuard BPL */
struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
+ struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
+ struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
struct dentry *debug_writeGuard; /* inject write guard_tag errors */
struct dentry *debug_writeApp; /* inject write app_tag errors */
struct dentry *debug_writeRef; /* inject write ref_tag errors */
+ struct dentry *debug_readGuard; /* inject read guard_tag errors */
struct dentry *debug_readApp; /* inject read app_tag errors */
struct dentry *debug_readRef; /* inject read ref_tag errors */
@@ -845,10 +853,13 @@ struct lpfc_hba {
uint32_t lpfc_injerr_wgrd_cnt;
uint32_t lpfc_injerr_wapp_cnt;
uint32_t lpfc_injerr_wref_cnt;
+ uint32_t lpfc_injerr_rgrd_cnt;
uint32_t lpfc_injerr_rapp_cnt;
uint32_t lpfc_injerr_rref_cnt;
+ uint32_t lpfc_injerr_nportid;
+ struct lpfc_name lpfc_injerr_wwpn;
sector_t lpfc_injerr_lba;
-#define LPFC_INJERR_LBA_OFF (sector_t)0xffffffffffffffff
+#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
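LPFC_INJERR_LBA_OFF now uses (sector_t)(-1) rather than a 64-bit all-ones literal; -1 converts to all-ones at whatever width sector_t actually has, so the sentinel reads the same on 32-bit sector_t configurations. A small sketch with a 32-bit stand-in type (the typedef below is the assumption):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t sector_t;              /* assume a 32-bit sector_t build */

    int main(void)
    {
            sector_t off = (sector_t)(-1);  /* LPFC_INJERR_LBA_OFF */

            printf("sentinel = 0x%x\n", off);               /* 0xffffffff */
            /* the old 64-bit literal truncated to the same value: */
            printf("same as old literal? %d\n",
                   off == (sector_t)0xffffffffffffffffULL);
            return 0;
    }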
@@ -901,6 +912,8 @@ struct lpfc_hba {
atomic_t fast_event_count;
uint32_t fcoe_eventtag;
uint32_t fcoe_eventtag_at_fcf_scan;
+ uint32_t fcoe_cvl_eventtag;
+ uint32_t fcoe_cvl_eventtag_attn;
struct lpfc_fcf fcf;
uint8_t fc_map[3];
uint8_t valid_vlan;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f6697cb0e216..5eb2bc116183 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -353,7 +353,7 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
uint32_t if_type;
uint8_t sli_family;
- char fwrev[32];
+ char fwrev[FW_REV_STR_SIZE];
int len;
lpfc_decode_firmware_rev(phba, fwrev, 1);
@@ -922,11 +922,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
rc = lpfc_sli4_pdev_status_reg_wait(phba);
if (rc == -EPERM) {
- /* no privilage for reset, restore if needed */
- if (before_fc_flag & FC_OFFLINE_MODE)
- goto out;
+ /* no privilege for reset */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3150 No privilege to perform the requested "
+ "access: x%x\n", reg_val);
} else if (rc == -EIO) {
/* reset failed, there is nothing more we can do */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3153 Fail to perform the requested "
+ "access: x%x\n", reg_val);
return rc;
}
@@ -2571,7 +2575,7 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
# objects that have been registered with the nameserver after login.
*/
-LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1,
+LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
"Deregister nameserver objects before LOGO");
/*
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 56a86baece5b..141e4b40bb55 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -589,7 +589,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
}
cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmdiocbq->iocb.ulpContext = rpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+ else
+ cmdiocbq->iocb.ulpContext = rpi;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->context1 = NULL;
cmdiocbq->context2 = NULL;
@@ -1768,7 +1771,7 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
bf_set(lpfc_mbx_set_diag_state_link_type,
&link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_SERDES);
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
@@ -3977,7 +3980,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
- "subsys_fcoe, opcode:x%x\n",
+ "subsys_comn, opcode:x%x\n",
opcode);
rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
nemb_mse, dmabuf);
@@ -3985,7 +3988,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
default:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3107 Reject SLI_CONFIG "
- "subsys_fcoe, opcode:x%x\n",
+ "subsys_comn, opcode:x%x\n",
opcode);
rc = -EPERM;
break;
@@ -4556,7 +4559,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ sizeof(MAILBOX_t));
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
- if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
+ /* Let type 4 (well known data) through because the data is
+ * returned in varwords[4-8];
+ * otherwise check the receive length and fetch the buffer addr
+ */
+ if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
+ (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
/* rebuild the command for sli4 using our own buffers
* like we do for biu diags
*/
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 26924b7a6cde..330dd7192a7f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -462,3 +462,4 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *);
int lpfc_selective_reset(struct lpfc_hba *);
int lpfc_sli4_read_config(struct lpfc_hba *phba);
int lpfc_scsi_buf_update(struct lpfc_hba *phba);
+void lpfc_sli4_node_prep(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 707081d0a226..93e96b3c9097 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1076,7 +1076,7 @@ int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
- char fwrev[16];
+ char fwrev[FW_REV_STR_SIZE];
int n;
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
@@ -1834,7 +1834,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
uint8_t *fwname;
if (phba->sli_rev == LPFC_SLI_REV4)
- sprintf(fwrevision, "%s", vp->rev.opFwName);
+ snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
else if (vp->rev.rBit) {
if (psli->sli_flag & LPFC_SLI_ACTIVE)
rev = vp->rev.sli2FwRev;
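This hunk is part of a wider sprintf-to-snprintf conversion against the new FW_REV_STR_SIZE bound: a too-long firmware name can now only truncate, never overrun the buffer. A minimal userspace sketch (the sample name string is made up):

    #include <stdio.h>

    #define FW_REV_STR_SIZE 32

    int main(void)
    {
            char fwrev[FW_REV_STR_SIZE];
            const char *name = "2.82a4-some-firmware-name-too-long-to-fit-in-here";
            int n = snprintf(fwrev, sizeof(fwrev), "%s", name);

            if (n >= (int)sizeof(fwrev))
                    printf("truncated from %d to %zu bytes\n",
                           n, sizeof(fwrev) - 1);
            printf("fwrev = \"%s\"\n", fwrev);
            return 0;
    }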
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3587a3fe8fcb..af04b0d6688d 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -997,36 +997,41 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
return nbytes;
}
-static int
-lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t
lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct dentry *dent = file->f_dentry;
struct lpfc_hba *phba = file->private_data;
- char cbuf[16];
+ char cbuf[32];
+ uint64_t tmp = 0;
int cnt = 0;
if (dent == phba->debug_writeGuard)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
else if (dent == phba->debug_writeApp)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
else if (dent == phba->debug_writeRef)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
+ else if (dent == phba->debug_readGuard)
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
else if (dent == phba->debug_readApp)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
else if (dent == phba->debug_readRef)
- cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt);
- else if (dent == phba->debug_InjErrLBA)
- cnt = snprintf(cbuf, 16, "0x%lx\n",
- (unsigned long) phba->lpfc_injerr_lba);
- else
+ cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+ else if (dent == phba->debug_InjErrNPortID)
+ cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+ else if (dent == phba->debug_InjErrWWPN) {
+ memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
+ tmp = cpu_to_be64(tmp);
+ cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+ } else if (dent == phba->debug_InjErrLBA) {
+ if (phba->lpfc_injerr_lba == (sector_t)(-1))
+ cnt = snprintf(cbuf, 32, "off\n");
+ else
+ cnt = snprintf(cbuf, 32, "0x%llx\n",
+ (uint64_t) phba->lpfc_injerr_lba);
+ } else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0547 Unknown debugfs error injection entry\n");
@@ -1040,7 +1045,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
struct dentry *dent = file->f_dentry;
struct lpfc_hba *phba = file->private_data;
char dstbuf[32];
- unsigned long tmp;
+ uint64_t tmp = 0;
int size;
memset(dstbuf, 0, 32);
@@ -1048,7 +1053,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
if (copy_from_user(dstbuf, buf, size))
return 0;
- if (strict_strtoul(dstbuf, 0, &tmp))
+ if (dent == phba->debug_InjErrLBA) {
+ if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && (dstbuf[2] == 'f'))
+ tmp = (uint64_t)(-1);
+ }
+
+ if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
return 0;
if (dent == phba->debug_writeGuard)
@@ -1057,13 +1067,20 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
else if (dent == phba->debug_writeRef)
phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
+ else if (dent == phba->debug_readGuard)
+ phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
else if (dent == phba->debug_readApp)
phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
else if (dent == phba->debug_readRef)
phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
else if (dent == phba->debug_InjErrLBA)
phba->lpfc_injerr_lba = (sector_t)tmp;
- else
+ else if (dent == phba->debug_InjErrNPortID)
+ phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
+ else if (dent == phba->debug_InjErrWWPN) {
+ tmp = cpu_to_be64(tmp);
+ memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
+ } else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0548 Unknown debugfs error injection entry\n");
@@ -3517,7 +3534,7 @@ static const struct file_operations lpfc_debugfs_op_dumpDif = {
#undef lpfc_debugfs_op_dif_err
static const struct file_operations lpfc_debugfs_op_dif_err = {
.owner = THIS_MODULE,
- .open = lpfc_debugfs_dif_err_open,
+ .open = simple_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_dif_err_read,
.write = lpfc_debugfs_dif_err_write,
@@ -3945,6 +3962,28 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ snprintf(name, sizeof(name), "InjErrNPortID");
+ phba->debug_InjErrNPortID =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_InjErrNPortID) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0809 Cannot create debugfs InjErrNPortID\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "InjErrWWPN");
+ phba->debug_InjErrWWPN =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_InjErrWWPN) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0810 Cannot create debugfs InjErrWWPN\n");
+ goto debug_failed;
+ }
+
snprintf(name, sizeof(name), "writeGuardInjErr");
phba->debug_writeGuard =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -3978,6 +4017,17 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ snprintf(name, sizeof(name), "readGuardInjErr");
+ phba->debug_readGuard =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dif_err);
+ if (!phba->debug_readGuard) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0808 Cannot create debugfs readGuard\n");
+ goto debug_failed;
+ }
+
snprintf(name, sizeof(name), "readAppInjErr");
phba->debug_readApp =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -4306,6 +4356,14 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
phba->debug_InjErrLBA = NULL;
}
+ if (phba->debug_InjErrNPortID) { /* InjErrNPortID */
+ debugfs_remove(phba->debug_InjErrNPortID);
+ phba->debug_InjErrNPortID = NULL;
+ }
+ if (phba->debug_InjErrWWPN) {
+ debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
+ phba->debug_InjErrWWPN = NULL;
+ }
if (phba->debug_writeGuard) {
debugfs_remove(phba->debug_writeGuard); /* writeGuard */
phba->debug_writeGuard = NULL;
@@ -4318,6 +4376,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_writeRef); /* writeRef */
phba->debug_writeRef = NULL;
}
+ if (phba->debug_readGuard) {
+ debugfs_remove(phba->debug_readGuard); /* readGuard */
+ phba->debug_readGuard = NULL;
+ }
if (phba->debug_readApp) {
debugfs_remove(phba->debug_readApp); /* readApp */
phba->debug_readApp = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7afc757338de..3407b39e0a3f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -925,9 +925,17 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* due to new FCF discovery
*/
if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
- (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
- !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
+ (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+ if (phba->link_state < LPFC_LINK_UP)
+ goto stop_rr_fcf_flogi;
+ if ((phba->fcoe_cvl_eventtag_attn ==
+ phba->fcoe_cvl_eventtag) &&
+ (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+ goto stop_rr_fcf_flogi;
+ else
+ phba->fcoe_cvl_eventtag_attn =
+ phba->fcoe_cvl_eventtag;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
"2611 FLOGI failed on FCF (x%x), "
"status:x%x/x%x, tmo:x%x, perform "
@@ -943,6 +951,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+stop_rr_fcf_flogi:
/* FLOGI failure */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
@@ -1526,7 +1535,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
memcpy(&ndlp->active_rrqs.xri_bitmap,
&rrq.xri_bitmap,
sizeof(ndlp->active_rrqs.xri_bitmap));
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
/* Since we are swapping the ndlp passed in with the new one
* and the did has already been swapped, copy over the
* state and names.
@@ -1536,6 +1544,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
sizeof(struct lpfc_name));
new_ndlp->nlp_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
/* Fix up the rport accordingly */
rport = ndlp->rport;
if (rport) {
@@ -7172,7 +7181,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* FDISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0126 FDISC failed. (%d/%d)\n",
+ "0126 FDISC failed. (x%x/x%x)\n",
irsp->ulpStatus, irsp->un.ulpWord[4]);
goto fdisc_failed;
}
@@ -7283,6 +7292,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int rc;
vport->port_state = LPFC_FDISC;
+ vport->fc_myDID = 0;
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
ELS_CMD_FDISC);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 678a4b11059c..b507536dc5b5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2843,7 +2843,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- if (mboxq->u.mb.mbxStatus) {
+ /*
+ * VFI not supported for interface type 0, so ignore any mailbox
+ * error (except VFI in use) and continue with the discovery.
+ */
+ if (mboxq->u.mb.mbxStatus &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_0) &&
+ mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"2018 REG_VFI mbxStatus error x%x "
"HBA state x%x\n",
@@ -2977,9 +2984,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"topology\n");
/* Get Loop Map information */
if (bf_get(lpfc_mbx_read_top_il, la)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock(shost->host_lock);
vport->fc_flag |= FC_LBIT;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock(shost->host_lock);
}
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
@@ -3029,9 +3036,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
- spin_lock_irq(shost->host_lock);
+ spin_lock(shost->host_lock);
vport->fc_flag |= FC_LBIT;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock(shost->host_lock);
}
spin_unlock_irq(&phba->hbalock);
@@ -5332,6 +5339,10 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
uint16_t *rpi = param;
+ /* check for active node */
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ return 0;
+
return ndlp->nlp_rpi == *rpi;
}
@@ -5669,14 +5680,13 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
ret = 1;
spin_unlock_irq(shost->host_lock);
goto out;
- } else {
+ } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ ret = 1;
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2624 RPI %x DID %x flg %x still "
- "logged in\n",
- ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag);
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
- ret = 1;
+ "2624 RPI %x DID %x flag %x "
+ "still logged in\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag);
}
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7245bead3755..5f280b5ae3db 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -70,6 +70,7 @@
/* vendor ID used in SCSI netlink calls */
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
+#define FW_REV_STR_SIZE 32
/* Common Transport structures and definitions */
union CtRevisionId {
@@ -2567,6 +2568,8 @@ typedef struct {
#define DMP_MEM_REG 0x1
#define DMP_NV_PARAMS 0x2
+#define DMP_LMSD 0x3 /* Link Module Serial Data */
+#define DMP_WELL_KNOWN 0x4
#define DMP_REGION_VPD 0xe
#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index e5bfa7f334e3..91f09761bd32 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -321,6 +321,10 @@ struct lpfc_cqe {
#define CQE_STATUS_CMD_REJECT 0xb
#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
+#define CQE_STATUS_DI_ERROR 0x16
+
+/* Used when mapping CQE status to IOCB */
+#define LPFC_IOCB_STATUS_MASK 0xf
/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
#define CQE_HW_STATUS_NO_ERR 0x0
@@ -334,6 +338,12 @@ struct lpfc_cqe {
#define CQE_CODE_XRI_ABORTED 0x5
#define CQE_CODE_RECEIVE_V1 0x9
+/*
+ * Define mask value for xri_aborted and wcqe completed CQE extended status.
+ * Currently, extended status is limited to 9 bits (0x0 -> 0x103).
+ */
+#define WCQE_PARAM_MASK 0x1FF
+
/* completion queue entry for wqe completions */
struct lpfc_wcqe_complete {
uint32_t word0;
@@ -348,6 +358,21 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_hw_status_WORD word0
uint32_t total_data_placed;
uint32_t parameter;
+#define lpfc_wcqe_c_bg_edir_SHIFT 5
+#define lpfc_wcqe_c_bg_edir_MASK 0x00000001
+#define lpfc_wcqe_c_bg_edir_WORD parameter
+#define lpfc_wcqe_c_bg_tdpv_SHIFT 3
+#define lpfc_wcqe_c_bg_tdpv_MASK 0x00000001
+#define lpfc_wcqe_c_bg_tdpv_WORD parameter
+#define lpfc_wcqe_c_bg_re_SHIFT 2
+#define lpfc_wcqe_c_bg_re_MASK 0x00000001
+#define lpfc_wcqe_c_bg_re_WORD parameter
+#define lpfc_wcqe_c_bg_ae_SHIFT 1
+#define lpfc_wcqe_c_bg_ae_MASK 0x00000001
+#define lpfc_wcqe_c_bg_ae_WORD parameter
+#define lpfc_wcqe_c_bg_ge_SHIFT 0
+#define lpfc_wcqe_c_bg_ge_MASK 0x00000001
+#define lpfc_wcqe_c_bg_ge_WORD parameter
uint32_t word3;
#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
@@ -359,8 +384,8 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_pv_MASK 0x00000001
#define lpfc_wcqe_c_pv_WORD word3
#define lpfc_wcqe_c_priority_SHIFT 24
-#define lpfc_wcqe_c_priority_MASK 0x00000007
-#define lpfc_wcqe_c_priority_WORD word3
+#define lpfc_wcqe_c_priority_MASK 0x00000007
+#define lpfc_wcqe_c_priority_WORD word3
#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
@@ -715,12 +740,20 @@ struct lpfc_register {
#define lpfc_eqcq_doorbell_eqci_SHIFT 9
#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
#define lpfc_eqcq_doorbell_eqci_WORD word0
-#define lpfc_eqcq_doorbell_cqid_SHIFT 0
-#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
-#define lpfc_eqcq_doorbell_cqid_WORD word0
-#define lpfc_eqcq_doorbell_eqid_SHIFT 0
-#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
-#define lpfc_eqcq_doorbell_eqid_WORD word0
+#define lpfc_eqcq_doorbell_cqid_lo_SHIFT 0
+#define lpfc_eqcq_doorbell_cqid_lo_MASK 0x03FF
+#define lpfc_eqcq_doorbell_cqid_lo_WORD word0
+#define lpfc_eqcq_doorbell_cqid_hi_SHIFT 11
+#define lpfc_eqcq_doorbell_cqid_hi_MASK 0x001F
+#define lpfc_eqcq_doorbell_cqid_hi_WORD word0
+#define lpfc_eqcq_doorbell_eqid_lo_SHIFT 0
+#define lpfc_eqcq_doorbell_eqid_lo_MASK 0x01FF
+#define lpfc_eqcq_doorbell_eqid_lo_WORD word0
+#define lpfc_eqcq_doorbell_eqid_hi_SHIFT 11
+#define lpfc_eqcq_doorbell_eqid_hi_MASK 0x001F
+#define lpfc_eqcq_doorbell_eqid_hi_WORD word0
+#define LPFC_CQID_HI_FIELD_SHIFT 10
+#define LPFC_EQID_HI_FIELD_SHIFT 9
#define LPFC_BMBX 0x0160
#define lpfc_bmbx_addr_SHIFT 2
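The doorbell CQID/EQID fields are split into lo/hi halves so ids wider than 10 (or 9) bits fit: bits 0-9 carry the low part and bits 11-15 the remainder, with LPFC_CQID_HI_FIELD_SHIFT marking where the split happens. A sketch of packing a wide CQID under those shift values (the helper function is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define CQID_LO_MASK            0x03FF  /* cqid_lo: bits 0..9   */
    #define CQID_HI_SHIFT           11      /* cqid_hi: bits 11..15 */
    #define CQID_HI_FIELD_SHIFT     10      /* LPFC_CQID_HI_FIELD_SHIFT */

    static uint32_t pack_cqid(uint32_t word0, uint32_t cqid)
    {
            word0 |= cqid & CQID_LO_MASK;
            word0 |= ((cqid >> CQID_HI_FIELD_SHIFT) & 0x1F) << CQID_HI_SHIFT;
            return word0;
    }

    int main(void)
    {
            /* CQ id 0x4ACE: low ten bits 0x2CE, high five bits 0x12 */
            printf("doorbell word0 = 0x%08x\n", pack_cqid(0, 0x4ACE));
            return 0;
    }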
@@ -3313,7 +3346,11 @@ struct xmit_bls_rsp64_wqe {
uint32_t rsrvd4;
struct wqe_did wqe_dest;
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4];
+ uint32_t word12;
+#define xmit_bls_rsp64_temprpi_SHIFT 0
+#define xmit_bls_rsp64_temprpi_MASK 0x0000ffff
+#define xmit_bls_rsp64_temprpi_WORD word12
+ uint32_t rsvd_13_15[3];
};
struct wqe_rctl_dfctl {
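The temprpi field above follows the header's usual SHIFT/MASK/WORD accessor convention, consumed through bf_set()/bf_get(). A standalone stand-in showing what such an accessor expands to (this bf_set_word() is a simplified sketch, not the driver's macro):

    #include <stdio.h>
    #include <stdint.h>

    #define temprpi_SHIFT   0
    #define temprpi_MASK    0x0000ffff

    static uint32_t bf_set_word(uint32_t word, unsigned int shift,
                                uint32_t mask, uint32_t val)
    {
            word &= ~(mask << shift);       /* clear the field */
            word |= (val & mask) << shift;  /* insert the new value */
            return word;
    }

    int main(void)
    {
            uint32_t word12 = 0;

            word12 = bf_set_word(word12, temprpi_SHIFT, temprpi_MASK, 0xBEEF);
            printf("word12 = 0x%08x\n", word12);
            return 0;
    }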
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index dfea2dada02c..9598fdcb08ab 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -32,6 +32,7 @@
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
+#include <linux/miscdevice.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -1474,8 +1475,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
phba->sli4_hba.u.if_type2.STATUSregaddr,
&portstat_reg.word0);
/* consider PCI bus read error as pci_channel_offline */
- if (pci_rd_rc1 == -EIO)
+ if (pci_rd_rc1 == -EIO) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3151 PCI bus read access failure: x%x\n",
+ readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
return;
+ }
reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
@@ -1525,6 +1530,9 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
/* fall through for not able to recover */
}
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3152 Unrecoverable error, bring the port "
+ "offline\n");
lpfc_sli4_offline_eratt(phba);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
@@ -2333,13 +2341,20 @@ lpfc_cleanup(struct lpfc_vport *vport)
continue;
}
+ /* take care of nodes in unused state before the state
+ * machine takes action.
+ */
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ lpfc_nlp_put(ndlp);
+ continue;
+ }
+
if (ndlp->nlp_type & NLP_FABRIC)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
-
}
/* At this point, ALL ndlp's should be gone
@@ -2513,6 +2528,42 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
}
/**
+ * lpfc_sli4_node_prep - Assign RPIs for active nodes.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Allocate RPIs for all active remote nodes. This is needed whenever
+ * an SLI4 adapter is reset and the driver is not unloading. Its purpose
+ * is to fix up the temporary RPI assignments.
+ **/
+void
+lpfc_sli4_node_prep(struct lpfc_hba *phba)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_vport **vports;
+ int i;
+
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->load_flag & FC_UNLOADING)
+ continue;
+
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &vports[i]->fc_nodes,
+ nlp_listp) {
+ if (NLP_CHK_NODE_ACT(ndlp))
+ ndlp->nlp_rpi =
+ lpfc_sli4_alloc_rpi(phba);
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
* lpfc_online - Initialize and bring a HBA online
* @phba: pointer to lpfc hba data structure.
*
@@ -2654,6 +2705,13 @@ lpfc_offline_prep(struct lpfc_hba * phba)
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
+ /*
+ * Whenever an SLI4 port goes offline, free the
+ * RPI. Get a new RPI when the adapter port
+ * comes back online.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
lpfc_unreg_rpi(vports[i], ndlp);
}
}
@@ -2726,9 +2784,13 @@ lpfc_scsi_buf_update(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
spin_lock(&phba->scsi_buf_list_lock);
- list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
sb->cur_iocbq.sli4_xritag =
phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+ set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.max_cfg_param.xri_used++;
+ phba->sli4_hba.xri_count++;
+ }
spin_unlock(&phba->scsi_buf_list_lock);
spin_unlock_irq(&phba->hbalock);
return 0;
@@ -3663,6 +3725,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
break;
case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
+ phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2549 FCF (x%x) disconnected from network, "
"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -3724,6 +3787,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
}
break;
case LPFC_FIP_EVENT_TYPE_CVL:
+ phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -4327,6 +4391,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs, sli_family;
+ int sges_per_segment;
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
@@ -4390,6 +4455,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+ /* With BlockGuard we can have multiple SGEs per Data Segment */
+ sges_per_segment = 1;
+ if (phba->cfg_enable_bg)
+ sges_per_segment = 2;
+
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4398,7 +4468,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* sgl sizes of must be a power of 2.
*/
buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
+ (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
+ sizeof(struct sli4_sge)));
sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
@@ -4415,6 +4486,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
default:
break;
}
+
for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
dma_buf_size < max_buf_size && buf_size > dma_buf_size;
dma_buf_size = dma_buf_size << 1)
@@ -5158,8 +5230,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
* rpi is normalized to a zero base because the physical rpi is
* port based.
*/
- curr_rpi_range = phba->sli4_hba.next_rpi -
- phba->sli4_hba.max_cfg_param.rpi_base;
+ curr_rpi_range = phba->sli4_hba.next_rpi;
spin_unlock_irq(&phba->hbalock);
/*
@@ -5750,10 +5821,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type2.
ERR2regaddr);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2888 Port Error Detected "
- "during POST: "
- "port status reg 0x%x, "
- "port_smphr reg 0x%x, "
+ "2888 Unrecoverable port error "
+ "following POST: port status reg "
+ "0x%x, port_smphr reg 0x%x, "
"error 1=0x%x, error 2=0x%x\n",
reg_data.word0,
portsmphr_reg.word0,
@@ -6074,7 +6144,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
@@ -7163,6 +7232,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
uint32_t rdy_chk, num_resets = 0, reset_again = 0;
union lpfc_sli4_cfg_shdr *shdr;
struct lpfc_register reg_data;
+ uint16_t devid;
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
@@ -7209,7 +7279,9 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
LPFC_SLIPORT_INIT_PORT);
writel(reg_data.word0, phba->sli4_hba.u.if_type2.
CTRLregaddr);
-
+ /* flush */
+ pci_read_config_word(phba->pcidev,
+ PCI_DEVICE_ID, &devid);
/*
* Poll the Port Status Register and wait for RDY for
* up to 10 seconds. If the port doesn't respond, treat
@@ -7223,19 +7295,17 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
rc = -ENODEV;
goto out;
}
- if (bf_get(lpfc_sliport_status_rdy, &reg_data))
- break;
- if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+ if (bf_get(lpfc_sliport_status_rn, &reg_data))
reset_again++;
+ if (bf_get(lpfc_sliport_status_rdy, &reg_data))
break;
- }
}
/*
* If the port responds to the init request with
* reset needed, delay for a bit and restart the loop.
*/
- if (reset_again) {
+ if (reset_again && (rdy_chk < 1000)) {
msleep(10);
reset_again = 0;
continue;
@@ -7249,11 +7319,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
phba->work_status[1] = readl(
phba->sli4_hba.u.if_type2.ERR2regaddr);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2890 Port Error Detected "
- "during Port Reset: "
- "port status reg 0x%x, "
+ "2890 Port error detected during port "
+ "reset(%d): port status reg 0x%x, "
"error 1=0x%x, error 2=0x%x\n",
- reg_data.word0,
+ num_resets, reg_data.word0,
phba->work_status[0],
phba->work_status[1]);
rc = -ENODEV;
@@ -8112,6 +8181,9 @@ lpfc_unset_hba(struct lpfc_hba *phba)
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(shost->host_lock);
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+
lpfc_stop_hba_timers(phba);
phba->pport->work_port_events = 0;
@@ -8644,6 +8716,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/* Final cleanup of txcmplq and reset the HBA */
lpfc_sli_brdrestart(phba);
+ kfree(phba->vpi_bmask);
+ kfree(phba->vpi_ids);
+
lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
@@ -9058,7 +9133,7 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
int
lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
{
- char fwrev[32];
+ char fwrev[FW_REV_STR_SIZE];
struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
struct list_head dma_buffer_list;
int i, rc = 0;
@@ -10012,6 +10087,36 @@ lpfc_io_resume(struct pci_dev *pdev)
return;
}
+/**
+ * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
+ * @inode: pointer to the inode representing the lpfcmgmt device
+ * @filep: pointer to the file representing the open lpfcmgmt device
+ *
+ * This routine takes a reference on the lpfc module whenever the
+ * character device is opened.
+ **/
+static int
+lpfc_mgmt_open(struct inode *inode, struct file *filep)
+{
+ try_module_get(THIS_MODULE);
+ return 0;
+}
+
+/**
+ * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
+ * @inode: pointer to the inode representing the lpfcmgmt device
+ * @filep: pointer to the file representing the open lpfcmgmt device
+ *
+ * This routine drops a reference on the lpfc module when the
+ * character device is closed.
+ **/
+static int
+lpfc_mgmt_release(struct inode *inode, struct file *filep)
+{
+ module_put(THIS_MODULE);
+ return 0;
+}
+
static struct pci_device_id lpfc_id_table[] = {
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
PCI_ANY_ID, PCI_ANY_ID, },
@@ -10124,6 +10229,17 @@ static struct pci_driver lpfc_driver = {
.err_handler = &lpfc_err_handler,
};
+static const struct file_operations lpfc_mgmt_fop = {
+ .open = lpfc_mgmt_open,
+ .release = lpfc_mgmt_release,
+};
+
+static struct miscdevice lpfc_mgmt_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "lpfcmgmt",
+ .fops = &lpfc_mgmt_fop,
+};
+
/**
* lpfc_init - lpfc module initialization routine
*
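From userspace, the effect of lpfc_mgmt_open()/lpfc_mgmt_release() above is that holding /dev/lpfcmgmt open pins the module. A minimal sketch of a client, assuming the node exists and is readable:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/lpfcmgmt", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/lpfcmgmt");   /* driver absent or no access */
                    return 1;
            }
            /* while fd stays open, module unload should fail with EBUSY */
            close(fd);
            return 0;
    }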
@@ -10144,6 +10260,11 @@ lpfc_init(void)
printk(LPFC_MODULE_DESC "\n");
printk(LPFC_COPYRIGHT "\n");
+ error = misc_register(&lpfc_mgmt_dev);
+ if (error)
+ printk(KERN_ERR "Could not register lpfcmgmt device, "
+ "misc_register returned with status %d", error);
+
if (lpfc_enable_npiv) {
lpfc_transport_functions.vport_create = lpfc_vport_create;
lpfc_transport_functions.vport_delete = lpfc_vport_delete;
@@ -10180,6 +10301,7 @@ lpfc_init(void)
static void __exit
lpfc_exit(void)
{
+ misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
if (lpfc_enable_npiv)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e8bb00559943..15ca2a9a0cdd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -48,6 +48,10 @@ static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_name *nn, struct lpfc_name *pn)
{
+ /* First, we MUST have a RPI registered */
+ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
+ return 0;
+
/* Compare the ADISC rsp WWNN / WWPN matches our internal node
* table entry for that node.
*/
@@ -385,6 +389,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!mbox)
goto out;
+ /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_unreg_rpi(vport, ndlp);
+
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
(uint8_t *) sp, mbox, ndlp->nlp_rpi);
if (rc) {
@@ -432,11 +440,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
ndlp, mbox);
+ if (rc)
+ mempool_free(mbox, phba->mbox_mem_pool);
return 1;
}
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+ if (rc)
+ mempool_free(mbox, phba->mbox_mem_pool);
return 1;
out:
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
@@ -445,11 +457,43 @@ out:
return 0;
}
+/**
+ * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object
+ *
+ * This routine is invoked to issue a completion to a rcv'ed
+ * ADISC or PDISC after the paused RPI has been resumed.
+ **/
+static void
+lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_nodelist *ndlp;
+ uint32_t cmd;
+
+ elsiocb = (struct lpfc_iocbq *)mboxq->context1;
+ ndlp = (struct lpfc_nodelist *) mboxq->context2;
+ vport = mboxq->vport;
+ cmd = elsiocb->drvrTimeout;
+
+ if (cmd == ELS_CMD_ADISC) {
+ lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
+ } else {
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
+ ndlp, NULL);
+ }
+ kfree(elsiocb);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd;
struct serv_parm *sp;
struct lpfc_name *pnn, *ppn;
@@ -475,12 +519,43 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
icmd = &cmdiocb->iocb;
if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+
+ /*
+ * As soon as we send ACC, the remote NPort can
+ * start sending us data. Thus, for SLI4 we must
+ * resume the RPI before the ACC goes out.
+ */
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
+ GFP_KERNEL);
+ if (elsiocb) {
+
+ /* Save info from cmd IOCB used in rsp */
+ memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
+
+ /* Save the ELS cmd */
+ elsiocb->drvrTimeout = cmd;
+
+ lpfc_sli4_resume_rpi(ndlp,
+ lpfc_mbx_cmpl_resume_rpi, elsiocb);
+ goto out;
+ }
+ }
+
if (cmd == ELS_CMD_ADISC) {
lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
} else {
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
- NULL);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+ ndlp, NULL);
}
+out:
+ /* If we are authenticated, move to the proper state */
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+ else
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
return 1;
}
/* Reject this request because invalid parameters */
@@ -1229,7 +1304,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
}
if (phba->sli_rev == LPFC_SLI_REV4) {
- rc = lpfc_sli4_resume_rpi(ndlp);
+ rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
if (rc) {
/* Stay in state and retry. */
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c60f5d0b3869..88f3a83dbd2e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -39,8 +39,8 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -51,13 +51,19 @@
int _dump_buf_done;
static char *dif_op_str[] = {
- "SCSI_PROT_NORMAL",
- "SCSI_PROT_READ_INSERT",
- "SCSI_PROT_WRITE_STRIP",
- "SCSI_PROT_READ_STRIP",
- "SCSI_PROT_WRITE_INSERT",
- "SCSI_PROT_READ_PASS",
- "SCSI_PROT_WRITE_PASS",
+ "PROT_NORMAL",
+ "PROT_READ_INSERT",
+ "PROT_WRITE_STRIP",
+ "PROT_READ_STRIP",
+ "PROT_WRITE_INSERT",
+ "PROT_READ_PASS",
+ "PROT_WRITE_PASS",
+};
+
+static char *dif_grd_str[] = {
+ "NO_GUARD",
+ "DIF_CRC",
+ "DIX_IP",
};
struct scsi_dif_tuple {
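The shortened tables are indexed directly by the SCSI protection op, so their ordering must mirror the SCSI_PROT_* enum. A userspace sketch of that lookup (the local enum mirrors the kernel's ordering; names are illustrative):

    #include <stdio.h>

    enum prot_op {                  /* matches SCSI_PROT_* ordering */
            PROT_NORMAL, PROT_READ_INSERT, PROT_WRITE_STRIP,
            PROT_READ_STRIP, PROT_WRITE_INSERT, PROT_READ_PASS,
            PROT_WRITE_PASS,
    };

    static const char *dif_op_str[] = {
            "PROT_NORMAL", "PROT_READ_INSERT", "PROT_WRITE_STRIP",
            "PROT_READ_STRIP", "PROT_WRITE_INSERT", "PROT_READ_PASS",
            "PROT_WRITE_PASS",
    };

    int main(void)
    {
            enum prot_op op = PROT_WRITE_PASS;

            printf("op %d = %s\n", op, dif_op_str[op]);
            return 0;
    }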
@@ -1280,32 +1286,51 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-/*
- * Given a scsi cmnd, determine the BlockGuard tags to be used with it
+
+/* Return if error injection is detected by Initiator */
+#define BG_ERR_INIT 0x1
+/* Return if error injection is detected by Target */
+#define BG_ERR_TGT 0x2
+/* Return if swapping CSUM<-->CRC is required for error injection */
+#define BG_ERR_SWAP 0x10
+/* Return if disabling Guard/Ref/App checking is required for error injection */
+#define BG_ERR_CHECK 0x20
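BG_ERR_INIT/BG_ERR_TGT say where the injected error should surface, while BG_ERR_SWAP and BG_ERR_CHECK are orthogonal instructions to the caller, so the return value is a bit mask rather than the old 0/1. A sketch of how a caller might decode it (the consumer function is illustrative):

    #include <stdio.h>

    #define BG_ERR_INIT  0x1
    #define BG_ERR_TGT   0x2
    #define BG_ERR_SWAP  0x10
    #define BG_ERR_CHECK 0x20

    static void apply_injection(int rc)
    {
            if (!rc)
                    return;                 /* request ignored */
            if (rc & BG_ERR_SWAP)
                    puts("swap CRC <-> CSUM guard type");
            if (rc & BG_ERR_CHECK)
                    puts("disable guard/ref/app checking");
            printf("error should be seen by the %s\n",
                   (rc & BG_ERR_TGT) ? "target" : "initiator");
    }

    int main(void)
    {
            apply_injection(BG_ERR_TGT | BG_ERR_CHECK); /* e.g. WRITE_PASS reftag */
            return 0;
    }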
+
+/**
+ * lpfc_bg_err_inject - Determine if we should inject an error
+ * @phba: The Hba for which this call is being executed.
* @sc: The SCSI command to examine
* @reftag: (out) BlockGuard reference tag for transmitted data
* @apptag: (out) BlockGuard application tag for transmitted data
* @new_guard (in) Value to replace CRC with if needed
*
- * Returns (1) if error injection was performed, (0) otherwise
- */
+ * Returns BG_ERR_* bit mask or 0 if request ignored
+ **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
struct scatterlist *sgpe; /* s/g prot entry */
struct scatterlist *sgde; /* s/g data entry */
- struct scsi_dif_tuple *src;
+ struct lpfc_scsi_buf *lpfc_cmd = NULL;
+ struct scsi_dif_tuple *src = NULL;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_rport_data *rdata;
uint32_t op = scsi_get_prot_op(sc);
uint32_t blksize;
uint32_t numblks;
sector_t lba;
int rc = 0;
+ int blockoff = 0;
if (op == SCSI_PROT_NORMAL)
return 0;
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
lba = scsi_get_lba(sc);
+
+ /* First check if we need to match the LBA */
if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
blksize = lpfc_cmd_blksize(sc);
numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
@@ -1314,142 +1339,380 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if ((phba->lpfc_injerr_lba < lba) ||
(phba->lpfc_injerr_lba >= (lba + numblks)))
return 0;
+ if (sgpe) {
+ blockoff = phba->lpfc_injerr_lba - lba;
+ numblks = sg_dma_len(sgpe) /
+ sizeof(struct scsi_dif_tuple);
+ if (numblks < blockoff)
+ blockoff = numblks;
+ }
}
- sgpe = scsi_prot_sglist(sc);
- sgde = scsi_sglist(sc);
+ /* Next check if we need to match the remote NPortID or WWPN */
+ rdata = sc->device->hostdata;
+ if (rdata && rdata->pnode) {
+ ndlp = rdata->pnode;
+
+ /* Make sure we have the right NPortID if one is specified */
+ if (phba->lpfc_injerr_nportid &&
+ (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
+ return 0;
- /* Should we change the Reference Tag */
- if (reftag) {
/*
- * If we are SCSI_PROT_WRITE_STRIP, the protection data is
- * being stripped from the wire, thus it doesn't matter.
+ * Make sure we have the right WWPN if one is specified.
+ * wwn[0] should be a non-zero NAA in a good WWPN.
*/
- if ((op == SCSI_PROT_WRITE_PASS) ||
- (op == SCSI_PROT_WRITE_INSERT)) {
- if (phba->lpfc_injerr_wref_cnt) {
+ if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
+ (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
+ sizeof(struct lpfc_name)) != 0))
+ return 0;
+ }
+
+ /* Setup a ptr to the protection data if the SCSI host provides it */
+ if (sgpe) {
+ src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+ src += blockoff;
+ lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+ }
+
+ /* Should we change the Reference Tag */
+ if (reftag) {
+ if (phba->lpfc_injerr_wref_cnt) {
+ switch (op) {
+ case SCSI_PROT_WRITE_PASS:
+ if (src) {
+ /*
+ * For WRITE_PASS, force the error
+ * to be sent on the wire. It should
+ * be detected by the Target.
+ * If blockoff != 0 error will be
+ * inserted in the middle of the IO.
+ */
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9076 BLKGRD: Injecting reftag error: "
+ "write lba x%lx + x%x oldrefTag x%x\n",
+ (unsigned long)lba, blockoff,
+ be32_to_cpu(src->ref_tag));
+
+ /*
+ * Save the old ref_tag so we can
+ * restore it on completion.
+ */
+ if (lpfc_cmd) {
+ lpfc_cmd->prot_data_type =
+ LPFC_INJERR_REFTAG;
+ lpfc_cmd->prot_data_segment =
+ src;
+ lpfc_cmd->prot_data =
+ src->ref_tag;
+ }
+ src->ref_tag = cpu_to_be32(0xDEADBEEF);
+ phba->lpfc_injerr_wref_cnt--;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+ break;
+ }
+ /* Drop thru */
+ case SCSI_PROT_WRITE_INSERT:
+ /*
+ * For WRITE_INSERT, force the error
+ * to be sent on the wire. It should be
+ * detected by the Target.
+ */
/* DEADBEEF will be the reftag on the wire */
*reftag = 0xDEADBEEF;
phba->lpfc_injerr_wref_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = 1;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9078 BLKGRD: Injecting reftag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ /*
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
+ */
+ *reftag = 0xDEADBEEF;
+ phba->lpfc_injerr_wref_cnt--;
+ if (phba->lpfc_injerr_wref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9081 BLKGRD: Injecting reftag error: "
+ "9077 BLKGRD: Injecting reftag error: "
"write lba x%lx\n", (unsigned long)lba);
+ break;
}
- } else {
- if (phba->lpfc_injerr_rref_cnt) {
+ }
+ if (phba->lpfc_injerr_rref_cnt) {
+ switch (op) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_READ_PASS:
+ /*
+ * For READ_STRIP and READ_PASS, force the
+ * error on data being read off the wire. It
+ * should force an IO error to the driver.
+ */
*reftag = 0xDEADBEEF;
phba->lpfc_injerr_rref_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = 1;
+ if (phba->lpfc_injerr_rref_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9076 BLKGRD: Injecting reftag error: "
+ "9079 BLKGRD: Injecting reftag error: "
"read lba x%lx\n", (unsigned long)lba);
+ break;
}
}
}
/* Should we change the Application Tag */
if (apptag) {
- /*
- * If we are SCSI_PROT_WRITE_STRIP, the protection data is
- * being stripped from the wire, thus it doesn't matter.
- */
- if ((op == SCSI_PROT_WRITE_PASS) ||
- (op == SCSI_PROT_WRITE_INSERT)) {
- if (phba->lpfc_injerr_wapp_cnt) {
+ if (phba->lpfc_injerr_wapp_cnt) {
+ switch (op) {
+ case SCSI_PROT_WRITE_PASS:
+ if (src) {
+ /*
+ * For WRITE_PASS, force the error
+ * to be sent on the wire. It should
+ * be detected by the Target.
+ * If blockoff != 0 error will be
+ * inserted in the middle of the IO.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9080 BLKGRD: Injecting apptag error: "
+ "write lba x%lx + x%x oldappTag x%x\n",
+ (unsigned long)lba, blockoff,
+ be16_to_cpu(src->app_tag));
+
+ /*
+ * Save the old app_tag so we can
+ * restore it on completion.
+ */
+ if (lpfc_cmd) {
+ lpfc_cmd->prot_data_type =
+ LPFC_INJERR_APPTAG;
+ lpfc_cmd->prot_data_segment =
+ src;
+ lpfc_cmd->prot_data =
+ src->app_tag;
+ }
+ src->app_tag = cpu_to_be16(0xDEAD);
+ phba->lpfc_injerr_wapp_cnt--;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+ break;
+ }
+ /* Drop thru */
+ case SCSI_PROT_WRITE_INSERT:
+ /*
+ * For WRITE_INSERT, force the
+ * error to be sent on the wire. It should be
+ * detected by the Target.
+ */
/* DEAD will be the apptag on the wire */
*apptag = 0xDEAD;
phba->lpfc_injerr_wapp_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = 1;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0813 BLKGRD: Injecting apptag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ /*
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
+ */
+ *apptag = 0xDEAD;
+ phba->lpfc_injerr_wapp_cnt--;
+ if (phba->lpfc_injerr_wapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9077 BLKGRD: Injecting apptag error: "
+ "0812 BLKGRD: Injecting apptag error: "
"write lba x%lx\n", (unsigned long)lba);
+ break;
}
- } else {
- if (phba->lpfc_injerr_rapp_cnt) {
+ }
+ if (phba->lpfc_injerr_rapp_cnt) {
+ switch (op) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_READ_PASS:
+ /*
+ * For READ_STRIP and READ_PASS, force the
+ * error on data being read off the wire. It
+ * should force an IO error to the driver.
+ */
*apptag = 0xDEAD;
phba->lpfc_injerr_rapp_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = 1;
+ if (phba->lpfc_injerr_rapp_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+ rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9078 BLKGRD: Injecting apptag error: "
+ "0814 BLKGRD: Injecting apptag error: "
"read lba x%lx\n", (unsigned long)lba);
+ break;
}
}
}
+
/* Should we change the Guard Tag */
+ if (new_guard) {
+ if (phba->lpfc_injerr_wgrd_cnt) {
+ switch (op) {
+ case SCSI_PROT_WRITE_PASS:
+ rc = BG_ERR_CHECK;
+ /* Drop thru */
+
+ case SCSI_PROT_WRITE_INSERT:
+ /*
+ * For WRITE_INSERT, force the
+ * error to be sent on the wire. It should be
+ * detected by the Target.
+ */
+ phba->lpfc_injerr_wgrd_cnt--;
+ if (phba->lpfc_injerr_wgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
- /*
- * If we are SCSI_PROT_WRITE_INSERT, the protection data is
- * being on the wire is being fully generated on the HBA.
- * The host cannot change it or force an error.
- */
- if (((op == SCSI_PROT_WRITE_STRIP) ||
- (op == SCSI_PROT_WRITE_PASS)) &&
- phba->lpfc_injerr_wgrd_cnt) {
- if (sgpe) {
- src = (struct scsi_dif_tuple *)sg_virt(sgpe);
- /*
- * Just inject an error in the first
- * prot block.
- */
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9079 BLKGRD: Injecting guard error: "
- "write lba x%lx oldGuard x%x refTag x%x\n",
- (unsigned long)lba, src->guard_tag,
- src->ref_tag);
+ rc |= BG_ERR_TGT | BG_ERR_SWAP;
+ /* Signals the caller to swap CRC->CSUM */
- src->guard_tag = (uint16_t)new_guard;
- phba->lpfc_injerr_wgrd_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = 1;
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0817 BLKGRD: Injecting guard error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ /*
+ * For WRITE_STRIP and WRITE_PASS,
+ * force the error on data
+ * being copied from SLI-Host to SLI-Port.
+ */
+ phba->lpfc_injerr_wgrd_cnt--;
+ if (phba->lpfc_injerr_wgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
- } else {
- blksize = lpfc_cmd_blksize(sc);
- /*
- * Jump past the first data block
- * and inject an error in the
- * prot data. The prot data is already
- * embedded after the regular data.
- */
- src = (struct scsi_dif_tuple *)
- (sg_virt(sgde) + blksize);
+ rc = BG_ERR_INIT | BG_ERR_SWAP;
+ /* Signals the caller to swap CRC->CSUM */
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9080 BLKGRD: Injecting guard error: "
- "write lba x%lx oldGuard x%x refTag x%x\n",
- (unsigned long)lba, src->guard_tag,
- src->ref_tag);
-
- src->guard_tag = (uint16_t)new_guard;
- phba->lpfc_injerr_wgrd_cnt--;
- phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- rc = 1;
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0816 BLKGRD: Injecting guard error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ break;
+ }
+ }
+ if (phba->lpfc_injerr_rgrd_cnt) {
+ switch (op) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_READ_PASS:
+ /*
+ * For READ_STRIP and READ_PASS, force the
+ * error on data being read off the wire. It
+ * should force an IO error to the driver.
+ */
+ phba->lpfc_injerr_rgrd_cnt--;
+ if (phba->lpfc_injerr_rgrd_cnt == 0) {
+ phba->lpfc_injerr_nportid = 0;
+ phba->lpfc_injerr_lba =
+ LPFC_INJERR_LBA_OFF;
+ memset(&phba->lpfc_injerr_wwpn,
+ 0, sizeof(struct lpfc_name));
+ }
+
+ rc = BG_ERR_INIT | BG_ERR_SWAP;
+ /* Signals the caller to swap CRC->CSUM */
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "0818 BLKGRD: Injecting guard error: "
+ "read lba x%lx\n", (unsigned long)lba);
+ }
}
}
+
return rc;
}
#endif
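Each arm of the injection switches above repeats the same bookkeeping: consume one injection from the per-tag budget and, once it reaches zero, disarm the trigger completely. A minimal sketch of that shared pattern (the helper name is hypothetical, not part of the driver; the counter type is assumed to match the trigger state):

	static inline void
	lpfc_injerr_disarm(struct lpfc_hba *phba, uint32_t *cnt)
	{
		/* One injection consumed; clear the trigger when exhausted. */
		if (--(*cnt) == 0) {
			phba->lpfc_injerr_nportid = 0;
			phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
			memset(&phba->lpfc_injerr_wwpn, 0,
			       sizeof(struct lpfc_name));
		}
	}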
-/*
- * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
+/**
+ * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
+ * the specified SCSI command.
+ * @phba: The Hba for which this call is being executed.
* @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
*
* Returns: zero on success; non-zero if tx and/or rx op cannot be determined
*
- */
+ **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint8_t *txop, uint8_t *rxop)
@@ -1461,20 +1724,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- *txop = BG_OP_IN_CSUM_OUT_NODIF;
*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- *txop = BG_OP_IN_NODIF_OUT_CRC;
*rxop = BG_OP_IN_CRC_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- *txop = BG_OP_IN_CSUM_OUT_CRC;
*rxop = BG_OP_IN_CRC_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_CRC;
break;
case SCSI_PROT_NORMAL:
@@ -1490,20 +1753,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- *txop = BG_OP_IN_NODIF_OUT_CRC;
*rxop = BG_OP_IN_CRC_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- *txop = BG_OP_IN_CRC_OUT_CRC;
*rxop = BG_OP_IN_CRC_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_CRC;
break;
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- *txop = BG_OP_IN_CRC_OUT_NODIF;
*rxop = BG_OP_IN_NODIF_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_NORMAL:
@@ -1519,8 +1782,88 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return ret;
}
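A minimal usage sketch of the table above (hypothetical caller; the commented values assume SHOST_DIX_GUARD_IP and SCSI_PROT_READ_PASS):

	uint8_t txop, rxop;

	if (!lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop)) {
		/* Host side carries the IP checksum, the wire the CRC:
		 * rxop == BG_OP_IN_CRC_OUT_CSUM
		 * txop == BG_OP_IN_CSUM_OUT_CRC
		 */
	}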
-/*
- * This function sets up buffer list for protection groups of
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
+ * the specified SCSI command in order to force a guard tag error.
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint8_t *txop, uint8_t *rxop)
+{
+ uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+ uint8_t ret = 0;
+
+ if (guard_type == SHOST_DIX_GUARD_IP) {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ *rxop = BG_OP_IN_NODIF_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_NODIF;
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ *rxop = BG_OP_IN_CSUM_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CSUM;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ *rxop = BG_OP_IN_CSUM_OUT_CRC;
+ *txop = BG_OP_IN_CRC_OUT_CSUM;
+ break;
+
+ case SCSI_PROT_NORMAL:
+ default:
+ break;
+
+ }
+ } else {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ *rxop = BG_OP_IN_CSUM_OUT_NODIF;
+ *txop = BG_OP_IN_NODIF_OUT_CSUM;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ *rxop = BG_OP_IN_CSUM_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_CSUM;
+ break;
+
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ *rxop = BG_OP_IN_NODIF_OUT_CSUM;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
+ break;
+
+ case SCSI_PROT_NORMAL:
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
+#endif
+
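Read against lpfc_sc_to_bg_opcodes(), the error tables simply swap the CRC and checksum roles, so the guard computed on one side of the exchange can never match the other. A worked example (IP guard, WRITE_PASS), with the resulting values read off the two tables as comments:

	uint8_t txop, rxop;

	lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	/* txop == BG_OP_IN_CSUM_OUT_CRC, rxop == BG_OP_IN_CRC_OUT_CSUM */

	lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
	/* txop == BG_OP_IN_CRC_OUT_CSUM, rxop == BG_OP_IN_CSUM_OUT_CRC */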
+/**
+ * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
*
* This is usually used when the HBA is instructed to generate
@@ -1539,12 +1882,11 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
* |more Data BDE's ... (opt)|
* +-------------------------+
*
- * @sc: pointer to scsi command we're working on
- * @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
*
* Note: Data s/g buffers have been dma mapped
- */
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct ulp_bde64 *bpl, int datasegcnt)
@@ -1555,6 +1897,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
dma_addr_t physaddr;
int i = 0, num_bde = 0, status;
int datadir = sc->sc_data_direction;
+ uint32_t rc;
+ uint32_t checking = 1;
uint32_t reftag;
unsigned blksize;
uint8_t txop, rxop;
@@ -1565,11 +1909,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command for pde*/
blksize = lpfc_cmd_blksize(sc);
- reftag = scsi_get_lba(sc) & 0xffffffff;
+ reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- /* reftag is the only error we can inject here */
- lpfc_bg_err_inject(phba, sc, &reftag, 0, 0);
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+ if (rc) {
+ if (rc & BG_ERR_SWAP)
+ lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+ if (rc & BG_ERR_CHECK)
+ checking = 0;
+ }
#endif
/* setup PDE5 with what we have */
@@ -1592,8 +1941,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_optx, pde6, txop);
bf_set(pde6_oprx, pde6, rxop);
if (datadir == DMA_FROM_DEVICE) {
- bf_set(pde6_ce, pde6, 1);
- bf_set(pde6_re, pde6, 1);
+ bf_set(pde6_ce, pde6, checking);
+ bf_set(pde6_re, pde6, checking);
}
bf_set(pde6_ai, pde6, 1);
bf_set(pde6_ae, pde6, 0);
@@ -1627,9 +1976,16 @@ out:
return num_bde;
}
-/*
- * This function sets up buffer list for protection groups of
- * type LPFC_PG_TYPE_DIF_BUF
+/**
+ * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
*
* This is usually used when DIFs are in their own buffers,
* separate from the data. The HBA can then by instructed
@@ -1654,14 +2010,11 @@ out:
* | ... |
* +-------------------------+
*
- * @sc: pointer to scsi command we're working on
- * @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
- * @protcnt: number of segment of protection data that have been dma mapped
- *
* Note: It is assumed that both data and protection s/g buffers have been
* mapped for DMA
- */
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct ulp_bde64 *bpl, int datacnt, int protcnt)
@@ -1681,6 +2034,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
int datadir = sc->sc_data_direction;
unsigned char pgdone = 0, alldone = 0;
unsigned blksize;
+ uint32_t rc;
+ uint32_t checking = 1;
uint32_t reftag;
uint8_t txop, rxop;
int num_bde = 0;
@@ -1701,11 +2056,16 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
- reftag = scsi_get_lba(sc) & 0xffffffff;
+ reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- /* reftag / guard tag are the only errors we can inject here */
- lpfc_bg_err_inject(phba, sc, &reftag, 0, 0xDEAD);
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+ if (rc) {
+ if (rc & BG_ERR_SWAP)
+ lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+ if (rc & BG_ERR_CHECK)
+ checking = 0;
+ }
#endif
split_offset = 0;
@@ -1729,8 +2089,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
bf_set(pde6_optx, pde6, txop);
bf_set(pde6_oprx, pde6, rxop);
- bf_set(pde6_ce, pde6, 1);
- bf_set(pde6_re, pde6, 1);
+ bf_set(pde6_ce, pde6, checking);
+ bf_set(pde6_re, pde6, checking);
bf_set(pde6_ai, pde6, 1);
bf_set(pde6_ae, pde6, 0);
bf_set(pde6_apptagval, pde6, 0);
@@ -1852,13 +2212,358 @@ out:
return num_bde;
}
-/*
+/**
+ * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into the data stream (or strip DIF from
+ * the incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ * +-------------------------+
+ * start of prot group --> | DI_SEED |
+ * +-------------------------+
+ * | Data SGE |
+ * +-------------------------+
+ * |more Data SGE's ... (opt)|
+ * +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct sli4_sge *sgl, int datasegcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct sli4_sge_diseed *diseed = NULL;
+ dma_addr_t physaddr;
+ int i = 0, num_sge = 0, status;
+ int datadir = sc->sc_data_direction;
+ uint32_t reftag;
+ unsigned blksize;
+ uint8_t txop, rxop;
+ uint32_t rc;
+ uint32_t checking = 1;
+ uint32_t dma_len;
+ uint32_t dma_offset = 0;
+
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
+ goto out;
+
+ /* extract some info from the scsi command for pde*/
+ blksize = lpfc_cmd_blksize(sc);
+ reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+ if (rc) {
+ if (rc & BG_ERR_SWAP)
+ lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+ if (rc & BG_ERR_CHECK)
+ checking = 0;
+ }
+#endif
+
+ /* setup DISEED with what we have */
+ diseed = (struct sli4_sge_diseed *) sgl;
+ memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+ /* Endianness conversion if necessary */
+ diseed->ref_tag = cpu_to_le32(reftag);
+ diseed->ref_tag_tran = diseed->ref_tag;
+
+ /* setup DISEED with the rest of the info */
+ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+ bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+ if (datadir == DMA_FROM_DEVICE) {
+ bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+ bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+ }
+ bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+ bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+ /* Endianness conversion if necessary for DISEED */
+ diseed->word2 = cpu_to_le32(diseed->word2);
+ diseed->word3 = cpu_to_le32(diseed->word3);
+
+ /* advance bpl and increment sge count */
+ num_sge++;
+ sgl++;
+
+ /* assumption: caller has already run dma_map_sg on command data */
+ scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+ physaddr = sg_dma_address(sgde);
+ dma_len = sg_dma_len(sgde);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ if ((i + 1) == datasegcnt)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+ sgl->sge_len = cpu_to_le32(dma_len);
+ dma_offset += dma_len;
+
+ sgl++;
+ num_sge++;
+ }
+
+out:
+ return num_sge;
+}
+
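On success the SGL built above is always one DISEED SGE followed by one data SGE per mapped segment, which is what the "num_bde < 2" test in lpfc_bg_scsi_prep_dma_buf_s4() further below relies on. A hypothetical sanity check:

	num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, datasegcnt);
	/* 0 on failure; otherwise DISEED + one SGE per data segment. */
	WARN_ON(num_sge && num_sge != datasegcnt + 1);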
+/**
+ * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream. For read operations,
+ * the HBA can extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ * +-------------------------+
+ * start of first prot group --> | DISEED |
+ * +-------------------------+
+ * | DIF (Prot SGE) |
+ * +-------------------------+
+ * | Data SGE |
+ * +-------------------------+
+ * |more Data SGE's ... (opt)|
+ * +-------------------------+
+ * start of new prot group --> | DISEED |
+ * +-------------------------+
+ * | ... |
+ * +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ * mapped for DMA
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct sli4_sge *sgl, int datacnt, int protcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct scatterlist *sgpe = NULL; /* s/g prot entry */
+ struct sli4_sge_diseed *diseed = NULL;
+ dma_addr_t dataphysaddr, protphysaddr;
+ unsigned short curr_data = 0, curr_prot = 0;
+ unsigned int split_offset;
+ unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+ unsigned int protgrp_blks, protgrp_bytes;
+ unsigned int remainder, subtotal;
+ int status;
+ unsigned char pgdone = 0, alldone = 0;
+ unsigned blksize;
+ uint32_t reftag;
+ uint8_t txop, rxop;
+ uint32_t dma_len;
+ uint32_t rc;
+ uint32_t checking = 1;
+ uint32_t dma_offset = 0;
+ int num_sge = 0;
+
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
+
+ if (!sgpe || !sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ sgpe, sgde);
+ return 0;
+ }
+
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
+ goto out;
+
+ /* extract some info from the scsi command */
+ blksize = lpfc_cmd_blksize(sc);
+ reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+ if (rc) {
+ if (rc & BG_ERR_SWAP)
+ lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+ if (rc & BG_ERR_CHECK)
+ checking = 0;
+ }
+#endif
+
+ split_offset = 0;
+ do {
+ /* setup DISEED with what we have */
+ diseed = (struct sli4_sge_diseed *) sgl;
+ memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+ /* Endianness conversion if necessary */
+ diseed->ref_tag = cpu_to_le32(reftag);
+ diseed->ref_tag_tran = diseed->ref_tag;
+
+ /* setup DISEED with the rest of the info */
+ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+ bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+ bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+ bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+ bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+ bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+ /* Endianness conversion if necessary for DISEED */
+ diseed->word2 = cpu_to_le32(diseed->word2);
+ diseed->word3 = cpu_to_le32(diseed->word3);
+
+ /* advance sgl and increment bde count */
+ num_sge++;
+ sgl++;
+
+ /* setup the first BDE that points to protection buffer */
+ protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+ protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+ /* must be integer multiple of the DIF block length */
+ BUG_ON(protgroup_len % 8);
+
+ /* Now setup DIF SGE */
+ sgl->word2 = 0;
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
+ sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
+ sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
+ sgl->word2 = cpu_to_le32(sgl->word2);
+
+ protgrp_blks = protgroup_len / 8;
+ protgrp_bytes = protgrp_blks * blksize;
+
+ /* check if DIF SGE is crossing the 4K boundary; if so split */
+ if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
+ protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
+ protgroup_offset += protgroup_remainder;
+ protgrp_blks = protgroup_remainder / 8;
+ protgrp_bytes = protgrp_blks * blksize;
+ } else {
+ protgroup_offset = 0;
+ curr_prot++;
+ }
+
+ num_sge++;
+
+ /* setup SGE's for data blocks associated with DIF data */
+ pgdone = 0;
+ subtotal = 0; /* total bytes processed for current prot grp */
+ while (!pgdone) {
+ if (!sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9086 BLKGRD:%s Invalid data segment\n",
+ __func__);
+ return 0;
+ }
+ sgl++;
+ dataphysaddr = sg_dma_address(sgde) + split_offset;
+
+ remainder = sg_dma_len(sgde) - split_offset;
+
+ if ((subtotal + remainder) <= protgrp_bytes) {
+ /* we can use this whole buffer */
+ dma_len = remainder;
+ split_offset = 0;
+
+ if ((subtotal + remainder) == protgrp_bytes)
+ pgdone = 1;
+ } else {
+ /* must split this buffer with next prot grp */
+ dma_len = protgrp_bytes - subtotal;
+ split_offset += dma_len;
+ }
+
+ subtotal += dma_len;
+
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+ sgl->sge_len = cpu_to_le32(dma_len);
+ dma_offset += dma_len;
+
+ num_sge++;
+ curr_data++;
+
+ if (split_offset)
+ break;
+
+ /* Move to the next s/g segment if possible */
+ sgde = sg_next(sgde);
+ }
+
+ if (protgroup_offset) {
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ sgl++;
+ continue;
+ }
+
+ /* are we done ? */
+ if (curr_prot == protcnt) {
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ alldone = 1;
+ } else if (curr_prot < protcnt) {
+ /* advance to next prot buffer */
+ sgpe = sg_next(sgpe);
+ sgl++;
+
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ } else {
+ /* if we're here, we have a bug */
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9085 BLKGRD: bug in %s\n", __func__);
+ }
+
+ } while (!alldone);
+
+out:
+
+ return num_sge;
+}
+
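The 4K-boundary split in the loop above is easiest to follow with numbers (a worked example under the usual 8-byte DIF tuple and 512-byte block assumptions): a 2048-byte protection buffer (256 tuples) that starts 0xF80 into a 4K page leaves only 0x1000 - 0xF80 = 128 bytes before the boundary, so the first DISEED group covers 128 / 8 = 16 blocks and the loop re-enters with protgroup_offset = 128 to emit the rest.

	/* (sgl->addr_lo & 0xfff) == 0xF80, protgroup_len == 2048 */
	protgroup_remainder = 0x1000 - 0xF80;	/* 128 bytes	   */
	protgrp_blks = protgroup_remainder / 8;	/* 16 blocks	   */
	protgrp_bytes = protgrp_blks * blksize;	/* 8192 data bytes */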
+/**
+ * lpfc_prot_group_type - Get the protection group type of a SCSI command
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ *
* Given a SCSI command that supports DIF, determine composition of protection
* groups involved in setting up buffer lists
*
- * Returns:
- * for DIF (for both read and write)
- * */
+ * Returns: Protection group type (with or without DIF)
+ *
+ **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
@@ -1885,13 +2590,17 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
return ret;
}
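The hunk above elides the body of the switch; for orientation, the mapping it implements looks roughly as follows (reconstructed from the driver source, so treat it as a sketch rather than patch text):

	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		/* HBA strips/generates DIF; no host prot buffers */
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		/* host supplies separate DIF buffers */
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		ret = LPFC_PG_TYPE_INVALID;
	}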
-/*
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ *
* This is the protection/DIF aware version of
* lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
* two functions eventually, but for now, it's here
- */
+ **/
static int
-lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
+lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
@@ -2147,7 +2856,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
cmd->sense_buffer[8] = 0; /* Information descriptor type */
cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
cmd->sense_buffer[10] = 0x80; /* Validity bit */
- bghm /= cmd->device->sector_size;
+
+	/* bghm is an "on the wire" FC frame based count */
+ switch (scsi_get_prot_op(cmd)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ bghm /= cmd->device->sector_size;
+ break;
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ bghm /= (cmd->device->sector_size +
+ sizeof(struct scsi_dif_tuple));
+ break;
+ }
failing_sector = scsi_get_lba(cmd);
failing_sector += bghm;
@@ -2160,7 +2883,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
/* No error was reported - problem in FW? */
cmd->result = ScsiResult(DID_ERROR, 0);
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9057 BLKGRD: no errors reported!\n");
+ "9057 BLKGRD: Unknown error reported!\n");
}
out:
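A worked example of the new scaling (assuming 512-byte sectors and the 8-byte scsi_dif_tuple): an error detected 10 sectors into the transfer reports bghm = 5120 for READ_INSERT/WRITE_STRIP, where the wire carries bare data, but bghm = 5200 for the other four ops, where every wire block is 512 + 8 = 520 bytes; both divisions recover the same failing-sector offset of 10.

	/* PASS-type op, 512-byte sectors */
	uint32_t bghm = 5200;	/* bytes counted off the wire */
	bghm /= (512 + sizeof(struct scsi_dif_tuple));	/* == 10 sectors */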
@@ -2292,6 +3015,180 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
}
/**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * Returns the adjusted data length.
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+ int diflen, fcpdl;
+ unsigned blksize;
+
+ fcpdl = scsi_bufflen(sc);
+
+ /* Check if there is protection data on the wire */
+ if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+ /* Read */
+ if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
+ return fcpdl;
+
+ } else {
+ /* Write */
+ if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
+ return fcpdl;
+ }
+
+ /* If protection data on the wire, adjust the count accordingly */
+ blksize = lpfc_cmd_blksize(sc);
+ diflen = (fcpdl / blksize) * 8;
+ fcpdl += diflen;
+ return fcpdl;
+}
+
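Numerically (a worked example assuming 512-byte logical blocks): a 32 KiB WRITE_PASS is 64 blocks, so 64 * 8 = 512 bytes of DIF travel with it and fcpDl becomes 33280, while a WRITE_STRIP of the same buffer stays at 32768, because the tuples never reach the wire.

	int fcpdl = 32768;	/* scsi_bufflen(sc): 64 x 512-byte blocks */
	unsigned blksize = 512;
	int diflen = (fcpdl / blksize) * 8;	/* 64 tuples = 512 bytes */
	fcpdl += diflen;			/* 33280 bytes on the wire */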
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ uint32_t num_bde = 0;
+ int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+ int prot_group_type = 0;
+ int fcpdl;
+
+ /*
+	 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
+	 * and fcp_rsp regions to the first data SGE entry
+ */
+ if (scsi_sg_count(scsi_cmnd)) {
+ /*
+ * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+ datasegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!datasegcnt))
+ return 1;
+
+ sgl += 1;
+ /* clear the last flag in the fcp_rsp map entry */
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+
+ sgl += 1;
+ lpfc_cmd->seg_cnt = datasegcnt;
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9087 BLKGRD: %s: Too many sg segments"
+ " from dma_map_sg. Config %d, seg_cnt"
+ " %d\n",
+ __func__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+ switch (prot_group_type) {
+ case LPFC_PG_TYPE_NO_DIF:
+ num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+ datasegcnt);
+ /* we should have 2 or more entries in buffer list */
+ if (num_bde < 2)
+ goto err;
+ break;
+ case LPFC_PG_TYPE_DIF_BUF:{
+ /*
+ * This type indicates that protection buffers are
+			 * passed to the driver, so they need to be prepared
+ * for DMA
+ */
+ protsegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!protsegcnt)) {
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ lpfc_cmd->prot_seg_cnt = protsegcnt;
+ if (lpfc_cmd->prot_seg_cnt
+ > phba->cfg_prot_sg_seg_cnt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9088 BLKGRD: %s: Too many prot sg "
+ "segments from dma_map_sg. Config %d,"
+ "prot_seg_cnt %d\n", __func__,
+ phba->cfg_prot_sg_seg_cnt,
+ lpfc_cmd->prot_seg_cnt);
+ dma_unmap_sg(&phba->pcidev->dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd),
+ datadir);
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+ datasegcnt, protsegcnt);
+ /* we should have 3 or more entries in buffer list */
+ if (num_bde < 3)
+ goto err;
+ break;
+ }
+ case LPFC_PG_TYPE_INVALID:
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9083 Unexpected protection group %i\n",
+ prot_group_type);
+ return 1;
+ }
+ }
+
+ fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
+
+ fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+ /*
+	 * Due to the difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
+
+ return 0;
+err:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9084 Could not setup all needed BDE's"
+ "prot_group_type=%d, num_bde=%d\n",
+ prot_group_type, num_bde);
+ return 1;
+}
+
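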
+/**
* lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
* @phba: The Hba for which this call is being executed.
* @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -2310,6 +3207,25 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
}
/**
+ * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * using BlockGuard.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
+static inline int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
* lpfc_send_scsi_error_event - Posts an event when there is SCSI error
* @phba: Pointer to hba context object.
* @vport: Pointer to vport object.
@@ -2639,6 +3555,37 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* pick up SLI4 exhange busy status from HBA */
lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->prot_data_type) {
+ struct scsi_dif_tuple *src = NULL;
+
+ src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+ /*
+ * Used to restore any changes to protection
+ * data for error injection.
+ */
+ switch (lpfc_cmd->prot_data_type) {
+ case LPFC_INJERR_REFTAG:
+ src->ref_tag =
+ lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_APPTAG:
+ src->app_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ case LPFC_INJERR_GUARD:
+ src->guard_tag =
+ (uint16_t)lpfc_cmd->prot_data;
+ break;
+ default:
+ break;
+ }
+
+ lpfc_cmd->prot_data = 0;
+ lpfc_cmd->prot_data_type = 0;
+ lpfc_cmd->prot_data_segment = NULL;
+ }
+#endif
if (pnode && NLP_CHK_NODE_ACT(pnode))
atomic_dec(&pnode->cmd_pending);
@@ -3072,12 +4019,14 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
case LPFC_PCI_DEV_LP:
phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+ phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+ phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
break;
@@ -3238,20 +4187,10 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = err;
goto out_fail_command;
}
- /*
- * Do not let the mid-layer retry I/O too fast. If an I/O is retried
- * without waiting a bit then indicate that the device is busy.
- */
- if (cmnd->retries &&
- time_before(jiffies, (cmnd->jiffies_at_alloc +
- msecs_to_jiffies(LPFC_RETRY_PAUSE *
- cmnd->retries))))
- return SCSI_MLQUEUE_DEVICE_BUSY;
ndlp = rdata->pnode;
if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
- (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) ||
- (phba->sli_rev == LPFC_SLI_REV4))) {
+ (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
@@ -3297,63 +4236,48 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
- "str=%s\n",
- cmnd->cmnd[0], scsi_get_prot_op(cmnd),
- dif_op_str[scsi_get_prot_op(cmnd)]);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
- "%02x %02x %02x %02x %02x\n",
- cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
- cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
- cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
- cmnd->cmnd[9]);
+ "9033 BLKGRD: rcvd protected cmd:%02x op=%s "
+ "guard=%s\n", cmnd->cmnd[0],
+ dif_op_str[scsi_get_prot_op(cmnd)],
+ dif_grd_str[scsi_host_get_guard(shost)]);
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9035 BLKGRD: READ @ sector %llu, "
- "count %u\n",
+ "cnt %u, rpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request));
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9036 BLKGRD: WRITE @ sector %llu, "
- "count %u cmd=%p\n",
+ "cnt %u, wpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
blk_rq_sectors(cmnd->request),
- cmnd);
+ (cmnd->cmnd[1]>>5));
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
} else {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9038 BLKGRD: rcvd unprotected cmd:"
- "%02x op:%02x str=%s\n",
- cmnd->cmnd[0], scsi_get_prot_op(cmnd),
- dif_op_str[scsi_get_prot_op(cmnd)]);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9039 BLKGRD: CDB: %02x %02x %02x "
- "%02x %02x %02x %02x %02x %02x %02x\n",
- cmnd->cmnd[0], cmnd->cmnd[1],
- cmnd->cmnd[2], cmnd->cmnd[3],
- cmnd->cmnd[4], cmnd->cmnd[5],
- cmnd->cmnd[6], cmnd->cmnd[7],
- cmnd->cmnd[8], cmnd->cmnd[9]);
+ "9038 BLKGRD: rcvd unprotected cmd:"
+ "%02x op=%s guard=%s\n", cmnd->cmnd[0],
+ dif_op_str[scsi_get_prot_op(cmnd)],
+ dif_grd_str[scsi_host_get_guard(shost)]);
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9040 dbg: READ @ sector %llu, "
- "count %u\n",
+ "cnt %u, rpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request));
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9041 dbg: WRITE @ sector %llu, "
- "count %u cmd=%p\n",
- (unsigned long long)scsi_get_lba(cmnd),
- blk_rq_sectors(cmnd->request), cmnd);
- else
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9042 dbg: parser not implemented\n");
+ "9041 dbg: WRITE @ sector %llu, "
+ "cnt %u, wpt %d\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ blk_rq_sectors(cmnd->request),
+ (cmnd->cmnd[1]>>5));
}
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 9075a08cf781..21a2ffe67eac 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -150,9 +150,18 @@ struct lpfc_scsi_buf {
struct lpfc_iocbq cur_iocbq;
wait_queue_head_t *waitq;
unsigned long start_time;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ /* Used to restore any changes to protection data for error injection */
+ void *prot_data_segment;
+ uint32_t prot_data;
+ uint32_t prot_data_type;
+#define LPFC_INJERR_REFTAG 1
+#define LPFC_INJERR_APPTAG 2
+#define LPFC_INJERR_GUARD 3
+#endif
};
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
-#define LPFC_RETRY_PAUSE 300
#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 23a27592388c..dbaf5b963bff 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -293,7 +293,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
}
bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
- bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
+ bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+ (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+ bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
/* PCI read to flush PCI pipeline on re-arming for INTx mode */
if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
@@ -372,7 +374,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
- bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
+ bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
+ (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
+ bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
return released;
}
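Both doorbells now carry the queue id in two pieces, presumably because newer ports expose more queues than the original single id field could address. The split itself is mechanical; bf_set() truncates to the field width, so only the high half needs an explicit shift:

	uint16_t qid = q->queue_id;

	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       qid >> LPFC_EQID_HI_FIELD_SHIFT);	/* upper bits */
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, qid);	/* low bits */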
@@ -554,81 +558,6 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
}
/**
- * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: nodelist pointer for this target.
- * @xritag: xri used in this exchange.
- * @rxid: Remote Exchange ID.
- * @send_rrq: Flag used to determine if we should send rrq els cmd.
- *
- * This function is called with hbalock held.
- * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
- * rrq struct and adds it to the active_rrq_list.
- *
- * returns 0 for rrq slot for this xri
- * < 0 Were not able to get rrq mem or invalid parameter.
- **/
-static int
-__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
-{
- struct lpfc_node_rrq *rrq;
- int empty;
- uint32_t did = 0;
-
-
- if (!ndlp)
- return -EINVAL;
-
- if (!phba->cfg_enable_rrq)
- return -EINVAL;
-
- if (phba->pport->load_flag & FC_UNLOADING) {
- phba->hba_flag &= ~HBA_RRQ_ACTIVE;
- goto out;
- }
- did = ndlp->nlp_DID;
-
- /*
- * set the active bit even if there is no mem available.
- */
- if (NLP_CHK_FREE_REQ(ndlp))
- goto out;
-
- if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
- goto out;
-
- if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
- goto out;
-
- rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
- if (rrq) {
- rrq->send_rrq = send_rrq;
- rrq->xritag = xritag;
- rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
- rrq->ndlp = ndlp;
- rrq->nlp_DID = ndlp->nlp_DID;
- rrq->vport = ndlp->vport;
- rrq->rxid = rxid;
- empty = list_empty(&phba->active_rrq_list);
- rrq->send_rrq = send_rrq;
- list_add_tail(&rrq->list, &phba->active_rrq_list);
- if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
- phba->hba_flag |= HBA_RRQ_ACTIVE;
- if (empty)
- lpfc_worker_wake_up(phba);
- }
- return 0;
- }
-out:
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2921 Can't set rrq active xri:0x%x rxid:0x%x"
- " DID:0x%x Send:%d\n",
- xritag, rxid, did, send_rrq);
- return -EINVAL;
-}
-
-/**
* lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @xritag: xri used in this exchange.
@@ -856,15 +785,68 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
**/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+ uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
- int ret;
unsigned long iflags;
+ struct lpfc_node_rrq *rrq;
+ int empty;
+
+ if (!ndlp)
+ return -EINVAL;
+
+ if (!phba->cfg_enable_rrq)
+ return -EINVAL;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ goto out;
+ }
+ /*
+ * set the active bit even if there is no mem available.
+ */
+ if (NLP_CHK_FREE_REQ(ndlp))
+ goto out;
+
+ if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+ goto out;
+
+ if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+ goto out;
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+ if (!rrq) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
+ " DID:0x%x Send:%d\n",
+ xritag, rxid, ndlp->nlp_DID, send_rrq);
+ return -EINVAL;
+ }
+ rrq->send_rrq = send_rrq;
+ rrq->xritag = xritag;
+ rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+ rrq->ndlp = ndlp;
+ rrq->nlp_DID = ndlp->nlp_DID;
+ rrq->vport = ndlp->vport;
+ rrq->rxid = rxid;
+ rrq->send_rrq = send_rrq;
spin_lock_irqsave(&phba->hbalock, iflags);
- ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
+ empty = list_empty(&phba->active_rrq_list);
+ list_add_tail(&rrq->list, &phba->active_rrq_list);
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
+ if (empty)
+ lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, iflags);
- return ret;
+ return 0;
+out:
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2921 Can't set rrq active xri:0x%x rxid:0x%x"
+ " DID:0x%x Send:%d\n",
+ xritag, rxid, ndlp->nlp_DID, send_rrq);
+ return -EINVAL;
}
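The restructuring above is not cosmetic: mempool_alloc(..., GFP_KERNEL) may sleep, so the old scheme of calling __lpfc_set_rrq_active() with the hbalock spinlock held was unsafe. The shape of the fix, sketched:

	spin_lock_irqsave(&phba->hbalock, iflags);
	/* cheap eligibility checks, test_and_set_bit() ... */
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);	/* may sleep */
	if (!rrq)
		return -EINVAL;
	/* fill in the rrq fields with no lock held ... */

	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&rrq->list, &phba->active_rrq_list);	/* publish */
	spin_unlock_irqrestore(&phba->hbalock, iflags);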
/**
@@ -5629,6 +5611,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
rc = -ENOMEM;
goto free_vpi_ids;
}
+ phba->sli4_hba.max_cfg_param.xri_used = 0;
+ phba->sli4_hba.xri_count = 0;
phba->sli4_hba.xri_ids = kzalloc(count *
sizeof(uint16_t),
GFP_KERNEL);
@@ -6163,6 +6147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_free_mbox;
}
+ lpfc_sli4_node_prep(phba);
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
@@ -7267,11 +7252,13 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
out_not_finished:
spin_lock_irqsave(&phba->hbalock, iflags);
- mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
- __lpfc_mbox_cmpl_put(phba, mboxq);
- /* Release the token */
- psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- phba->sli.mbox_active = NULL;
+ if (phba->sli.mbox_active) {
+ mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+ __lpfc_mbox_cmpl_put(phba, mboxq);
+ /* Release the token */
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ }
spin_unlock_irqrestore(&phba->hbalock, iflags);
return MBX_NOT_FINISHED;
@@ -7555,6 +7542,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
sgl = (struct sli4_sge *)sglq->sgl;
icmd = &piocbq->iocb;
+ if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+ return sglq->sli4_xritag;
if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = icmd->un.genreq64.bdl.bdeSize /
sizeof(struct ulp_bde64);
@@ -7756,6 +7745,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
*pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_FDISC ||
+ *pcmd == ELS_CMD_LOGO ||
*pcmd == ELS_CMD_PLOGI)) {
bf_set(els_req64_sp, &wqe->els_req, 1);
bf_set(els_req64_sid, &wqe->els_req,
@@ -7763,7 +7754,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
phba->vpi_ids[phba->pport->vpi]);
- } else if (iocbq->context1) {
+ } else if (pcmd && iocbq->context1) {
bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -7830,12 +7821,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+ if (iocbq->iocb_flag & LPFC_IO_DIF) {
+ iocbq->iocb_flag &= ~LPFC_IO_DIF;
+ bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
+ }
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -7849,12 +7844,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+ if (iocbq->iocb_flag & LPFC_IO_DIF) {
+ iocbq->iocb_flag &= ~LPFC_IO_DIF;
+ bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
+ }
+ bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
break;
case CMD_FCP_ICMND64_CR:
/* word3 iocb=IO_TAG wqe=reserved */
@@ -7982,6 +7981,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
xritag = 0;
break;
case CMD_XMIT_BLS_RSP64_CX:
+ ndlp = (struct lpfc_nodelist *)iocbq->context1;
/* As BLS ABTS RSP WQE is very different from other WQEs,
* we re-construct this WQE here based on information in
* iocbq from scratch.
@@ -8008,8 +8008,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+
+ /* Use CT=VPI */
+ bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
+ ndlp->nlp_DID);
+ bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
+ iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
- iocbq->iocb.ulpContext);
+ phba->vpi_ids[phba->pport->vpi]);
bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
LPFC_WQE_LENLOC_NONE);
@@ -8073,8 +8080,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
if (pring->txq_cnt) {
@@ -8383,20 +8389,32 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
struct lpfc_vport *vport;
+ uint32_t ext_status = 0;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3115 Node Context not found, driver "
"ignoring abts err event\n");
+ return;
+ }
+
vport = ndlp->vport;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3116 Port generated FCP XRI ABORT event on "
- "vpi %d rpi %d xri x%x status 0x%x\n",
+ "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
ndlp->vport->vpi, ndlp->nlp_rpi,
bf_get(lpfc_wcqe_xa_xri, axri),
- bf_get(lpfc_wcqe_xa_status, axri));
+ bf_get(lpfc_wcqe_xa_status, axri),
+ axri->parameter);
- if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
+ /*
+ * Catch the ABTS protocol failure case. Older OCe FW releases returned
+	 * LOCAL_REJECT and 0 for a failed ABTS exchange, and later OCe and
+ * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
+ */
+ ext_status = axri->parameter & WCQE_PARAM_MASK;
+ if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
+ ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
lpfc_sli_abts_recover_port(vport, ndlp);
}
@@ -9802,12 +9820,11 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
unsigned long timeout;
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
- spin_unlock_irq(&phba->hbalock);
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
- spin_lock_irq(&phba->hbalock);
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
@@ -9826,7 +9843,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
*/
break;
}
- }
+ } else
+ spin_unlock_irq(&phba->hbalock);
+
lpfc_sli_mbox_sys_flush(phba);
}
@@ -10653,12 +10672,14 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
struct lpfc_wcqe_complete *wcqe)
{
unsigned long iflags;
+ uint32_t status;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
sizeof(struct lpfc_iocbq) - offset);
/* Map WCQE parameters into irspiocb parameters */
- pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
if (pIocbOut->iocb_flag & LPFC_IO_FCP)
if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
pIocbIn->iocb.un.fcpi.fcpi_parm =
@@ -10671,6 +10692,44 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
}
+ /* Convert BG errors for completion status */
+ if (status == CQE_STATUS_DI_ERROR) {
+ pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+
+ if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
+ pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
+ else
+ pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
+
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
+ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ BGS_GUARD_ERR_MASK;
+ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ BGS_APPTAG_ERR_MASK;
+ if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ BGS_REFTAG_ERR_MASK;
+
+ /* Check to see if there was any good data before the error */
+ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ BGS_HI_WATER_MARK_PRESENT_MASK;
+ pIocbIn->iocb.unsli3.sli3_bg.bghm =
+ wcqe->total_data_placed;
+ }
+
+ /*
+ * Set ALL the error bits to indicate we don't know what
+ * type of error it is.
+ */
+ if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
+ pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+ (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
+ BGS_GUARD_ERR_MASK);
+ }
+
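End to end, a guard-tag miscompare reported in the WCQE with some blocks already placed (tdpv set, bg_edir clear) would reach the SLI-3-style completion path roughly as below (a worked example, not patch text):

	/* WCQE: status == CQE_STATUS_DI_ERROR, bg_ge == 1, bg_tdpv == 1 */
	pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
	pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
	pIocbIn->iocb.unsli3.sli3_bg.bgstat =
		BGS_GUARD_ERR_MASK | BGS_HI_WATER_MARK_PRESENT_MASK;
	pIocbIn->iocb.unsli3.sli3_bg.bghm = wcqe->total_data_placed;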
/* Pick up HBA exchange busy condition */
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13227,7 +13286,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, index;
uint32_t mbox_tmo;
- uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+ uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
uint16_t xritag_start = 0, lxri = 0;
struct lpfc_rsrc_blks *rsrc_blk;
int cnt, ttl_cnt, rc = 0;
@@ -13249,6 +13308,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
cnt = 0;
ttl_cnt = 0;
+ post_els_xri_cnt = els_xri_cnt;
list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
list) {
rsrc_start = rsrc_blk->rsrc_start;
@@ -13258,11 +13318,12 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
"3014 Working ELS Extent start %d, cnt %d\n",
rsrc_start, rsrc_size);
- loop_cnt = min(els_xri_cnt, rsrc_size);
- if (ttl_cnt + loop_cnt >= els_xri_cnt) {
- loop_cnt = els_xri_cnt - ttl_cnt;
- ttl_cnt = els_xri_cnt;
- }
+ loop_cnt = min(post_els_xri_cnt, rsrc_size);
+ if (loop_cnt < post_els_xri_cnt) {
+ post_els_xri_cnt -= loop_cnt;
+ ttl_cnt += loop_cnt;
+ } else
+ ttl_cnt += post_els_xri_cnt;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14042,6 +14103,13 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
{
if (cmd_iocbq)
lpfc_sli_release_iocbq(phba, cmd_iocbq);
+
+	/* Failure means BLS ABORT RSP did not get delivered to remote node */
+ if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
+ rsp_iocbq->iocb.ulpStatus,
+ rsp_iocbq->iocb.un.ulpWord[4]);
}
/**
@@ -14151,15 +14219,14 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
* field and RX_ID from ABTS for RX_ID field.
*/
bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
- bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
} else {
/* ABTS sent by initiator to CT exchange, construction
* of BA_ACC will need to allocate a new XRI as for the
- * XRI_TAG and RX_ID fields.
+ * XRI_TAG field.
*/
bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
- bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
}
+ bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
/* Xmit CT abts response on exchange <xid> */
@@ -14748,7 +14815,8 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
* provided rpi via a bitmask.
**/
int
-lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
LPFC_MBOXQ_t *mboxq;
struct lpfc_hba *phba = ndlp->phba;
@@ -14761,6 +14829,13 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
/* Post all rpi memory regions to the port. */
lpfc_resume_rpi(mboxq, ndlp);
+ if (cmpl) {
+ mboxq->mbox_cmpl = cmpl;
+ mboxq->context1 = arg;
+ mboxq->context2 = ndlp;
+ } else
+ mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mboxq->vport = ndlp->vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -14982,6 +15057,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
LPFC_MBOXQ_t *mboxq;
phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
+ phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 29c13b63e323..3290b8e7ab65 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -69,6 +69,7 @@ struct lpfc_iocbq {
#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP 0x100 /* security IO */
#define LPFC_IO_ON_Q 0x200 /* The IO is still on the TXCMPLQ */
+#define LPFC_IO_DIF 0x400 /* T10 DIF IO */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3f266e2c54e0..c19d139618b7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -633,7 +633,8 @@ void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
-int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
+ void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index dd044d01a07f..25cefc254b76 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2011 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.28"
+#define LPFC_DRIVER_VERSION "8.3.30"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index e6173376605d..e5cd8d8d4ce7 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -22,7 +22,6 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
-#include <asm/system.h>
#include <asm/pci-bridge.h>
#include <asm/macio.h>
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 2bccfbe5661e..24828b54773a 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -43,7 +43,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 15eefa1d61fd..4d39a9ffc081 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -670,10 +670,10 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
struct scatterlist *sg;
sg = scsi_sglist(cmd);
- buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg)) + sg->offset;
memset(buf, 0, cmd->cmnd[4]);
- kunmap_atomic(buf - sg->offset, KM_IRQ0);
+ kunmap_atomic(buf - sg->offset);
cmd->result = (DID_OK << 16);
cmd->scsi_done(cmd);
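The megaraid hunk tracks the kernel-wide removal of the KM_* slot argument: kmap_atomic()/kunmap_atomic() now manage their own per-CPU mapping stack, so callers simply drop the second parameter. Before and after, in sketch form:

	/* old API, explicit slot:
	 *	buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
	 *	kunmap_atomic(buf - sg->offset, KM_IRQ0);
	 */
	buf = kmap_atomic(sg_page(sg)) + sg->offset;	/* new API */
	memset(buf, 0, len);
	kunmap_atomic(buf - sg->offset);	/* pass back the page base */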
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 494474779532..e8a04ae3276a 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -33,7 +33,6 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
-#include <asm/system.h>
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/processor.h>
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index a78036f5e1a6..8a59a772fdf2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -132,7 +132,7 @@ static int mpt2sas_remove_dead_ioc_func(void *arg)
pdev = ioc->pdev;
if ((pdev == NULL))
return -1;
- pci_remove_bus_device(pdev);
+ pci_stop_and_remove_bus_device(pdev);
return 0;
}
@@ -657,7 +657,7 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
return;
/* eat the loginfos associated with task aborts */
- if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
+ if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
0x31140000 || log_info == 0x31130000))
return;
@@ -2060,12 +2060,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
{
int i = 0;
char desc[16];
- u8 revision;
u32 iounit_pg1_flags;
u32 bios_version;
bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
- pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
strncpy(desc, ioc->manu_pg0.ChipName, 16);
printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
"ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
@@ -2074,7 +2072,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
ioc->facts.FWVersion.Word & 0x000000FF,
- revision,
+ ioc->pdev->revision,
(bios_version & 0xFF000000) >> 24,
(bios_version & 0x00FF0000) >> 16,
(bios_version & 0x0000FF00) >> 8,
@@ -2575,6 +2573,11 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
GFP_KERNEL, ioc->chain_pages);
+ if (!ioc->chain_lookup) {
+ printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
+ "sz(%d)\n", ioc->name, (int)sz);
+ goto out;
+ }
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
ioc->request_sz, 16, 0);
if (!ioc->chain_dma_pool) {
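Two independent fixes land in the mpt2sas_base.c hunks: a previously missing NULL check on the __get_free_pages() result, and the replacement of a manual config-space read with the revision byte the PCI core already caches at enumeration. The latter, in isolation:

	/* old: re-read config space for a value the core already holds
	 *	u8 revision;
	 *	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
	 */
	u8 revision = pdev->revision;	/* cached by the PCI core at probe */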
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 36ea0b2d8020..2b4d37613d32 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -149,7 +149,7 @@ _config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
desc = "raid_config";
break;
case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
- desc = "driver_mappping";
+ desc = "driver_mapping";
break;
}
break;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 7fceb899029e..3b9a28efea82 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1026,7 +1026,6 @@ _ctl_getiocinfo(void __user *arg)
{
struct mpt2_ioctl_iocinfo karg;
struct MPT2SAS_ADAPTER *ioc;
- u8 revision;
if (copy_from_user(&karg, arg, sizeof(karg))) {
printk(KERN_ERR "failure at %s:%d/%s()!\n",
@@ -1046,8 +1045,7 @@ _ctl_getiocinfo(void __user *arg)
karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
if (ioc->pfacts)
karg.port_number = ioc->pfacts[0].PortNumber;
- pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
- karg.hw_rev = revision;
+ karg.hw_rev = ioc->pdev->revision;
karg.pci_id = ioc->pdev->device;
karg.subsystem_device = ioc->pdev->subsystem_device;
karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 193e33e28e49..d953a57e779d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -5744,7 +5744,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
}
/**
- * _scsih_sas_broadcast_primative_event - handle broadcast events
+ * _scsih_sas_broadcast_primitive_event - handle broadcast events
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
@@ -5752,7 +5752,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
* Return nothing.
*/
static void
-_scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
+_scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
struct scsi_cmnd *scmd;
@@ -7263,7 +7263,7 @@ _firmware_event_work(struct work_struct *work)
fw_event);
break;
case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
- _scsih_sas_broadcast_primative_event(ioc,
+ _scsih_sas_broadcast_primitive_event(ioc,
fw_event);
break;
case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 6f589195746c..cc59dff3810b 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -60,7 +60,6 @@ static struct scsi_host_template mvs_sht = {
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
- .slave_destroy = sas_slave_destroy,
.scan_finished = mvs_scan_finished,
.scan_start = mvs_scan_start,
.change_queue_depth = sas_change_queue_depth,
@@ -74,7 +73,6 @@ static struct scsi_host_template mvs_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
- .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = mvst_host_attrs,
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index a4884a57cf79..fd3b2839843b 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -308,7 +308,7 @@ int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (mvs_prv->scan_finished == 0)
return 0;
- scsi_flush_work(shost);
+ sas_drain_work(sha);
return 1;
}
@@ -893,9 +893,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
- if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
- spin_unlock_irq(dev->sata_dev.ap->lock);
-
spin_lock_irqsave(&mvi->lock, flags);
rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
if (rc)
@@ -906,9 +903,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
(MVS_CHIP_SLOT_SZ - 1));
spin_unlock_irqrestore(&mvi->lock, flags);
- if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
- spin_lock_irq(dev->sata_dev.ap->lock);
-
return rc;
}
@@ -1480,10 +1474,11 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
int rc;
- struct sas_phy *phy = sas_find_local_phy(dev);
+ struct sas_phy *phy = sas_get_local_phy(dev);
int reset_type = (dev->dev_type == SATA_DEV ||
(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
rc = sas_phy_reset(phy, reset_type);
+ sas_put_local_phy(phy);
msleep(2000);
return rc;
}
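sas_find_local_phy() handed back a raw pointer with no lifetime guarantee; the libsas conversion renames it sas_get_local_phy(), which takes a reference the caller must drop with sas_put_local_phy() on every exit path. The resulting pattern:

	struct sas_phy *phy = sas_get_local_phy(dev);	/* takes a reference */
	int rc = sas_phy_reset(phy, hard_reset);

	sas_put_local_phy(phy);		/* drop it, even on error paths */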
@@ -1885,11 +1880,11 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
case SAS_PROTOCOL_SMP: {
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
tstat->stat = SAM_STAT_GOOD;
- to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+ to = kmap_atomic(sg_page(sg_resp));
memcpy(to + sg_resp->offset,
slot->response + sizeof(struct mvs_err_info),
sg_dma_len(sg_resp));
- kunmap_atomic(to, KM_IRQ0);
+ kunmap_atomic(to);
break;
}
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 4b3b4755945c..5982a587babc 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -115,7 +115,6 @@
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 002924963cd8..62b616891a33 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -38,7 +38,6 @@
#include <linux/dma-mapping.h>
#include <asm/dma.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index de0b1a704fb5..21883a2d6324 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -54,7 +54,6 @@ static const char * osst_version = "0.99.4";
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/dma.h>
-#include <asm/system.h>
/* The driver prints some debugging information on the console if DEBUG
is defined and non-zero. */
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index f2018b46f494..2f72c9807b12 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -113,7 +113,6 @@
#include <linux/module.h>
-#include <asm/system.h>
#include <linux/signal.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h
index 4efa4d0950e5..9241c7826034 100644
--- a/drivers/scsi/pm8001/pm8001_chips.h
+++ b/drivers/scsi/pm8001/pm8001_chips.h
@@ -46,9 +46,9 @@ static inline u32 pm8001_read_32(void *virt_addr)
return *((u32 *)virt_addr);
}
-static inline void pm8001_write_32(void *addr, u32 offset, u32 val)
+static inline void pm8001_write_32(void *addr, u32 offset, __le32 val)
{
- *((u32 *)(addr + offset)) = val;
+ *((__le32 *)(addr + offset)) = val;
}
static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index b7b92f7be2aa..9d82ee5c10de 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -338,26 +338,25 @@ update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
}
/**
- * bar4_shift - function is called to shift BAR base address
- * @pm8001_ha : our hba card information
+ * pm8001_bar4_shift - function is called to shift BAR base address
+ * @pm8001_ha : our hba card information
* @shiftValue : shifting value in memory bar.
*/
-static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
+int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
{
u32 regVal;
- u32 max_wait_count;
+ unsigned long start;
/* program the inbound AXI translation Lower Address */
pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);
/* confirm the setting is written */
- max_wait_count = 1 * 1000 * 1000; /* 1 sec */
+ start = jiffies + HZ; /* 1 sec */
do {
- udelay(1);
regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
- } while ((regVal != shiftValue) && (--max_wait_count));
+ } while ((regVal != shiftValue) && time_before(jiffies, start));
- if (!max_wait_count) {
+ if (regVal != shiftValue) {
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
" = 0x%x\n", regVal));
@@ -375,6 +374,7 @@ static void __devinit
mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
{
u32 value, offset, i;
+ unsigned long flags;
#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -388,16 +388,23 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
* Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
* Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
*/
- if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
+ }
for (i = 0; i < 4; i++) {
offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
}
/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
- if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
+ }
for (i = 4; i < 8; i++) {
offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
@@ -421,7 +428,8 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
/*set the shifted destination address to 0x0 to avoid error operation */
- bar4_shift(pm8001_ha, 0x0);
+ pm8001_bar4_shift(pm8001_ha, 0x0);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
}
@@ -437,6 +445,7 @@ mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
u32 offset;
u32 value;
u32 i;
+ unsigned long flags;
#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -445,24 +454,30 @@ mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF
value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
/* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
- if (-1 == bar4_shift(pm8001_ha,
- OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR))
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
+ }
for (i = 0; i < 4; i++) {
offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
pm8001_cw32(pm8001_ha, 2, offset, value);
}
- if (-1 == bar4_shift(pm8001_ha,
- OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR))
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
+ }
for (i = 4; i < 8; i++) {
offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
pm8001_cw32(pm8001_ha, 2, offset, value);
}
/*set the shifted destination address to 0x0 to avoid error operation */
- bar4_shift(pm8001_ha, 0x0);
+ pm8001_bar4_shift(pm8001_ha, 0x0);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
}
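Because pm8001_bar4_shift() is now reachable from several contexts, each caller in these hunks takes pm8001_ha->lock around the entire shift/program/restore window and releases it on every early return. A goto-based cleanup block is the more usual kernel idiom for keeping those unlock sites in one place; a sketch under that assumption, with hypothetical helpers:

	spin_lock_irqsave(&ha->lock, flags);

	if (pm8001_bar4_shift(ha, WINDOW_A) == -1)	/* WINDOW_A hypothetical */
		goto out;
	program_phys_0_3(ha);

	if (pm8001_bar4_shift(ha, WINDOW_B) == -1)
		goto out;
	program_phys_4_7(ha);
out:
	pm8001_bar4_shift(ha, 0);	/* restore the default window */
	spin_unlock_irqrestore(&ha->lock, flags);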
@@ -607,7 +622,8 @@ static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
update_inbnd_queue_table(pm8001_ha, 0);
update_outbnd_queue_table(pm8001_ha, 0);
mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
- mpi_set_open_retry_interval_reg(pm8001_ha, 7);
+ /* 7->130ms, 34->500ms, 119->1.5s */
+ mpi_set_open_retry_interval_reg(pm8001_ha, 119);
/* notify firmware update finished and check initialization status */
if (0 == mpi_init_check(pm8001_ha)) {
PM8001_INIT_DBG(pm8001_ha,
@@ -688,8 +704,11 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("Firmware is ready for reset .\n"));
} else {
- /* Trigger NMI twice via RB6 */
- if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
+ unsigned long flags;
+ /* Trigger NMI twice via RB6 */
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
RB6_ACCESS_REG));
@@ -715,8 +734,10 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
return 0;
}
@@ -733,6 +754,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
u32 regVal, toggleVal;
u32 max_wait_count;
u32 regVal1, regVal2, regVal3;
+ unsigned long flags;
/* step1: Check FW is ready for soft reset */
if (soft_reset_ready_check(pm8001_ha) != 0) {
@@ -743,7 +765,9 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* step 2: clear NMI status register on AAP1 and IOP, write the same
value to clear */
/* map 0x60000 to BAR4(0x20), BAR2(win) */
- if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
MBIC_AAP1_ADDR_BASE));
@@ -754,7 +778,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
/* map 0x70000 to BAR4(0x20), BAR2(win) */
- if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
MBIC_IOP_ADDR_BASE));
@@ -796,7 +821,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* read required registers for confirming */
/* map 0x0700000 to BAR4(0x20), BAR2(win) */
- if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
GSM_ADDR_BASE));
@@ -862,7 +888,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* step 5: delay 10 usec */
udelay(10);
/* step 5-b: set GPIO-0 output control to tristate anyway */
- if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
GPIO_ADDR_BASE));
@@ -878,7 +905,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* Step 6: Reset the IOP and AAP1 */
/* map 0x00000 to BAR4(0x20), BAR2(win) */
- if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
SPC_TOP_LEVEL_ADDR_BASE));
@@ -915,7 +943,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* step 11: reads and sets the GSM Configuration and Reset Register */
/* map 0x0700000 to BAR4(0x20), BAR2(win) */
- if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
GSM_ADDR_BASE));
@@ -968,7 +997,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* step 13: bring the IOP and AAP1 out of reset */
/* map 0x00000 to BAR4(0x20), BAR2(win) */
- if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+ if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
SPC_TOP_LEVEL_ADDR_BASE));
@@ -1010,6 +1040,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
pm8001_cr32(pm8001_ha, 0,
MSGU_SCRATCH_PAD_3)));
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
@@ -1039,9 +1070,12 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
pm8001_cr32(pm8001_ha, 0,
MSGU_SCRATCH_PAD_3)));
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
}
+ pm8001_bar4_shift(pm8001_ha, 0);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("SPC soft reset Complete\n"));
@@ -1157,8 +1191,8 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
msi_index += MSIX_TABLE_BASE;
pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
-
}
+
/**
* pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
@@ -1212,7 +1246,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
consumer_index = pm8001_read_32(circularQ->ci_virt);
circularQ->consumer_index = cpu_to_le32(consumer_index);
if (((circularQ->producer_idx + bcCount) % 256) ==
- circularQ->consumer_index) {
+ le32_to_cpu(circularQ->consumer_index)) {
*messagePtr = NULL;
return -1;
}
@@ -1321,7 +1355,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
u32 header_tmp;
do {
/* If there are not-yet-delivered messages ... */
- if (circularQ->producer_index != circularQ->consumer_idx) {
+ if (le32_to_cpu(circularQ->producer_index)
+ != circularQ->consumer_idx) {
/*Get the pointer to the circular queue buffer element*/
msgHeader = (struct mpi_msg_hdr *)
(circularQ->base_virt +
@@ -1329,14 +1364,14 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
/* read header */
header_tmp = pm8001_read_32(msgHeader);
msgHeader_tmp = cpu_to_le32(header_tmp);
- if (0 != (msgHeader_tmp & 0x80000000)) {
+ if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
if (OPC_OUB_SKIP_ENTRY !=
- (msgHeader_tmp & 0xfff)) {
+ (le32_to_cpu(msgHeader_tmp) & 0xfff)) {
*messagePtr1 =
((u8 *)msgHeader) +
sizeof(struct mpi_msg_hdr);
- *pBC = (u8)((msgHeader_tmp >> 24) &
- 0x1f);
+ *pBC = (u8)((le32_to_cpu(msgHeader_tmp)
+ >> 24) & 0x1f);
PM8001_IO_DBG(pm8001_ha,
pm8001_printk(": CI=%d PI=%d "
"msgHeader=%x\n",
@@ -1347,8 +1382,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
} else {
circularQ->consumer_idx =
(circularQ->consumer_idx +
- ((msgHeader_tmp >> 24) & 0x1f))
- % 256;
+ ((le32_to_cpu(msgHeader_tmp)
+ >> 24) & 0x1f)) % 256;
msgHeader_tmp = 0;
pm8001_write_32(msgHeader, 0, 0);
/* update the CI of outbound queue */
@@ -1360,7 +1395,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
} else {
circularQ->consumer_idx =
(circularQ->consumer_idx +
- ((msgHeader_tmp >> 24) & 0x1f)) % 256;
+ ((le32_to_cpu(msgHeader_tmp) >> 24) &
+ 0x1f)) % 256;
msgHeader_tmp = 0;
pm8001_write_32(msgHeader, 0, 0);
/* update the CI of outbound queue */
@@ -1376,7 +1412,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
producer_index = pm8001_read_32(pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
}
- } while (circularQ->producer_index != circularQ->consumer_idx);
+ } while (le32_to_cpu(circularQ->producer_index) !=
+ circularQ->consumer_idx);
/* while we don't have any more not-yet-delivered message */
/* report empty */
return MPI_IO_STATUS_BUSY;
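The mpi_msg_consume() hunks fix an endianness class of bug that sparse flags: producer_index mirrors a little-endian hardware value and is stored as __le32, so it must pass through le32_to_cpu() before being compared with the CPU-native consumer_idx. The rule in miniature:

	__le32 producer_index;	/* as written by the controller */
	u32 consumer_idx;	/* CPU-native bookkeeping */

	/* wrong on big-endian: if (producer_index != consumer_idx) */
	if (le32_to_cpu(producer_index) != consumer_idx)
		consume_next_message();	/* hypothetical */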
@@ -1388,24 +1425,191 @@ static void pm8001_work_fn(struct work_struct *work)
struct pm8001_device *pm8001_dev;
struct domain_device *dev;
+ /*
+ * So far, all users of this stash an associated structure here.
+ * If we get here, and this pointer is null, then the action
+ * was cancelled. This nullification happens when the device
+ * goes away.
+ */
+ pm8001_dev = pw->data; /* Most handlers stash the device structure */
+ if ((pm8001_dev == NULL)
+ || ((pw->handler != IO_XFER_ERROR_BREAK)
+ && (pm8001_dev->dev_type == NO_DEVICE))) {
+ kfree(pw);
+ return;
+ }
+
switch (pw->handler) {
+ case IO_XFER_ERROR_BREAK:
+ { /* This one stashes the sas_task instead */
+ struct sas_task *t = (struct sas_task *)pm8001_dev;
+ u32 tag;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+ unsigned long flags, flags1;
+ struct task_status_struct *ts;
+ int i;
+
+ if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
+ break; /* Task still on lu */
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+ spin_lock_irqsave(&t->task_state_lock, flags1);
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ break; /* Task got completed by another */
+ }
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+
+ /* Search for a possible ccb that matches the task */
+ for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ tag = ccb->ccb_tag;
+ if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ break;
+ }
+ if (!ccb) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ break; /* Task got freed by another */
+ }
+ ts = &t->task_status;
+ ts->resp = SAS_TASK_COMPLETE;
+ /* Force the midlayer to retry */
+ ts->stat = SAS_QUEUE_FULL;
+ pm8001_dev = ccb->device;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ spin_lock_irqsave(&t->task_state_lock, flags1);
+ t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ t->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p"
+ " done with event 0x%x resp 0x%x stat 0x%x but"
+ " aborted by upper layer!\n",
+ t, pw->handler, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ } else {
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ t->task_done(t);
+ }
+ } break;
+ case IO_XFER_OPEN_RETRY_TIMEOUT:
+ { /* This one stashes the sas_task instead */
+ struct sas_task *t = (struct sas_task *)pm8001_dev;
+ u32 tag;
+ struct pm8001_ccb_info *ccb;
+ struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+ unsigned long flags, flags1;
+ int i, ret = 0;
+
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+
+ ret = pm8001_query_task(t);
+
+ PM8001_IO_DBG(pm8001_ha,
+ switch (ret) {
+ case TMF_RESP_FUNC_SUCC:
+ pm8001_printk("...Task on lu\n");
+ break;
+
+ case TMF_RESP_FUNC_COMPLETE:
+ pm8001_printk("...Task NOT on lu\n");
+ break;
+
+ default:
+ pm8001_printk("...query task failed!!!\n");
+ break;
+ });
+
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+ spin_lock_irqsave(&t->task_state_lock, flags1);
+
+ if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
+ (void)pm8001_abort_task(t);
+ break; /* Task got completed by another */
+ }
+
+ spin_unlock_irqrestore(&t->task_state_lock, flags1);
+
+ /* Search for a possible ccb that matches the task */
+ for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ tag = ccb->ccb_tag;
+ if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ break;
+ }
+ if (!ccb) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
+ (void)pm8001_abort_task(t);
+ break; /* Task got freed by another */
+ }
+
+ pm8001_dev = ccb->device;
+ dev = pm8001_dev->sas_device;
+
+ switch (ret) {
+ case TMF_RESP_FUNC_SUCC: /* task on lu */
+ ccb->open_retry = 1; /* Snub completion */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ ret = pm8001_abort_task(t);
+ ccb->open_retry = 0;
+ switch (ret) {
+ case TMF_RESP_FUNC_SUCC:
+ case TMF_RESP_FUNC_COMPLETE:
+ break;
+ default: /* device misbehavior */
+ ret = TMF_RESP_FUNC_FAILED;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("...Reset phy\n"));
+ pm8001_I_T_nexus_reset(dev);
+ break;
+ }
+ break;
+
+ case TMF_RESP_FUNC_COMPLETE: /* task not on lu */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ /* Do we need to abort the task locally? */
+ break;
+
+ default: /* device misbehavior */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ ret = TMF_RESP_FUNC_FAILED;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("...Reset phy\n"));
+ pm8001_I_T_nexus_reset(dev);
+ }
+
+ if (ret == TMF_RESP_FUNC_FAILED)
+ t = NULL;
+ pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
+ PM8001_IO_DBG(pm8001_ha, pm8001_printk("...Complete\n"));
+ } break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_IN_ERROR:
- pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_NON_OPERATIONAL:
- pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
@@ -1460,6 +1664,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
status = le32_to_cpu(psspPayload->status);
tag = le32_to_cpu(psspPayload->tag);
ccb = &pm8001_ha->ccb_info[tag];
+ if ((status == IO_ABORTED) && ccb->open_retry) {
+ /* Being completed by another */
+ ccb->open_retry = 0;
+ return;
+ }
pm8001_dev = ccb->device;
param = le32_to_cpu(psspPayload->param);
@@ -1515,6 +1724,8 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("IO_XFER_ERROR_BREAK\n"));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
+ /* Force the midlayer to retry */
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
PM8001_IO_DBG(pm8001_ha,
@@ -1719,9 +1930,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_XFER_ERROR_BREAK:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_ERROR_BREAK\n"));
- ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAS_INTERRUPTED;
- break;
+ pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+ return;
case IO_XFER_ERROR_PHY_NOT_READY:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
@@ -1800,10 +2010,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_XFER_OPEN_RETRY_TIMEOUT:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
- ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAS_OPEN_REJECT;
- ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
- break;
+ pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+ return;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
@@ -1877,7 +2085,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
- unsigned long flags = 0;
u32 param;
u32 status;
u32 tag;
@@ -1886,6 +2093,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct ata_task_resp *resp ;
u32 *sata_resp;
struct pm8001_device *pm8001_dev;
+ unsigned long flags;
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
@@ -2016,9 +2224,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*in order to force CPU ordering*/
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@@ -2036,9 +2244,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@@ -2064,9 +2272,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto*/
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@@ -2131,9 +2339,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@@ -2155,9 +2363,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@@ -2190,16 +2398,16 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto */
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
} else if (!t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
}
}
@@ -2207,7 +2415,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
{
struct sas_task *t;
- unsigned long flags = 0;
struct task_status_struct *ts;
struct pm8001_ccb_info *ccb;
struct pm8001_device *pm8001_dev;
@@ -2217,6 +2424,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
+ unsigned long flags;
ccb = &pm8001_ha->ccb_info[tag];
t = ccb->task;
@@ -2287,9 +2495,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@@ -2402,16 +2610,16 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto */
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
} else if (!t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
+ spin_lock_irq(&pm8001_ha->lock);
}
}
@@ -2857,7 +3065,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
memset((u8 *)&payload, 0, sizeof(payload));
circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
- payload.tag = 1;
+ payload.tag = cpu_to_le32(1);
payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
@@ -2929,9 +3137,9 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_type |= PORT_TYPE_SAS;
phy->identify.device_type = deviceType;
phy->phy_attached = 1;
- if (phy->identify.device_type == SAS_END_DEV)
+ if (phy->identify.device_type == SAS_END_DEVICE)
phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
- else if (phy->identify.device_type != NO_DEVICE)
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
phy->sas_phy.oob_mode = SAS_OOB_MODE;
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
@@ -3075,7 +3283,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
(struct dev_reg_resp *)(piomb + 4);
htag = le32_to_cpu(registerRespPayload->tag);
- ccb = &pm8001_ha->ccb_info[registerRespPayload->tag];
+ ccb = &pm8001_ha->ccb_info[htag];
pm8001_dev = ccb->device;
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
@@ -3149,7 +3357,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct fw_control_ex fw_control_context;
struct fw_flash_Update_resp *ppayload =
(struct fw_flash_Update_resp *)(piomb + 4);
- u32 tag = le32_to_cpu(ppayload->tag);
+ u32 tag = ppayload->tag;
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
status = le32_to_cpu(ppayload->status);
memcpy(&fw_control_context,
@@ -3238,13 +3446,12 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct task_abort_resp *pPayload =
(struct task_abort_resp *)(piomb + 4);
- ccb = &pm8001_ha->ccb_info[pPayload->tag];
- t = ccb->task;
-
status = le32_to_cpu(pPayload->status);
tag = le32_to_cpu(pPayload->tag);
scp = le32_to_cpu(pPayload->scp);
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
PM8001_IO_DBG(pm8001_ha,
pm8001_printk(" status = 0x%x\n", status));
if (t == NULL)
@@ -3270,7 +3477,7 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, pPayload->tag);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();
t->task_done(t);
return 0;
@@ -3497,7 +3704,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
u32 pHeader = (u32)*(u32 *)piomb;
- u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
+ u8 opc = (u8)(pHeader & 0xFFF);
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
@@ -3532,7 +3739,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case OPC_OUB_DEREG_DEV:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("unresgister the deviece\n"));
+ pm8001_printk("unregister the device\n"));
mpi_dereg_resp(pm8001_ha, piomb);
break;
case OPC_OUB_GET_DEV_HANDLE:
@@ -3664,9 +3871,11 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
{
struct outbound_queue_table *circularQ;
void *pMsg1 = NULL;
- u8 bc = 0;
+ u8 uninitialized_var(bc);
u32 ret = MPI_IO_STATUS_FAIL;
+ unsigned long flags;
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
circularQ = &pm8001_ha->outbnd_q_tbl[0];
do {
ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
@@ -3677,16 +3886,16 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
}
if (MPI_IO_STATUS_BUSY == ret) {
- u32 producer_idx;
/* Update the producer index from SPC */
- producer_idx = pm8001_read_32(circularQ->pi_virt);
- circularQ->producer_index = cpu_to_le32(producer_idx);
- if (circularQ->producer_index ==
+ circularQ->producer_index =
+ cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+ if (le32_to_cpu(circularQ->producer_index) ==
circularQ->consumer_idx)
/* OQ is empty */
break;
}
} while (1);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return ret;
}
@@ -3712,9 +3921,9 @@ pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
}
}
-static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd)
+static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd)
{
- psmp_cmd->tag = cpu_to_le32(hTag);
+ psmp_cmd->tag = hTag;
psmp_cmd->device_id = cpu_to_le32(deviceID);
psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
}
@@ -3798,7 +4007,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
int ret;
- __le64 phys_addr;
+ u64 phys_addr;
struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
@@ -3819,15 +4028,15 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
- phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]));
- ssp_cmd.addr_low = lower_32_bits(phys_addr);
- ssp_cmd.addr_high = upper_32_bits(phys_addr);
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
+ ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
ssp_cmd.esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
- __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
- ssp_cmd.addr_low = lower_32_bits(dma_addr);
- ssp_cmd.addr_high = upper_32_bits(dma_addr);
+ u64 dma_addr = sg_dma_address(task->scatter);
+ ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+ ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
} else if (task->num_scatter == 0) {
@@ -3850,7 +4059,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
int ret;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
- __le64 phys_addr;
+ u64 phys_addr;
u32 ATAP = 0x0;
u32 dir;
struct inbound_queue_table *circularQ;
@@ -3889,13 +4098,13 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
- phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]));
+ phys_addr = ccb->ccb_dma_handle +
+ offsetof(struct pm8001_ccb_info, buf_prd[0]);
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
sata_cmd.esgl = cpu_to_le32(1 << 31);
} else if (task->num_scatter == 1) {
- __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
+ u64 dma_addr = sg_dma_address(task->scatter);
sata_cmd.addr_low = lower_32_bits(dma_addr);
sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
@@ -4039,7 +4248,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
- payload.tag = 1;
+ payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("unregister device device_id = %d\n", device_id));
@@ -4063,7 +4272,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
memset(&payload, 0, sizeof(payload));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = 1;
+ payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
@@ -4092,12 +4301,9 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
{
- unsigned long flags;
- spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_chip_interrupt_disable(pm8001_ha);
process_oq(pm8001_ha);
pm8001_chip_interrupt_enable(pm8001_ha);
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return IRQ_HANDLED;
}
@@ -4360,8 +4566,10 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
payload.total_image_len = cpu_to_le32(info->total_image_len);
payload.len = info->sgl.im_len.len ;
- payload.sgl_addr_lo = lower_32_bits(info->sgl.addr);
- payload.sgl_addr_hi = upper_32_bits(info->sgl.addr);
+ payload.sgl_addr_lo =
+ cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
+ payload.sgl_addr_hi =
+ cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
return ret;
}
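Several of the pm8001 hunks converge on one rule for 64-bit DMA addresses handed to firmware: keep the address as a plain u64 on the CPU side, and byte-swap only when splitting it into the two little-endian descriptor fields. Sketched:

	u64 phys = sg_dma_address(sg);	/* CPU-native bus address */

	cmd.addr_low  = cpu_to_le32(lower_32_bits(phys));
	cmd.addr_high = cpu_to_le32(upper_32_bits(phys));

Storing a __le64 first and then splitting it, as the old code did, swaps the bytes before the split and corrupts the address on big-endian machines.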
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 909132041c07..1a4611eb0321 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -625,7 +625,7 @@ struct set_nvm_data_req {
__le32 tag;
__le32 len_ir_vpdd;
__le32 vpd_offset;
- u32 reserved[8];
+ __le32 reserved[8];
__le32 resp_addr_lo;
__le32 resp_addr_hi;
__le32 resp_len;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c21a2163f9f6..36efaa7c3a54 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -62,7 +62,6 @@ static struct scsi_host_template pm8001_sht = {
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
- .slave_destroy = sas_slave_destroy,
.scan_finished = pm8001_scan_finished,
.scan_start = pm8001_scan_start,
.change_queue_depth = sas_change_queue_depth,
@@ -76,7 +75,6 @@ static struct scsi_host_template pm8001_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
- .slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = pm8001_host_attrs,
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index fb3dc9978861..3b11edd4a50c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -166,6 +166,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
struct pm8001_hba_info *pm8001_ha = NULL;
struct sas_phy_linkrates *rates;
DECLARE_COMPLETION_ONSTACK(completion);
+ unsigned long flags;
pm8001_ha = sas_phy->ha->lldd_ha;
pm8001_ha->phy[phy_id].enable_completion = &completion;
switch (func) {
@@ -209,8 +210,29 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_DISABLE:
PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
break;
+ case PHY_FUNC_GET_EVENTS:
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (-1 == pm8001_bar4_shift(pm8001_ha,
+ (phy_id < 4) ? 0x30000 : 0x40000)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return -EINVAL;
+ }
+ {
+ struct sas_phy *phy = sas_phy->phy;
+ uint32_t *qp = (uint32_t *)(((char *)
+ pm8001_ha->io_mem[2].memvirtaddr)
+ + 0x1034 + (0x4000 * (phy_id & 3)));
+
+ phy->invalid_dword_count = qp[0];
+ phy->running_disparity_error_count = qp[1];
+ phy->loss_of_dword_sync_count = qp[3];
+ phy->phy_reset_problem_count = qp[4];
+ }
+ pm8001_bar4_shift(pm8001_ha, 0);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return 0;
default:
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
}
msleep(300);
return rc;
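Switching the default return from -ENOSYS to -EOPNOTSUPP follows the usual kernel convention: -ENOSYS is reserved for system calls that do not exist at all, while -EOPNOTSUPP tells the caller that this particular operation is not supported by this device.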
@@ -234,12 +256,14 @@ void pm8001_scan_start(struct Scsi_Host *shost)
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+
/* give the phy enabling interrupt event time to come in (1s
* is empirically about all it takes) */
if (time < HZ)
return 0;
/* Wait for discovery to finish */
- scsi_flush_work(shost);
+ sas_drain_work(ha);
return 1;
}
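Both mvsas and pm8001 move their scan_finished hooks from scsi_flush_work() to sas_drain_work(): libsas discovery now runs on its own workqueue, so that queue, not the Scsi_Host one, is what must drain before the scan can be declared complete. The whole hook, sketched with a hypothetical driver name:

	static int my_scan_finished(struct Scsi_Host *shost, unsigned long time)
	{
		struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

		if (time < HZ)		/* give phy-up events ~1s to arrive */
			return 0;
		sas_drain_work(ha);	/* wait for libsas discovery to settle */
		return 1;
	}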
@@ -340,7 +364,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
struct pm8001_ccb_info *ccb;
u32 tag = 0xdeadbeef, rc, n_elem = 0;
u32 n = num;
- unsigned long flags = 0, flags_libsas = 0;
+ unsigned long flags = 0;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
@@ -364,11 +388,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
ts->stat = SAS_PHY_DOWN;
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- spin_unlock_irqrestore(dev->sata_dev.ap->lock,
- flags_libsas);
t->task_done(t);
- spin_lock_irqsave(dev->sata_dev.ap->lock,
- flags_libsas);
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (n > 1)
t = list_entry(t->list.next,
@@ -516,6 +536,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
task->lldd_task = NULL;
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
+ ccb->open_retry = 0;
pm8001_ccb_free(pm8001_ha, ccb_idx);
}
@@ -615,7 +636,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
wait_for_completion(&completion);
if (dev->dev_type == SAS_END_DEV)
msleep(50);
- pm8001_ha->flags |= PM8001F_RUN_TIME ;
+ pm8001_ha->flags = PM8001F_RUN_TIME;
return 0;
found_out:
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -860,6 +881,77 @@ static int pm8001_issue_ssp_tmf(struct domain_device *dev,
tmf);
}
+/* retry commands by ha, by task and/or by device */
+void pm8001_open_reject_retry(
+ struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task_to_close,
+ struct pm8001_device *device_to_close)
+{
+ int i;
+ unsigned long flags;
+
+ if (pm8001_ha == NULL)
+ return;
+
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+ for (i = 0; i < PM8001_MAX_CCB; i++) {
+ struct sas_task *task;
+ struct task_status_struct *ts;
+ struct pm8001_device *pm8001_dev;
+ unsigned long flags1;
+ u32 tag;
+ struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
+
+ pm8001_dev = ccb->device;
+ if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
+ continue;
+ if (!device_to_close) {
+ uintptr_t d = (uintptr_t)pm8001_dev
+ - (uintptr_t)&pm8001_ha->devices;
+ if (((d % sizeof(*pm8001_dev)) != 0)
+ || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
+ continue;
+ } else if (pm8001_dev != device_to_close)
+ continue;
+ tag = ccb->ccb_tag;
+ if (!tag || (tag == 0xFFFFFFFF))
+ continue;
+ task = ccb->task;
+ if (!task || !task->task_done)
+ continue;
+ if (task_to_close && (task != task_to_close))
+ continue;
+ ts = &task->task_status;
+ ts->resp = SAS_TASK_COMPLETE;
+ /* Force the midlayer to retry */
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ if (pm8001_dev)
+ pm8001_dev->running_req--;
+ spin_lock_irqsave(&task->task_state_lock, flags1);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((task->task_state_flags
+ & SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags1);
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags1);
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ mb();/* in order to force CPU ordering */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ task->task_done(task);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+}
+
/**
* Standard mandates link reset for ATA (type 0) and hard reset for
* SSP (type 1) , only for RECOVERY
@@ -875,12 +967,14 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- phy = sas_find_local_phy(dev);
+ phy = sas_get_local_phy(dev);
if (dev_is_sata(dev)) {
DECLARE_COMPLETION_ONSTACK(completion_setstate);
- if (scsi_is_sas_phy_local(phy))
- return 0;
+ if (scsi_is_sas_phy_local(phy)) {
+ rc = 0;
+ goto out;
+ }
rc = sas_phy_reset(phy, 1);
msleep(2000);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
@@ -889,12 +983,14 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
pm8001_dev, 0x01);
wait_for_completion(&completion_setstate);
- } else{
- rc = sas_phy_reset(phy, 1);
- msleep(2000);
+ } else {
+ rc = sas_phy_reset(phy, 1);
+ msleep(2000);
}
PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
pm8001_dev->device_id, rc));
+ out:
+ sas_put_local_phy(phy);
return rc;
}
@@ -906,10 +1002,11 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
if (dev_is_sata(dev)) {
- struct sas_phy *phy = sas_find_local_phy(dev);
+ struct sas_phy *phy = sas_get_local_phy(dev);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
rc = sas_phy_reset(phy, 1);
+ sas_put_local_phy(phy);
rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
pm8001_dev, 0x01);
msleep(2000);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 93959febe205..11008205aeb3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -235,6 +235,7 @@ struct pm8001_ccb_info {
struct pm8001_device *device;
struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG];
struct fw_control_ex *fw_control_context;
+ u8 open_retry;
};
struct mpi_mem {
@@ -484,10 +485,15 @@ void pm8001_dev_gone(struct domain_device *dev);
int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
int pm8001_I_T_nexus_reset(struct domain_device *dev);
int pm8001_query_task(struct sas_task *task);
+void pm8001_open_reject_retry(
+ struct pm8001_hba_info *pm8001_ha,
+ struct sas_task *task_to_close,
+ struct pm8001_device *device_to_close);
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
u32 mem_size, u32 align);
+int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index ca496c7474e3..e1d150f3fd4d 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -857,11 +857,11 @@ static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
{0x01180600, IOASC_LOG_LEVEL_HARD,
"Recovered Error, soft media error, sector reassignment suggested"},
{0x015D0000, IOASC_LOG_LEVEL_HARD,
- "Recovered Error, failure prediction thresold exceeded"},
+ "Recovered Error, failure prediction threshold exceeded"},
{0x015D9200, IOASC_LOG_LEVEL_HARD,
- "Recovered Error, soft Cache Card Battery error thresold"},
+ "Recovered Error, soft Cache Card Battery error threshold"},
{0x015D9200, IOASC_LOG_LEVEL_HARD,
- "Recovered Error, soft Cache Card Battery error thresold"},
+ "Recovered Error, soft Cache Card Battery error threshold"},
{0x02048000, IOASC_LOG_LEVEL_HARD,
"Not Ready, IOA Reset Required"},
{0x02408500, IOASC_LOG_LEVEL_HARD,
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index d838205ab169..6c6486f626ee 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -359,7 +359,6 @@
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/types.h>
-#include <asm/system.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9f41b3b4358f..5926f5a87ea8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -356,7 +356,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
else if (start == (ha->flt_region_boot * 4) ||
start == (ha->flt_region_fw * 4))
valid = 1;
- else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+ else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
+ || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7065,
@@ -627,144 +628,6 @@ static struct bin_attribute sysfs_reset_attr = {
};
static ssize_t
-qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
- struct device, kobj)));
- struct qla_hw_data *ha = vha->hw;
- uint16_t dev, adr, opt, len;
- int rval;
-
- ha->edc_data_len = 0;
-
- if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
- return -EINVAL;
-
- if (!ha->edc_data) {
- ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
- &ha->edc_data_dma);
- if (!ha->edc_data) {
- ql_log(ql_log_warn, vha, 0x7073,
- "Unable to allocate memory for EDC write.\n");
- return -ENOMEM;
- }
- }
-
- dev = le16_to_cpup((void *)&buf[0]);
- adr = le16_to_cpup((void *)&buf[2]);
- opt = le16_to_cpup((void *)&buf[4]);
- len = le16_to_cpup((void *)&buf[6]);
-
- if (!(opt & BIT_0))
- if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
- return -EINVAL;
-
- memcpy(ha->edc_data, &buf[8], len);
-
- rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
- dev, adr, len, opt);
- if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x7074,
- "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
- rval, dev, adr, opt, len, buf[8]);
- return -EIO;
- }
-
- return count;
-}
-
-static struct bin_attribute sysfs_edc_attr = {
- .attr = {
- .name = "edc",
- .mode = S_IWUSR,
- },
- .size = 0,
- .write = qla2x00_sysfs_write_edc,
-};
-
-static ssize_t
-qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
- struct device, kobj)));
- struct qla_hw_data *ha = vha->hw;
- uint16_t dev, adr, opt, len;
- int rval;
-
- ha->edc_data_len = 0;
-
- if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
- return -EINVAL;
-
- if (!ha->edc_data) {
- ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
- &ha->edc_data_dma);
- if (!ha->edc_data) {
- ql_log(ql_log_warn, vha, 0x708c,
- "Unable to allocate memory for EDC status.\n");
- return -ENOMEM;
- }
- }
-
- dev = le16_to_cpup((void *)&buf[0]);
- adr = le16_to_cpup((void *)&buf[2]);
- opt = le16_to_cpup((void *)&buf[4]);
- len = le16_to_cpup((void *)&buf[6]);
-
- if (!(opt & BIT_0))
- if (len == 0 || len > DMA_POOL_SIZE)
- return -EINVAL;
-
- memset(ha->edc_data, 0, len);
- rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
- dev, adr, len, opt);
- if (rval != QLA_SUCCESS) {
- ql_log(ql_log_info, vha, 0x7075,
- "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
- rval, dev, adr, opt, len);
- return -EIO;
- }
-
- ha->edc_data_len = len;
-
- return count;
-}
-
-static ssize_t
-qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
- struct device, kobj)));
- struct qla_hw_data *ha = vha->hw;
-
- if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
- return 0;
-
- if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
- return -EINVAL;
-
- memcpy(buf, ha->edc_data, ha->edc_data_len);
-
- return ha->edc_data_len;
-}
-
-static struct bin_attribute sysfs_edc_status_attr = {
- .attr = {
- .name = "edc_status",
- .mode = S_IRUSR | S_IWUSR,
- },
- .size = 0,
- .write = qla2x00_sysfs_write_edc_status,
- .read = qla2x00_sysfs_read_edc_status,
-};
-
-static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -879,8 +742,6 @@ static struct sysfs_entry {
{ "vpd", &sysfs_vpd_attr, 1 },
{ "sfp", &sysfs_sfp_attr, 1 },
{ "reset", &sysfs_reset_attr, },
- { "edc", &sysfs_edc_attr, 2 },
- { "edc_status", &sysfs_edc_status_attr, 2 },
{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
{ NULL },
@@ -898,7 +759,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
continue;
- if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
+ if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -926,7 +787,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
continue;
- if (iter->is4GBp_only == 3 && !!(IS_QLA8XXX_TYPE(vha->hw)))
+ if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -1231,7 +1092,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1278,7 +1139,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1293,7 +1154,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1316,7 +1177,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA8XXX_TYPE(vha->hw))
+ if (!IS_CNA_CAPABLE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1328,7 +1189,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA8XXX_TYPE(vha->hw))
+ if (!IS_CNA_CAPABLE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1364,7 +1225,7 @@ qla2x00_thermal_temp_show(struct device *dev,
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
if (rval != QLA_SUCCESS)
- temp = frac = 0;
+ return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
}
@@ -1493,6 +1354,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_10GB:
speed = FC_PORTSPEED_10GBIT;
break;
+ case PORT_SPEED_16GB:
+ speed = FC_PORTSPEED_16GBIT;
+ break;
}
fc_host_speed(shost) = speed;
}
@@ -1643,10 +1507,14 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* final cleanup of firmware resources (PCBs and XCBs).
*/
if (fcport->loop_id != FC_NO_LOOP_ID &&
- !test_bit(UNLOADING, &fcport->vha->dpc_flags))
- fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
- fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
+ if (IS_FWI2_CAPABLE(fcport->vha->hw))
+ fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ else
+ qla2x00_port_logout(fcport->vha, fcport);
+ }
}
static int
@@ -1889,6 +1757,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
break;
}
}
+
if (qos) {
ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
qos);
@@ -2086,7 +1955,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha))
speed = FC_PORTSPEED_10GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
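
The qla2x00_terminate_rport_io() change above splits the logout path by
firmware interface: FWI2-capable ISPs keep the explicit fabric_logout()
call, while older adapters now go through the new qla2x00_port_logout()
helper (backed by the MBC_PORT_LOGOUT mailbox opcode added in qla_def.h
below). A rough standalone model of that dispatch, with stand-in names:

    #include <stdio.h>

    struct fcport { int loop_id; };

    static void fabric_logout(struct fcport *f)
    {
            printf("fabric logout, loop id %d\n", f->loop_id);
    }

    static void port_logout(struct fcport *f)
    {
            printf("port logout, loop id %d\n", f->loop_id);
    }

    static void terminate_rport_io(struct fcport *f, int fwi2_capable)
    {
            if (fwi2_capable)
                    fabric_logout(f);       /* 24xx-and-later interface */
            else
                    port_logout(f);         /* legacy mailbox path */
    }

    int main(void)
    {
            struct fcport f = { .loop_id = 3 };

            terminate_rport_io(&f, 1);
            terminate_rport_io(&f, 0);
            return 0;
    }
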
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 1682e2e4201d..f74cc0602f3b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -11,29 +11,36 @@
#include <linux/delay.h>
/* BSG support for ELS/CT pass through */
-inline srb_t *
-qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+void
+qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
- srb_t *sp;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+ bsg_job->reply->result = res;
+ bsg_job->job_done(bsg_job);
+ sp->free(vha, sp);
+}
+
+void
+qla2x00_bsg_sp_free(void *data, void *ptr)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
- struct srb_ctx *ctx;
- sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
- if (!sp)
- goto done;
- ctx = kzalloc(size, GFP_KERNEL);
- if (!ctx) {
- mempool_free(sp, ha->srb_mempool);
- sp = NULL;
- goto done;
- }
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->ctx = ctx;
- ctx->iocbs = 1;
-done:
- return sp;
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+ if (sp->type == SRB_CT_CMD ||
+ sp->type == SRB_ELS_CMD_HST)
+ kfree(sp->fcport);
+ mempool_free(sp, vha->hw->srb_mempool);
}
int
@@ -101,8 +108,6 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
uint32_t len;
uint32_t oper;
- bsg_job->reply->reply_payload_rcv_len = 0;
-
if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
ret = -EINVAL;
goto exit_fcp_prio_cfg;
@@ -217,6 +222,7 @@ exit_fcp_prio_cfg:
bsg_job->job_done(bsg_job);
return ret;
}
+
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
@@ -230,7 +236,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
int req_sg_cnt, rsp_sg_cnt;
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
- struct srb_ctx *els;
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
@@ -337,20 +342,21 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
/* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
rval = -ENOMEM;
goto done_unmap_sg;
}
- els = sp->ctx;
- els->type =
+ sp->type =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
- els->name =
+ sp->name =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
"bsg_els_rpt" : "bsg_els_hst");
- els->u.bsg_job = bsg_job;
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
@@ -362,7 +368,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_unmap_sg;
@@ -409,7 +414,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
uint16_t loop_id;
struct fc_port *fcport;
char *type = "FC_BSG_HST_CT";
- struct srb_ctx *ct;
req_sg_cnt =
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
@@ -486,19 +490,20 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
fcport->loop_id = loop_id;
/* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_warn, vha, 0x7015,
- "qla2x00_get_ctx_bsg_sp failed.\n");
+ "qla2x00_get_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
- ct = sp->ctx;
- ct->type = SRB_CT_CMD;
- ct->name = "bsg_ct";
- ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
- ct->u.bsg_job = bsg_job;
+ sp->type = SRB_CT_CMD;
+ sp->name = "bsg_ct";
+ sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
@@ -511,7 +516,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_free_fcport;
@@ -540,7 +544,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
int rval = 0;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto done_set_internal;
new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -582,7 +586,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@@ -707,7 +711,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if ((ha->current_topology == ISP_CFG_F ||
(atomic_read(&vha->loop_state) == LOOP_DOWN) ||
- (IS_QLA81XX(ha) &&
+ ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@@ -717,13 +721,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
} else {
- if (IS_QLA81XX(ha)) {
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
- bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
goto done_free_dma_req;
@@ -731,14 +734,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (elreq.options != EXTERNAL_LOOPBACK) {
ql_dbg(ql_dbg_user, vha, 0x7020,
- "Internal: curent port config = %x\n",
+ "Internal: current port config = %x\n",
config[0]);
if (qla81xx_set_internal_loopback(vha, config,
new_config)) {
ql_log(ql_log_warn, vha, 0x7024,
"Internal loopback failed.\n");
- bsg_job->reply->reply_payload_rcv_len =
- 0;
bsg_job->reply->result =
(DID_ERROR << 16);
rval = -EPERM;
@@ -750,8 +751,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
*/
if (qla81xx_reset_internal_loopback(vha,
config, 1)) {
- bsg_job->reply->reply_payload_rcv_len =
- 0;
bsg_job->reply->result =
(DID_ERROR << 16);
rval = -EPERM;
@@ -788,7 +787,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"MPI reset failed.\n");
}
- bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EIO;
goto done_free_dma_req;
@@ -813,7 +811,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
fw_sts_ptr += sizeof(response);
*fw_sts_ptr = command_sent;
rval = 0;
- bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
@@ -872,7 +869,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x7030,
"Vendor request 84xx reset failed.\n");
- rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
@@ -971,9 +968,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7037,
"Vendor request 84xx updatefw failed.\n");
- rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
-
} else {
ql_dbg(ql_dbg_user, vha, 0x7038,
"Vendor request 84xx updatefw completed.\n");
@@ -1159,7 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7043,
"Vendor request 84xx mgmt failed.\n");
- rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
@@ -1210,8 +1206,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint8_t *rsp_ptr = NULL;
- bsg_job->reply->reply_payload_rcv_len = 0;
-
if (!IS_IIDMA_CAPABLE(vha->hw)) {
ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
return -EINVAL;
@@ -1304,8 +1298,6 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
int valid = 0;
struct qla_hw_data *ha = vha->hw;
- bsg_job->reply->reply_payload_rcv_len = 0;
-
if (unlikely(pci_channel_offline(ha->pdev)))
return -EINVAL;
@@ -1331,7 +1323,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
- IS_QLA8XXX_TYPE(ha))
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7058,
@@ -1617,6 +1609,9 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
struct Scsi_Host *host;
scsi_qla_host_t *vha;
+ /* In case no data transferred. */
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
fcport = *(fc_port_t **) rport->dd_data;
@@ -1655,6 +1650,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_RPT_CT:
default:
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
+ bsg_job->reply->result = ret;
break;
}
return ret;
@@ -1669,7 +1665,6 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
int cnt, que;
unsigned long flags;
struct req_que *req;
- struct srb_ctx *sp_bsg;
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1681,11 +1676,9 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- sp_bsg = sp->ctx;
-
- if (((sp_bsg->type == SRB_CT_CMD) ||
- (sp_bsg->type == SRB_ELS_CMD_HST))
- && (sp_bsg->u.bsg_job == bsg_job)) {
+ if (((sp->type == SRB_CT_CMD) ||
+ (sp->type == SRB_ELS_CMD_HST))
+ && (sp->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
@@ -1715,7 +1708,6 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
- kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
return 0;
}
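
A theme running through the qla_bsg.c rework above: the separate srb_ctx is
retired, and the completion and teardown handlers now hang directly off the
srb (sp->done, sp->free), so ELS, CT and SCSI completions can share one
code path. A compact standalone model of that callback shape, with
stand-in types:

    #include <stdio.h>
    #include <stdlib.h>

    struct srb {
            const char *name;
            void (*done)(void *host, void *sp, int res);
            void (*free)(void *host, void *sp);
    };

    static void bsg_sp_free(void *host, void *vsp)
    {
            (void)host;             /* dma_unmap_sg()/kfree() would go here */
            free(vsp);
    }

    static void bsg_job_done(void *host, void *vsp, int res)
    {
            struct srb *sp = vsp;

            printf("%s completed, result=%d\n", sp->name, res);
            sp->free(host, sp);     /* completion hands off to the free hook */
    }

    int main(void)
    {
            struct srb *sp = malloc(sizeof(*sp));

            if (!sp)
                    return 1;
            sp->name = "bsg_els_hst";
            sp->done = bsg_job_done;
            sp->free = bsg_sp_free;

            sp->done(NULL, sp, 0);  /* the ISR completion path invokes this */
            return 0;
    }
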
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45cbf0ba624d..897731b93df2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,23 +11,27 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0116 | 0xfa |
- * | Mailbox commands | 0x112b | |
- * | Device Discovery | 0x2084 | |
- * | Queue Command and IO tracing | 0x302f | 0x3008,0x302d, |
- * | | | 0x302e |
+ * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x113e | 0x112c-0x112e |
+ * | | | 0x113a |
+ * | Device Discovery | 0x2086 | 0x2020-0x2022 |
+ * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 |
+ * | | | 0x302d-0x302e |
* | DPC Thread | 0x401c | |
- * | Async Events | 0x5057 | 0x5052 |
- * | Timer Routines | 0x6011 | 0x600e,0x600f |
- * | User Space Interactions | 0x709e | 0x7018,0x702e |
- * | | | 0x7039,0x7045 |
+ * | Async Events | 0x505d | 0x502b-0x502f |
+ * | | | 0x5047,0x5052 |
+ * | Timer Routines | 0x6011 | 0x600e-0x600f |
+ * | User Space Interactions | 0x709f | 0x7018,0x702e, |
+ * | | | 0x7039,0x7045, |
+ * | | | 0x7073-0x7075, |
+ * | | | 0x708c |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb052 | |
- * | MultiQ | 0xc00b | |
- * | Misc | 0xd00b | |
+ * | ISP82XX Specific | 0xb054 | 0xb053 |
+ * | MultiQ | 0xc00c | |
+ * | Misc | 0xd010 | |
* ----------------------------------------------------------------------
*/
@@ -85,7 +89,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- dwords = GID_LIST_SIZE / 4;
+ dwords = qla2x00_gid_list_size(ha) / 4;
for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
cnt += dwords, addr += dwords) {
if (cnt + dwords > ram_dwords)
@@ -260,7 +264,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- words = GID_LIST_SIZE / 2;
+ words = qla2x00_gid_list_size(ha) / 2;
for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
cnt += words, addr += words) {
if (cnt + words > ram_words)
@@ -375,6 +379,77 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ struct qla2xxx_mqueue_chain *q;
+ struct qla2xxx_mqueue_header *qh;
+ struct req_que *req;
+ struct rsp_que *rsp;
+ int que;
+
+ if (!ha->mqenable)
+ return ptr;
+
+ /* Request queues */
+ for (que = 1; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ break;
+
+ /* Add chain. */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (req->length * sizeof(request_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(req->length * sizeof(request_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, req->ring, req->length * sizeof(request_t));
+ ptr += req->length * sizeof(request_t);
+ }
+
+ /* Response queues */
+ for (que = 1; que < ha->max_rsp_queues; que++) {
+ rsp = ha->rsp_q_map[que];
+ if (!rsp)
+ break;
+
+ /* Add chain. */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (rsp->length * sizeof(response_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(rsp->length * sizeof(response_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
+ ptr += rsp->length * sizeof(response_t);
+ }
+
+ return ptr;
+}
+
+static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
uint32_t cnt, que_idx;
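
qla25xx_copy_mqueues() above appends each extra request/response ring to the
firmware dump as a self-describing record: a chain tag plus total size, a
per-queue header (kind, index, payload length), then the raw ring, with the
write pointer bumped past each piece so records can be walked later. A
standalone sketch of that serializer pattern (local stand-in structs, not
the driver's):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <arpa/inet.h>          /* htonl(): the dump is big-endian */

    struct chain  { uint32_t type, chain_size; };
    struct header { uint32_t queue, number, size; };

    static void *emit_queue(void *ptr, uint32_t kind, uint32_t idx,
                            const void *ring, uint32_t len)
    {
            char *p = ptr;
            struct chain *q = (struct chain *)p;
            struct header *qh;

            q->type = htonl(0x7FFFFAF2);    /* DUMP_CHAIN_QUEUE-style tag */
            q->chain_size = htonl(sizeof(*q) + sizeof(*qh) + len);
            p += sizeof(*q);

            qh = (struct header *)p;
            qh->queue = htonl(kind);        /* request vs. response */
            qh->number = htonl(idx);
            qh->size = htonl(len);
            p += sizeof(*qh);

            memcpy(p, ring, len);           /* raw ring contents */
            return p + len;
    }

    int main(void)
    {
            uint32_t buf[64];
            unsigned char ring[32] = { 0 };
            void *end = emit_queue(buf, 1, 1, ring, sizeof(ring));

            printf("emitted %td bytes\n", (char *)end - (char *)buf);
            return 0;
    }
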
@@ -382,7 +457,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
struct qla2xxx_mq_chain *mq = ptr;
struct device_reg_25xxmq __iomem *reg;
- if (!ha->mqenable)
+ if (!ha->mqenable || IS_QLA83XX(ha))
return ptr;
mq = ptr;
@@ -1322,12 +1397,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt = qla24xx_copy_eft(ha, nxt);
/* Chain entries -- started with MQ. */
- qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
}
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
qla25xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1636,12 +1715,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt = qla24xx_copy_eft(ha, nxt);
/* Chain entries -- started with MQ. */
- qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
}
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
qla81xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1650,6 +1733,507 @@ qla81xx_fw_dump_failed:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+void
+qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt, reg_data;
+ uint32_t risc_address;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t __iomem *dmp_reg;
+ uint32_t *iter_reg;
+ uint16_t __iomem *mbx_reg;
+ unsigned long flags;
+ struct qla83xx_fw_dump *fw;
+ uint32_t ext_mem_cnt;
+ void *nxt, *nxt_chain;
+ uint32_t *last_chain = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ risc_address = ext_mem_cnt = 0;
+ flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ ql_log(ql_log_warn, vha, 0xd00c,
+ "No buffer available for dump!!!\n");
+ goto qla83xx_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xd00d,
+ "Firmware has been previously dumped (%p) -- ignoring "
+ "request...\n", ha->fw_dump);
+ goto qla83xx_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp83;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+
+ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+ /* Pause RISC. */
+ rval = qla24xx_pause_risc(reg);
+ if (rval != QLA_SUCCESS)
+ goto qla83xx_fw_dump_failed_0;
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+ dmp_reg = &reg->iobase_window;
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ dmp_reg = &reg->unused_4_1[0];
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+ dmp_reg = &reg->unused_4_1[2];
+ reg_data = RD_REG_DWORD(dmp_reg);
+ WRT_REG_DWORD(dmp_reg, 0);
+
+ /* select PCR and disable ecc checking and correction */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
+
+ /* Host/Risc registers. */
+ iter_reg = fw->host_risc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7040, 16, iter_reg);
+
+ /* PCIe registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ dmp_reg = &reg->iobase_c4;
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ WRT_REG_DWORD(&reg->iobase_window, 0x00);
+ RD_REG_DWORD(&reg->iobase_window);
+
+ /* Host interface registers. */
+ dmp_reg = &reg->flash_addr;
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Disable interrupts. */
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
+
+ /* Shadow registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ /* RISC I/O register. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+ /* Transfer sequence registers. */
+ iter_reg = fw->xseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+ iter_reg = fw->xseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+ qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
+
+ /* Receive sequence registers. */
+ iter_reg = fw->rseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+ iter_reg = fw->rseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+ qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+ qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
+
+ /* Auxiliary sequence registers. */
+ iter_reg = fw->aseq_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB170, 16, iter_reg);
+
+ iter_reg = fw->aseq_0_reg;
+ iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+ qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+ qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+ qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
+
+ /* Command DMA registers. */
+ iter_reg = fw->cmd_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
+ qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
+
+ /* Queues. */
+ iter_reg = fw->req0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->resp0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->req1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Transmit DMA registers. */
+ iter_reg = fw->xmt0_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+ iter_reg = fw->xmt1_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+ iter_reg = fw->xmt2_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+ iter_reg = fw->xmt3_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+ iter_reg = fw->xmt4_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+ qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+ /* Receive DMA registers. */
+ iter_reg = fw->rcvt0_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+ iter_reg = fw->rcvt1_data_dma_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+ qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+ /* RISC registers. */
+ iter_reg = fw->risc_gp_reg;
+ iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+ qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+ /* Local memory controller registers. */
+ iter_reg = fw->lmc_reg;
+ iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+ qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+ /* Fibre Protocol Module registers. */
+ iter_reg = fw->fpm_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
+
+ /* RQ0 Array registers. */
+ iter_reg = fw->rq0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
+
+ /* RQ1 Array registers. */
+ iter_reg = fw->rq1_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
+
+ /* RP0 Array registers. */
+ iter_reg = fw->rp0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
+
+ /* RP1 Array registers. */
+ iter_reg = fw->rp1_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
+
+ iter_reg = fw->at0_array_reg;
+ iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
+
+ /* I/O Queue Control registers. */
+ qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
+
+ /* Frame Buffer registers. */
+ iter_reg = fw->fb_hdw_reg;
+ iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
+ iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
+ qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+ /* Multi queue registers */
+ nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+ &last_chain);
+
+ rval = qla24xx_soft_reset(ha);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xd00e,
+ "SOFT RESET FAILED, forcing continuation of dump!!!\n");
+ rval = QLA_SUCCESS;
+
+ ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ RD_REG_DWORD(&reg->hccr);
+
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr);
+
+ for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+ udelay(5);
+
+ if (!cnt) {
+ nxt = fw->code_ram;
+ nxt += sizeof(fw->code_ram);
+ nxt += (ha->fw_memory_size - 0x100000 + 1);
+ goto copy_queue;
+ } else
+ ql_log(ql_log_warn, vha, 0xd010,
+ "bigger hammer success?\n");
+ }
+
+ rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+ &nxt);
+ if (rval != QLA_SUCCESS)
+ goto qla83xx_fw_dump_failed_0;
+
+copy_queue:
+ nxt = qla2xxx_copy_queues(ha, nxt);
+
+ nxt = qla24xx_copy_eft(ha, nxt);
+
+ /* Chain entries -- started with MQ. */
+ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla83xx_fw_dump_failed_0:
+ qla2xxx_dump_post_process(base_vha, rval);
+
+qla83xx_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
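
Most of qla83xx_fw_dump() above is windowed register capture: select a
window base, read a fixed count of 32-bit registers into the dump buffer,
and return the advanced cursor so windows can be chained one after another.
A standalone model of that iter_reg chaining (no real MMIO; the bank is
faked):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    static uint32_t fake_bank[16];          /* stands in for the BAR window */

    static uint32_t *read_window(uint32_t base, int count, uint32_t *buf)
    {
            for (int i = 0; i < count; i++)
                    *buf++ = htonl(fake_bank[i] + base);  /* RD_REG_DWORD() */
            return buf;                     /* caller chains the next window */
    }

    int main(void)
    {
            uint32_t dump[32], *iter = dump;

            iter = read_window(0x7000, 16, iter);
            iter = read_window(0x7010, 16, iter);
            printf("captured %td dwords\n", iter - dump);
            return 0;
    }
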
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
@@ -1782,13 +2366,13 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
vaf.va = &va;
switch (level) {
- case 0: /* FATAL LOG */
+ case ql_log_fatal: /* FATAL LOG */
pr_crit("%s%pV", pbuf, &vaf);
break;
- case 1:
+ case ql_log_warn:
pr_err("%s%pV", pbuf, &vaf);
break;
- case 2:
+ case ql_log_info:
pr_warn("%s%pV", pbuf, &vaf);
break;
default:
@@ -1837,13 +2421,13 @@ ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
vaf.va = &va;
switch (level) {
- case 0: /* FATAL LOG */
+ case ql_log_fatal: /* FATAL LOG */
pr_crit("%s%pV", pbuf, &vaf);
break;
- case 1:
+ case ql_log_warn:
pr_err("%s%pV", pbuf, &vaf);
break;
- case 2:
+ case ql_log_info:
pr_warn("%s%pV", pbuf, &vaf);
break;
default:
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 5f1b6d9c3dcb..2157bdf1569a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -165,6 +165,54 @@ struct qla81xx_fw_dump {
uint32_t ext_mem[1];
};
+struct qla83xx_fw_dump {
+ uint32_t host_status;
+ uint32_t host_risc_reg[48];
+ uint32_t pcie_regs[4];
+ uint32_t host_reg[32];
+ uint32_t shadow_reg[11];
+ uint32_t risc_io_reg;
+ uint16_t mailbox_reg[32];
+ uint32_t xseq_gp_reg[256];
+ uint32_t xseq_0_reg[48];
+ uint32_t xseq_1_reg[16];
+ uint32_t xseq_2_reg[16];
+ uint32_t rseq_gp_reg[256];
+ uint32_t rseq_0_reg[32];
+ uint32_t rseq_1_reg[16];
+ uint32_t rseq_2_reg[16];
+ uint32_t rseq_3_reg[16];
+ uint32_t aseq_gp_reg[256];
+ uint32_t aseq_0_reg[32];
+ uint32_t aseq_1_reg[16];
+ uint32_t aseq_2_reg[16];
+ uint32_t aseq_3_reg[16];
+ uint32_t cmd_dma_reg[64];
+ uint32_t req0_dma_reg[15];
+ uint32_t resp0_dma_reg[15];
+ uint32_t req1_dma_reg[15];
+ uint32_t xmt0_dma_reg[32];
+ uint32_t xmt1_dma_reg[32];
+ uint32_t xmt2_dma_reg[32];
+ uint32_t xmt3_dma_reg[32];
+ uint32_t xmt4_dma_reg[32];
+ uint32_t xmt_data_dma_reg[16];
+ uint32_t rcvt0_data_dma_reg[32];
+ uint32_t rcvt1_data_dma_reg[32];
+ uint32_t risc_gp_reg[128];
+ uint32_t lmc_reg[128];
+ uint32_t fpm_hdw_reg[256];
+ uint32_t rq0_array_reg[256];
+ uint32_t rq1_array_reg[256];
+ uint32_t rp0_array_reg[256];
+ uint32_t rp1_array_reg[256];
+ uint32_t queue_control_reg[16];
+ uint32_t fb_hdw_reg[432];
+ uint32_t at0_array_reg[128];
+ uint32_t code_ram[0x2400];
+ uint32_t ext_mem[1];
+};
+
#define EFT_NUM_BUFFERS 4
#define EFT_BYTES_PER_BUFFER 0x4000
#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -192,9 +240,23 @@ struct qla2xxx_mq_chain {
uint32_t qregs[4 * QLA_MQ_SIZE];
};
+struct qla2xxx_mqueue_header {
+ uint32_t queue;
+#define TYPE_REQUEST_QUEUE 0x1
+#define TYPE_RESPONSE_QUEUE 0x2
+ uint32_t number;
+ uint32_t size;
+};
+
+struct qla2xxx_mqueue_chain {
+ uint32_t type;
+ uint32_t chain_size;
+};
+
#define DUMP_CHAIN_VARIANT 0x80000000
#define DUMP_CHAIN_FCE 0x7FFFFAF0
#define DUMP_CHAIN_MQ 0x7FFFFAF1
+#define DUMP_CHAIN_QUEUE 0x7FFFFAF2
#define DUMP_CHAIN_LAST 0x80000000
struct qla2xxx_fw_dump {
@@ -228,6 +290,7 @@ struct qla2xxx_fw_dump {
struct qla24xx_fw_dump isp24;
struct qla25xx_fw_dump isp25;
struct qla81xx_fw_dump isp81;
+ struct qla83xx_fw_dump isp83;
} isp;
};
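
Adding struct qla83xx_fw_dump as one more member of the isp union keeps the
common dump plumbing unchanged: the union makes one struct type big enough
for any layout, so shared code can pass a struct qla2xxx_fw_dump * around
while each ISP family fills in its own register map. A toy illustration of
the idea, with stand-in layouts:

    #include <stdio.h>

    struct isp25_dump { unsigned int regs[8]; };
    struct isp83_dump { unsigned int regs[16]; };

    struct fw_dump {
            unsigned int version;
            union {
                    struct isp25_dump isp25;
                    struct isp83_dump isp83;
            } isp;
    };

    int main(void)
    {
            /* One sizeof covers every supported layout. */
            printf("dump struct size: %zu bytes\n", sizeof(struct fw_dump));
            return 0;
    }
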
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index af1003f9de1e..a2443031dbe7 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -125,17 +125,17 @@
* Fibre Channel device definitions.
*/
#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */
-#define MAX_FIBRE_DEVICES 512
+#define MAX_FIBRE_DEVICES_2100 512
+#define MAX_FIBRE_DEVICES_2400 2048
+#define MAX_FIBRE_DEVICES_LOOP 128
+#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400
#define MAX_FIBRE_LUNS 0xFFFF
-#define MAX_RSCN_COUNT 32
#define MAX_HOST_COUNT 16
/*
* Host adapter default definitions.
*/
#define MAX_BUSES 1 /* We only have one bus today */
-#define MAX_TARGETS_2100 MAX_FIBRE_DEVICES
-#define MAX_TARGETS_2200 MAX_FIBRE_DEVICES
#define MIN_LUNS 8
#define MAX_LUNS MAX_FIBRE_LUNS
#define MAX_CMDS_PER_LUN 255
@@ -202,20 +202,12 @@ struct sd_dif_tuple {
/*
* SCSI Request Block
*/
-typedef struct srb {
- atomic_t ref_count;
- struct fc_port *fcport;
- uint32_t handle;
-
+struct srb_cmd {
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
-
- uint16_t flags;
-
uint32_t request_sense_length;
uint8_t *request_sense_ptr;
-
void *ctx;
-} srb_t;
+};
/*
* SRB flag definitions
@@ -254,10 +246,7 @@ struct srb_iocb {
} u;
struct timer_list timer;
-
- void (*done)(srb_t *);
- void (*free)(srb_t *);
- void (*timeout)(srb_t *);
+ void (*timeout)(void *);
};
/* Values for srb_ctx type */
@@ -268,16 +257,37 @@ struct srb_iocb {
#define SRB_CT_CMD 5
#define SRB_ADISC_CMD 6
#define SRB_TM_CMD 7
+#define SRB_SCSI_CMD 8
-struct srb_ctx {
+typedef struct srb {
+ atomic_t ref_count;
+ struct fc_port *fcport;
+ uint32_t handle;
+ uint16_t flags;
uint16_t type;
char *name;
int iocbs;
union {
- struct srb_iocb *iocb_cmd;
+ struct srb_iocb iocb_cmd;
struct fc_bsg_job *bsg_job;
+ struct srb_cmd scmd;
} u;
-};
+ void (*done)(void *, void *, int);
+ void (*free)(void *, void *);
+} srb_t;
+
+#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
+#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
+#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
+
+#define GET_CMD_SENSE_LEN(sp) \
+ (sp->u.scmd.request_sense_length)
+#define SET_CMD_SENSE_LEN(sp, len) \
+ (sp->u.scmd.request_sense_length = len)
+#define GET_CMD_SENSE_PTR(sp) \
+ (sp->u.scmd.request_sense_ptr)
+#define SET_CMD_SENSE_PTR(sp, ptr) \
+ (sp->u.scmd.request_sense_ptr = ptr)
struct msg_echo_lb {
dma_addr_t send_dma;
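
The GET_CMD_SP()/SET_CMD_SP() family above exists because the rework folds
the SCSI command, iocb and bsg payloads into a single union inside the srb;
the macros keep callers from reaching into union members directly. A
minimal standalone rendering of that shape (stand-in types; the real macros
also cover the sense-data fields):

    #include <stdio.h>
    #include <stdint.h>

    struct scsi_cmd { int tag; };
    struct bsg_job  { int id; };

    struct srb_cmd { struct scsi_cmd *cmd; };

    struct srb {
            uint16_t type;
    #define SRB_SCSI_CMD 8
            union {
                    struct srb_cmd scmd;
                    struct bsg_job *bsg_job;
            } u;
    };

    #define GET_CMD_SP(sp)          ((sp)->u.scmd.cmd)
    #define SET_CMD_SP(sp, c)       ((sp)->u.scmd.cmd = (c))

    int main(void)
    {
            struct scsi_cmd c = { .tag = 42 };
            struct srb sp = { .type = SRB_SCSI_CMD };

            SET_CMD_SP(&sp, &c);
            printf("tag via accessor: %d\n", GET_CMD_SP(&sp)->tag);
            return 0;
    }
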
@@ -653,8 +663,10 @@ typedef struct {
#define MBC_DIAGNOSTIC_LOOP_BACK 0x45 /* Diagnostic loop back. */
#define MBC_ONLINE_SELF_TEST 0x46 /* Online self-test. */
#define MBC_ENHANCED_GET_PORT_DATABASE 0x47 /* Get port database + login */
+#define MBC_CONFIGURE_VF 0x4b /* Configure VFs */
#define MBC_RESET_LINK_STATUS 0x52 /* Reset Link Error Status */
#define MBC_IOCB_COMMAND_A64 0x54 /* Execute IOCB command (64) */
+#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
#define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */
@@ -1709,6 +1721,7 @@ typedef struct fc_port {
uint16_t vp_idx;
uint8_t fc4_type;
+ uint8_t scan_state;
} fc_port_t;
/*
@@ -1761,7 +1774,6 @@ static const char * const port_state_str[] = {
#define GID_PT_CMD 0x1A1
#define GID_PT_REQ_SIZE (16 + 4)
-#define GID_PT_RSP_SIZE (16 + (MAX_FIBRE_DEVICES * 4))
#define GPN_ID_CMD 0x112
#define GPN_ID_REQ_SIZE (16 + 4)
@@ -2051,7 +2063,9 @@ struct ct_sns_rsp {
} ga_nxt;
struct {
- struct ct_sns_gid_pt_data entries[MAX_FIBRE_DEVICES];
+ /* Assume the largest number of targets for the union */
+ struct ct_sns_gid_pt_data
+ entries[MAX_FIBRE_DEVICES_MAX];
} gid_pt;
struct {
@@ -2112,7 +2126,11 @@ struct ct_sns_pkt {
#define GID_PT_SNS_SCMD_LEN 6
#define GID_PT_SNS_CMD_SIZE 28
-#define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES * 4 + 16)
+/*
+ * Assume MAX_FIBRE_DEVICES_2100 as these defines are only used with older
+ * adapters.
+ */
+#define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES_2100 * 4 + 16)
#define GPN_ID_SNS_SCMD_LEN 6
#define GPN_ID_SNS_CMD_SIZE 28
@@ -2160,7 +2178,6 @@ struct gid_list_info {
uint16_t loop_id; /* ISP23XX -- 6 bytes. */
uint16_t reserved_1; /* ISP24XX -- 8 bytes. */
};
-#define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES)
/* NPIV */
typedef struct vport_info {
@@ -2261,6 +2278,7 @@ struct isp_operations {
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
#define QLA_PCI_MSIX_CONTROL 0xa2
+#define QLA_83XX_PCI_MSIX_CONTROL 0x92
struct scsi_qla_host;
@@ -2341,7 +2359,7 @@ struct qla_statistics {
#define QLA_MQ_SIZE 32
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
- ((ha->mqenable) ? \
+ ((ha->mqenable || IS_QLA83XX(ha)) ? \
((void *)(ha->mqiobase) +\
(QLA_QUE_PAGE * id)) :\
((void *)(ha->iobase)))
@@ -2461,6 +2479,7 @@ struct qla_hw_data {
#define MIN_IOBASE_LEN 0x100
/* Multi queue data structs */
device_reg_t __iomem *mqiobase;
+ device_reg_t __iomem *msixbase;
uint16_t msix_count;
uint8_t mqenable;
struct req_que **req_q_map;
@@ -2485,6 +2504,7 @@ struct qla_hw_data {
atomic_t loop_down_timer; /* loop down timer */
uint8_t link_down_timeout; /* link down timeout */
uint16_t max_loop_id;
+ uint16_t max_fibre_devices; /* Maximum number of targets */
uint16_t fb_rev;
uint16_t min_external_loopid; /* First external loop Id */
@@ -2494,6 +2514,7 @@ struct qla_hw_data {
#define PORT_SPEED_2GB 0x01
#define PORT_SPEED_4GB 0x03
#define PORT_SPEED_8GB 0x04
+#define PORT_SPEED_16GB 0x05
#define PORT_SPEED_10GB 0x13
uint16_t link_data_rate; /* F/W operating speed */
@@ -2515,6 +2536,8 @@ struct qla_hw_data {
#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
+#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
+#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
uint32_t device_type;
#define DT_ISP2100 BIT_0
#define DT_ISP2200 BIT_1
@@ -2531,7 +2554,9 @@ struct qla_hw_data {
#define DT_ISP8432 BIT_12
#define DT_ISP8001 BIT_13
#define DT_ISP8021 BIT_14
-#define DT_ISP_LAST (DT_ISP8021 << 1)
+#define DT_ISP2031 BIT_15
+#define DT_ISP8031 BIT_16
+#define DT_ISP_LAST (DT_ISP8031 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -2555,26 +2580,30 @@ struct qla_hw_data {
#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA81XX(ha) (IS_QLA8001(ha))
#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
+#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
+#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
+#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
-#define IS_QLA81XX(ha) (IS_QLA8001(ha))
-#define IS_QLA8XXX_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha))
+#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
+ IS_QLA8031(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA82XX(ha))
-#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
- (ha)->flags.msix_enabled)
-#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
-#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
+ IS_QLA82XX(ha) || IS_QLA83XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+ IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
+#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
@@ -2583,6 +2612,8 @@ struct qla_hw_data {
#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
+#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
+#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -2621,10 +2652,6 @@ struct qla_hw_data {
void *sfp_data;
dma_addr_t sfp_data_dma;
- uint8_t *edc_data;
- dma_addr_t edc_data_dma;
- uint16_t edc_data_len;
-
#define XGMAC_DATA_SIZE 4096
void *xgmac_data;
dma_addr_t xgmac_data_dma;
@@ -2653,6 +2680,8 @@ struct qla_hw_data {
void *async_pd;
dma_addr_t async_pd_dma;
+ void *swl;
+
/* These are used by mailbox operations. */
volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -2674,6 +2703,8 @@ struct qla_hw_data {
uint16_t fw_minor_version;
uint16_t fw_subminor_version;
uint16_t fw_attributes;
+ uint16_t fw_attributes_h;
+ uint16_t fw_attributes_ext[2];
uint32_t fw_memory_size;
uint32_t fw_transfer_size;
uint32_t fw_srisc_address;
@@ -2851,7 +2882,6 @@ typedef struct scsi_qla_host {
volatile struct {
uint32_t init_done :1;
uint32_t online :1;
- uint32_t rscn_queue_overflow :1;
uint32_t reset_active :1;
uint32_t management_server_logged_in :1;
@@ -2905,11 +2935,6 @@ typedef struct scsi_qla_host {
- /* RSCN queue. */
- uint32_t rscn_queue[MAX_RSCN_COUNT];
- uint8_t rscn_in_ptr;
- uint8_t rscn_out_ptr;
-
/* Timeout timers. */
uint8_t loop_down_abort_time; /* port down timer */
atomic_t loop_down_timer; /* loop down timer */
@@ -3005,7 +3030,6 @@ typedef struct scsi_qla_host {
#define QLA_ABORTED 0x105
#define QLA_SUSPENDED 0x106
#define QLA_BUSY 0x107
-#define QLA_RSCNS_HANDLED 0x108
#define QLA_ALREADY_REGISTERED 0x109
#define NVRAM_DELAY() udelay(10)
@@ -3021,6 +3045,7 @@ typedef struct scsi_qla_host {
#define OPTROM_SIZE_25XX 0x200000
#define OPTROM_SIZE_81XX 0x400000
#define OPTROM_SIZE_82XX 0x800000
+#define OPTROM_SIZE_83XX 0x1000000
#define OPTROM_BURST_SIZE 0x1000
#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
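The qla_def.h additions follow the driver's established device-type scheme: each ISP family owns one bit in device_type, DT_ISP_LAST stays one bit past the highest family, and capability macros such as IS_QLA83XX() and IS_CNA_CAPABLE() OR together the families that share a feature. A self-contained sketch of that pattern (the shape of DT_MASK() is an assumption reproduced for illustration):

	#include <stdio.h>
	#include <stdint.h>

	#define BIT(n)		(1u << (n))
	#define DT_ISP2031	BIT(15)
	#define DT_ISP8031	BIT(16)
	#define DT_ISP_LAST	(DT_ISP8031 << 1)	/* first unused bit */

	struct hw {
		uint32_t device_type;
	};

	/* Assumed shape of the driver's DT_MASK(): keep only family bits. */
	#define DT_MASK(ha)	((ha)->device_type & (DT_ISP_LAST - 1))
	#define IS_QLA2031(ha)	(DT_MASK(ha) & DT_ISP2031)
	#define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)
	#define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))

	int main(void)
	{
		struct hw ha = { .device_type = DT_ISP8031 };

		printf("83xx? %s\n", IS_QLA83XX(&ha) ? "yes" : "no");
		return 0;
	}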
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0b4c2b794c6f..499c74e39ee5 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto out;
if (!ha->fce)
goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index aa69486dc064..6d7d7758c797 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1327,6 +1327,11 @@ struct qla_flt_header {
#define FLT_REG_GOLD_FW 0x2f
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
+#define FLT_REG_FCOE_FW 0xA4
+#define FLT_REG_FCOE_VPD_0 0xA9
+#define FLT_REG_FCOE_NVRAM_0 0xAA
+#define FLT_REG_FCOE_VPD_1 0xAB
+#define FLT_REG_FCOE_NVRAM_1 0xAC
struct qla_flt_region {
uint32_t code;
@@ -1494,6 +1499,11 @@ struct access_chip_rsp_84xx {
#define MBC_GET_XGMAC_STATS 0x7a
#define MBC_GET_DCBX_PARAMS 0x51
+/*
+ * ISP83xx mailbox commands
+ */
+#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
+
/* Flash access control option field bit definitions */
#define FAC_OPT_FORCE_SEMAPHORE BIT_15
#define FAC_OPT_REQUESTOR_ID BIT_14
@@ -1875,4 +1885,7 @@ struct qla_fcp_prio_cfg {
#define FA_NPIV_CONF0_ADDR_81 0xD1000
#define FA_NPIV_CONF1_ADDR_81 0xD2000
+/* 83XX Flash locations -- occupies second 8MB region. */
+#define FA_FLASH_LAYOUT_ADDR_83 0xFC400
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 408679be8fdf..9f065804bd12 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -71,8 +71,6 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
- struct srb_iocb *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
@@ -156,8 +154,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
-extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
-
+extern void qla2x00_sp_free_dma(void *, void *);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -205,8 +202,7 @@ extern int
qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
extern int
-qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
- uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
+qla2x00_get_fw_version(scsi_qla_host_t *);
extern int
qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
@@ -371,6 +367,9 @@ qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *);
extern int
qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
+extern int
+qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -409,8 +408,10 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *);
extern int qla24xx_beacon_on(struct scsi_qla_host *);
extern int qla24xx_beacon_off(struct scsi_qla_host *);
extern void qla24xx_beacon_blink(struct scsi_qla_host *);
+extern void qla83xx_beacon_blink(struct scsi_qla_host *);
extern int qla82xx_beacon_on(struct scsi_qla_host *);
extern int qla82xx_beacon_off(struct scsi_qla_host *);
+extern int qla83xx_write_remote_reg(struct scsi_qla_host *, uint32_t, uint32_t);
extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t);
@@ -541,6 +542,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
/* IOCB related functions */
extern int qla82xx_start_scsi(srb_t *);
+extern void qla2x00_sp_free(void *, void *);
+extern void qla2x00_sp_timeout(unsigned long);
+extern void qla2x00_bsg_job_done(void *, void *, int);
+extern void qla2x00_bsg_sp_free(void *, void *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);
@@ -576,6 +581,8 @@ extern void qla82xx_start_iocbs(scsi_qla_host_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
+extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
+extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
@@ -589,6 +596,9 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
+/* 83xx related functions */
+extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+
/* Minidump related functions */
extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
extern int qla82xx_md_get_template(scsi_qla_host_t *);
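The qla2x00_get_fw_version() prototype collapses from eight output parameters to the host pointer alone, which works because the version fields (including the new fw_attributes_h/fw_attributes_ext members added to qla_def.h above) already live in struct qla_hw_data. A generic sketch of that out-parameters-to-struct-members refactor, with hypothetical field names and stand-in values:

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical stand-in for struct qla_hw_data's version fields. */
	struct hw_data {
		uint16_t fw_major, fw_minor, fw_subminor;
		uint32_t fw_memory_size;
	};

	/* After the refactor the function fills the fields it owns instead
	 * of threading eight output parameters through every caller. */
	static int get_fw_version(struct hw_data *ha)
	{
		ha->fw_major = 5;		/* stand-in for mailbox results */
		ha->fw_minor = 6;
		ha->fw_subminor = 5;
		ha->fw_memory_size = 0x200000;
		return 0;
	}

	int main(void)
	{
		struct hw_data ha = { 0 };

		if (get_fw_version(&ha) == 0)
			printf("fw %u.%02u.%02u\n",
			    ha.fw_major, ha.fw_minor, ha.fw_subminor);
		return 0;
	}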
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4aea4ae23300..3128f80441f5 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -240,6 +240,12 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
return (rval);
}
+static inline int
+qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
+{
+ return vha->hw->max_fibre_devices * 4 + 16;
+}
+
/**
* qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
* @ha: HA context
@@ -261,20 +267,21 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_gid_pt_data *gid_data;
struct qla_hw_data *ha = vha->hw;
+ uint16_t gid_pt_rsp_size;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gid_pt(vha, list);
gid_data = NULL;
-
+ gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
/* Issue GID_PT */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
- GID_PT_RSP_SIZE);
+ gid_pt_rsp_size);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
- GID_PT_RSP_SIZE);
+ gid_pt_rsp_size);
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_type */
@@ -292,7 +299,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
rval = QLA_FUNCTION_FAILED;
} else {
/* Set port IDs in switch info list. */
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
gid_data = &ct_rsp->rsp.gid_pt.entries[i];
list[i].d_id.b.domain = gid_data->port_id[0];
list[i].d_id.b.area = gid_data->port_id[1];
@@ -313,7 +320,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
* single call. Return a failed status, and let GA_NXT handle
* the overload.
*/
- if (i == MAX_FIBRE_DEVICES)
+ if (i == ha->max_fibre_devices)
rval = QLA_FUNCTION_FAILED;
}
@@ -330,7 +337,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
uint16_t i;
ms_iocb_entry_t *ms_pkt;
@@ -341,7 +348,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gpn_id(vha, list);
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GPN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
@@ -364,9 +371,11 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2056,
"GPN_ID issue IOCB failed (%d).\n", rval);
+ break;
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
+ break;
} else {
/* Save portname */
memcpy(list[i].port_name,
@@ -391,7 +400,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
uint16_t i;
struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
@@ -401,7 +410,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gnn_id(vha, list);
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GNN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
@@ -424,9 +433,11 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2057,
"GNN_ID issue IOCB failed (%d).\n", rval);
+ break;
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GNN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
+ break;
} else {
/* Save nodename */
memcpy(list[i].node_name,
@@ -735,7 +746,7 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
static int
qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- int rval;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
struct sns_cmd_pkt *sns_cmd;
@@ -814,11 +825,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
uint16_t i;
uint8_t *entry;
struct sns_cmd_pkt *sns_cmd;
+ uint16_t gid_pt_sns_data_size;
+
+ gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
/* Issue GID_PT. */
/* Prepare SNS command request. */
sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
- GID_PT_SNS_DATA_SIZE);
+ gid_pt_sns_data_size);
/* Prepare SNS command arguments -- port_type. */
sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
@@ -839,7 +853,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
rval = QLA_FUNCTION_FAILED;
} else {
/* Set port IDs in switch info list. */
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
entry = &sns_cmd->p.gid_data[(i * 4) + 16];
list[i].d_id.b.domain = entry[1];
list[i].d_id.b.area = entry[2];
@@ -858,7 +872,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
* single call. Return a failed status, and let GA_NXT handle
* the overload.
*/
- if (i == MAX_FIBRE_DEVICES)
+ if (i == ha->max_fibre_devices)
rval = QLA_FUNCTION_FAILED;
}
@@ -877,12 +891,12 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
static int
qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
uint16_t i;
struct sns_cmd_pkt *sns_cmd;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GPN_ID */
/* Prepare SNS command request. */
sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
@@ -933,12 +947,12 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
static int
qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
uint16_t i;
struct sns_cmd_pkt *sns_cmd;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GNN_ID */
/* Prepare SNS command request. */
sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
@@ -1107,20 +1121,26 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
static int
qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
{
- int ret;
+ int ret, rval;
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
ret = QLA_SUCCESS;
if (vha->flags.management_server_logged_in)
return ret;
- ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
- mb, BIT_1|BIT_0);
- if (mb[0] != MBS_COMMAND_COMPLETE) {
- ql_dbg(ql_dbg_disc, vha, 0x2024,
- "Failed management_server login: loopid=%x mb[0]=%x "
- "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
- vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
+ rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
+ 0xfa, mb, BIT_1|BIT_0);
+ if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+ if (rval == QLA_MEMORY_ALLOC_FAILED)
+ ql_dbg(ql_dbg_disc, vha, 0x2085,
+ "Failed management_server login: loopid=%x "
+ "rval=%d\n", vha->mgmt_svr_loop_id, rval);
+ else
+ ql_dbg(ql_dbg_disc, vha, 0x2024,
+ "Failed management_server login: loopid=%x "
+ "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+ vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
+ mb[7]);
ret = QLA_FUNCTION_FAILED;
} else
vha->flags.management_server_logged_in = 1;
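The login path now checks the return value of fabric_login() itself before looking at the mailbox status, since mb[] is only meaningful when the command actually reached the firmware; allocation failures get their own message. A sketch of that two-level check, with illustrative names and codes:

	#include <stdio.h>

	enum { OK = 0, MEM_FAIL = 1 };		/* illustrative codes */

	/* Only touches mb[] when the command actually executed. */
	static int fabric_login(unsigned short *mb, int simulate)
	{
		if (simulate == MEM_FAIL)
			return MEM_FAIL;	/* mb[] left untouched */
		mb[0] = 0x4005;			/* e.g. a command error */
		return OK;
	}

	int main(void)
	{
		unsigned short mb[8] = { 0 };
		int rval = fabric_login(mb, MEM_FAIL);

		if (rval != OK)
			printf("failed before execution: rval=%d\n", rval);
		else if (mb[0] != 0x4000)	/* MBS_COMMAND_COMPLETE */
			printf("rejected: mb[0]=%#x\n", mb[0]);
		else
			printf("logged in\n");
		return 0;
	}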
@@ -1547,7 +1567,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_10GB);
else if (IS_QLA25XX(ha))
@@ -1594,6 +1614,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
break;
+ case PORT_SPEED_16GB:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
+ break;
default:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
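The new PORT_SPEED_16GB case slots into the existing switch that translates the firmware's operating link rate into an FDMI port-speed attribute. A compact sketch of the same mapping, using only the link-rate values visible in this patch and symbolic names for the FDMI codes:

	#include <stdio.h>
	#include <stdint.h>

	/* Link-rate values as defined in qla_def.h above; the FDMI codes
	 * themselves are kept symbolic here. */
	#define PORT_SPEED_2GB	0x01
	#define PORT_SPEED_4GB	0x03
	#define PORT_SPEED_8GB	0x04
	#define PORT_SPEED_16GB	0x05
	#define PORT_SPEED_10GB	0x13

	static const char *fdmi_speed(uint16_t link_data_rate)
	{
		switch (link_data_rate) {
		case PORT_SPEED_2GB:	return "FDMI_PORT_SPEED_2GB";
		case PORT_SPEED_4GB:	return "FDMI_PORT_SPEED_4GB";
		case PORT_SPEED_8GB:	return "FDMI_PORT_SPEED_8GB";
		case PORT_SPEED_10GB:	return "FDMI_PORT_SPEED_10GB";
		case PORT_SPEED_16GB:	return "FDMI_PORT_SPEED_16GB";
		default:		return "FDMI_PORT_SPEED_UNKNOWN";
		}
	}

	int main(void)
	{
		printf("%s\n", fdmi_speed(PORT_SPEED_16GB));
		return 0;
	}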
@@ -1724,7 +1748,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
int
qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
- int rval;
+ int rval = QLA_SUCCESS;
uint16_t i;
struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
@@ -1734,7 +1758,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
@@ -1757,9 +1781,11 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
/*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2023,
"GFPN_ID issue IOCB failed (%d).\n", rval);
+ break;
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
+ break;
} else {
/* Save fabric portname */
memcpy(list[i].fabric_port_name,
@@ -1846,7 +1872,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
if (rval)
return rval;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
@@ -1947,7 +1973,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
struct qla_hw_data *ha = vha->hw;
uint8_t fcp_scsi_features = 0;
- for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
+ for (i = 0; i < ha->max_fibre_devices; i++) {
/* Set default FC4 type to UNKNOWN so the default is to
* process this port */
list[i].fc4_type = FC4_TYPE_UNKNOWN;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1fa067e053d2..b9465643396b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -29,7 +29,6 @@ static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
-static int qla2x00_device_resync(scsi_qla_host_t *);
static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
uint16_t *);
@@ -41,11 +40,10 @@ static int qla25xx_init_queues(struct qla_hw_data *);
/* SRB Extensions ---------------------------------------------------------- */
-static void
-qla2x00_ctx_sp_timeout(unsigned long __data)
+void
+qla2x00_sp_timeout(unsigned long __data)
{
srb_t *sp = (srb_t *)__data;
- struct srb_ctx *ctx;
struct srb_iocb *iocb;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
@@ -55,79 +53,25 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
spin_lock_irqsave(&ha->hardware_lock, flags);
req = ha->req_q_map[0];
req->outstanding_cmds[sp->handle] = NULL;
- ctx = sp->ctx;
- iocb = ctx->u.iocb_cmd;
+ iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- iocb->free(sp);
+ sp->free(fcport->vha, sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-static void
-qla2x00_ctx_sp_free(srb_t *sp)
+void
+qla2x00_sp_free(void *data, void *ptr)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *iocb = ctx->u.iocb_cmd;
- struct scsi_qla_host *vha = sp->fcport->vha;
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *iocb = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- kfree(iocb);
- kfree(ctx);
- mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+ mempool_free(sp, vha->hw->srb_mempool);
QLA_VHA_MARK_NOT_BUSY(vha);
}
-inline srb_t *
-qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
- unsigned long tmo)
-{
- srb_t *sp = NULL;
- struct qla_hw_data *ha = vha->hw;
- struct srb_ctx *ctx;
- struct srb_iocb *iocb;
- uint8_t bail;
-
- QLA_VHA_MARK_BUSY(vha, bail);
- if (bail)
- return NULL;
-
- sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
- if (!sp)
- goto done;
- ctx = kzalloc(size, GFP_KERNEL);
- if (!ctx) {
- mempool_free(sp, ha->srb_mempool);
- sp = NULL;
- goto done;
- }
- iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
- if (!iocb) {
- mempool_free(sp, ha->srb_mempool);
- sp = NULL;
- kfree(ctx);
- goto done;
- }
-
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->ctx = ctx;
- ctx->iocbs = 1;
- ctx->u.iocb_cmd = iocb;
- iocb->free = qla2x00_ctx_sp_free;
-
- init_timer(&iocb->timer);
- if (!tmo)
- goto done;
- iocb->timer.expires = jiffies + tmo * HZ;
- iocb->timer.data = (unsigned long)sp;
- iocb->timer.function = qla2x00_ctx_sp_timeout;
- add_timer(&iocb->timer);
-done:
- if (!sp)
- QLA_VHA_MARK_NOT_BUSY(vha);
- return sp;
-}
-
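The removed qla2x00_get_ctx_sp() needed three allocations (srb, ctx, iocb) with matching unwind paths; its replacement, qla2x00_get_sp() in the qla_inline.h hunk further down, gets away with a single mempool allocation because the iocb now lives inside srb_t as a union member. A condensed sketch of that embed-instead-of-chain refactor, with stand-in types:

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-ins for struct srb_iocb and srb_t. */
	struct iocb_cmd {
		unsigned long timeout;
	};

	struct srb {
		const char *name;
		int type;
		union {
			struct iocb_cmd iocb_cmd;	/* embedded, not kzalloc'ed */
		} u;
	};

	static struct srb *get_sp(void)
	{
		/* One allocation, one failure path. */
		return calloc(1, sizeof(struct srb));
	}

	int main(void)
	{
		struct srb *sp = get_sp();

		if (!sp)
			return 1;
		sp->name = "login";
		sp->u.iocb_cmd.timeout = 2;
		printf("%s timeout=%lu\n", sp->name, sp->u.iocb_cmd.timeout);
		free(sp);
		return 0;
	}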
/* Asynchronous Login/Logout Routines -------------------------------------- */
static inline unsigned long
@@ -149,19 +93,19 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
}
static void
-qla2x00_async_iocb_timeout(srb_t *sp)
+qla2x00_async_iocb_timeout(void *data)
{
+ srb_t *sp = (srb_t *)data;
fc_port_t *fcport = sp->fcport;
- struct srb_ctx *ctx = sp->ctx;
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
"Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
- ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+ sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->flags &= ~FCF_ASYNC_SENT;
- if (ctx->type == SRB_LOGIN_CMD) {
- struct srb_iocb *lio = ctx->u.iocb_cmd;
+ if (sp->type == SRB_LOGIN_CMD) {
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
@@ -173,14 +117,16 @@ qla2x00_async_iocb_timeout(srb_t *sp)
}
static void
-qla2x00_async_login_ctx_done(srb_t *sp)
+qla2x00_async_login_sp_done(void *data, void *ptr, int res)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
-
- qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
- lio->u.logio.data);
- lio->free(sp);
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
}
int
@@ -188,22 +134,21 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
srb_t *sp;
- struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
- qla2x00_get_async_timeout(vha) + 2);
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- ctx = sp->ctx;
- ctx->type = SRB_LOGIN_CMD;
- ctx->name = "login";
- lio = ctx->u.iocb_cmd;
+ sp->type = SRB_LOGIN_CMD;
+ sp->name = "login";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
- lio->done = qla2x00_async_login_ctx_done;
+ sp->done = qla2x00_async_login_sp_done;
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@@ -219,42 +164,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- lio->free(sp);
+ sp->free(fcport->vha, sp);
done:
return rval;
}
static void
-qla2x00_async_logout_ctx_done(srb_t *sp)
+qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
-
- qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
- lio->u.logio.data);
- lio->free(sp);
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
}
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
- qla2x00_get_async_timeout(vha) + 2);
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- ctx = sp->ctx;
- ctx->type = SRB_LOGOUT_CMD;
- ctx->name = "logout";
- lio = ctx->u.iocb_cmd;
+ sp->type = SRB_LOGOUT_CMD;
+ sp->name = "logout";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
- lio->done = qla2x00_async_logout_ctx_done;
+ sp->done = qla2x00_async_logout_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
@@ -266,20 +212,22 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- lio->free(sp);
+ sp->free(fcport->vha, sp);
done:
return rval;
}
static void
-qla2x00_async_adisc_ctx_done(srb_t *sp)
+qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
-
- qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
- lio->u.logio.data);
- lio->free(sp);
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp->fcport->vha, sp);
}
int
@@ -287,22 +235,21 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
srb_t *sp;
- struct srb_ctx *ctx;
struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
- qla2x00_get_async_timeout(vha) + 2);
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- ctx = sp->ctx;
- ctx->type = SRB_ADISC_CMD;
- ctx->name = "adisc";
- lio = ctx->u.iocb_cmd;
+ sp->type = SRB_ADISC_CMD;
+ sp->name = "adisc";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
- lio->done = qla2x00_async_adisc_ctx_done;
+ sp->done = qla2x00_async_adisc_sp_done;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
@@ -316,46 +263,62 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- lio->free(sp);
+ sp->free(fcport->vha, sp);
done:
return rval;
}
static void
-qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
+qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *iocb = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ uint32_t flags;
+ uint16_t lun;
+ int rval;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+ flags = iocb->u.tmf.flags;
+ lun = (uint16_t)iocb->u.tmf.lun;
+
+ /* Issue Marker IOCB */
+ rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
+ vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+ flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
- qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
- iocb->free(sp);
+ if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ "TM IOCB failed (%x).\n", rval);
+ }
+ }
+ sp->free(sp->fcport->vha, sp);
}
int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
srb_t *sp;
- struct srb_ctx *ctx;
struct srb_iocb *tcf;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
- qla2x00_get_async_timeout(vha) + 2);
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- ctx = sp->ctx;
- ctx->type = SRB_TM_CMD;
- ctx->name = "tmf";
- tcf = ctx->u.iocb_cmd;
- tcf->u.tmf.flags = flags;
+ sp->type = SRB_TM_CMD;
+ sp->name = "tmf";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ tcf = &sp->u.iocb_cmd;
+ tcf->u.tmf.flags = tm_flags;
tcf->u.tmf.lun = lun;
tcf->u.tmf.data = tag;
tcf->timeout = qla2x00_async_iocb_timeout;
- tcf->done = qla2x00_async_tm_cmd_ctx_done;
+ sp->done = qla2x00_async_tm_cmd_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -368,7 +331,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
return rval;
done_free_sp:
- tcf->free(sp);
+ sp->free(fcport->vha, sp);
done:
return rval;
}
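After this rework all four async paths (login, logout, adisc, tmf) share one shape: allocate the sp, set type and name, arm the timer, point sp->done at a completion handler, and release through sp->free on every exit; the completion handlers also check the UNLOADING flag before queueing follow-up work. A user-space sketch of the sp->done/sp->free callback arrangement, names illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	struct srb {
		const char *name;
		void (*done)(void *vha, void *sp, int res);
		void (*free)(void *vha, void *sp);
	};

	static void sp_free(void *vha, void *ptr)
	{
		(void)vha;
		free(ptr);
	}

	static void login_sp_done(void *vha, void *ptr, int res)
	{
		struct srb *sp = ptr;

		printf("%s done, res=%d\n", sp->name, res);
		sp->free(vha, sp);	/* completion owns the final release */
	}

	int main(void)
	{
		struct srb *sp = calloc(1, sizeof(*sp));

		if (!sp)
			return 1;
		sp->name = "login";
		sp->done = login_sp_done;
		sp->free = sp_free;
		sp->done(NULL, sp, 0);	/* simulate IOCB completion */
		return 0;
	}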
@@ -387,6 +350,13 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
* requests.
*/
rval = qla2x00_get_port_database(vha, fcport, 0);
+ if (rval == QLA_NOT_LOGGED_IN) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+
if (rval != QLA_SUCCESS) {
qla2x00_post_async_logout_work(vha, fcport, NULL);
qla2x00_post_async_login_work(vha, fcport, NULL);
@@ -452,30 +422,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
return;
}
-void
-qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- struct srb_iocb *iocb)
-{
- int rval;
- uint32_t flags;
- uint16_t lun;
-
- flags = iocb->u.tmf.flags;
- lun = (uint16_t)iocb->u.tmf.lun;
-
- /* Issue Marker IOCB */
- rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
- vha->hw->rsp_q_map[0], fcport->loop_id, lun,
- flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
-
- if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
- ql_dbg(ql_dbg_taskm, vha, 0x8030,
- "TM IOCB failed (%x).\n", rval);
- }
-
- return;
-}
-
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
@@ -969,6 +915,9 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
uint16_t mb[4] = {0x1010, 0, 1, 0};
+ if (!IS_QLA81XX(vha->hw))
+ return QLA_SUCCESS;
+
return qla81xx_write_mpi_register(vha, mb);
}
@@ -1262,7 +1211,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA81XX(ha))
+ if (IS_QLA83XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
else if (IS_QLA25XX(ha))
fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
@@ -1270,10 +1221,20 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
- if (ha->mqenable)
- mq_size = sizeof(struct qla2xxx_mq_chain);
+ if (ha->mqenable) {
+ if (!IS_QLA83XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+ * Resizing must be done at end-of-dump processing.
+ */
+ mq_size += ha->max_req_queues *
+ (req->length * sizeof(request_t));
+ mq_size += ha->max_rsp_queues *
+ (rsp->length * sizeof(response_t));
+ }
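Dump sizing here leans on offsetof(struct ..._fw_dump, ext_mem) to cover everything up to the variable-length external-memory tail, then adds worst-case per-queue space that is trimmed at end-of-dump processing. A sketch of the offsetof idiom with a stand-in template struct:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Stand-in for a dump template whose last member is the
	 * variable-length external-memory region. */
	struct fw_dump {
		uint32_t host_status;
		uint32_t regs[64];
		uint32_t ext_mem[];	/* flexible tail */
	};

	int main(void)
	{
		size_t fixed_size = offsetof(struct fw_dump, ext_mem);

		printf("fixed portion: %zu bytes\n", fixed_size);
		return 0;
	}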
/* Allocate memory for Fibre Channel Event Buffer. */
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto try_eft;
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
@@ -1484,17 +1445,8 @@ enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
if (IS_QLA82XX(ha))
qla82xx_check_md_needed(vha);
- else {
- rval = qla2x00_get_fw_version(vha,
- &ha->fw_major_version,
- &ha->fw_minor_version,
- &ha->fw_subminor_version,
- &ha->fw_attributes,
- &ha->fw_memory_size,
- ha->mpi_version,
- &ha->mpi_capabilities,
- ha->phy_version);
- }
+ else
+ rval = qla2x00_get_fw_version(vha);
if (rval != QLA_SUCCESS)
goto failed;
ha->flags.npiv_supported = 0;
@@ -1535,6 +1487,9 @@ enable_82xx_npiv:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+ if (IS_QLA83XX(ha))
+ goto skip_fac_check;
+
if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
uint32_t size;
@@ -1547,6 +1502,11 @@ enable_82xx_npiv:
"Unsupported FAC firmware (%d.%02d.%02d).\n",
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
+skip_fac_check:
+ if (IS_QLA83XX(ha)) {
+ ha->flags.fac_supported = 0;
+ rval = QLA_SUCCESS;
+ }
}
}
failed:
@@ -1725,7 +1685,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
-/* Setup ring parameters in initialization control block. */
+ /* Setup ring parameters in initialization control block. */
icb = (struct init_cb_24xx *)ha->init_cb;
icb->request_q_outpointer = __constant_cpu_to_le16(0);
icb->response_q_inpointer = __constant_cpu_to_le16(0);
@@ -1736,7 +1696,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
- if (ha->mqenable) {
+ if (ha->mqenable || IS_QLA83XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
@@ -1756,7 +1716,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
__constant_cpu_to_le32(BIT_18);
/* Use Disable MSIX Handshake mode for capable adapters */
- if (IS_MSIX_NACK_CAPABLE(ha)) {
+ if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
+ (ha->flags.msix_enabled)) {
icb->firmware_options_2 &=
__constant_cpu_to_le32(~BIT_22);
ha->flags.disable_msix_handshake = 1;
@@ -1800,7 +1761,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
struct rsp_que *rsp;
- struct scsi_qla_host *vp;
struct mid_init_cb_24xx *mid_init_cb =
(struct mid_init_cb_24xx *) ha->init_cb;
@@ -1831,11 +1791,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
}
spin_lock(&ha->vport_slock);
- /* Clear RSCN queue. */
- list_for_each_entry(vp, &ha->vp_list, list) {
- vp->rscn_in_ptr = 0;
- vp->rscn_out_ptr = 0;
- }
spin_unlock(&ha->vport_slock);
@@ -2028,7 +1983,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
&loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
if (rval != QLA_SUCCESS) {
if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
- IS_QLA8XXX_TYPE(ha) ||
+ IS_CNA_CAPABLE(ha) ||
(rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
ql_dbg(ql_dbg_disc, vha, 0x2008,
"Loop is in a transition state.\n");
@@ -2120,7 +2075,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
uint16_t index;
struct qla_hw_data *ha = vha->hw;
int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
- !IS_QLA8XXX_TYPE(ha);
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
@@ -2596,13 +2551,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
if (ha->current_topology == ISP_CFG_FL &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
- vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
} else if (ha->current_topology == ISP_CFG_F &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
- vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
clear_bit(LOCAL_LOOP_UPDATE, &flags);
@@ -2612,7 +2565,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (!vha->flags.online ||
(test_bit(ABORT_ISP_ACTIVE, &flags))) {
- vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
}
@@ -2622,8 +2574,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_disc, vha, 0x2015,
"Loop resync needed, failing.\n");
rval = QLA_FUNCTION_FAILED;
- }
- else
+ } else
rval = qla2x00_configure_local_loop(vha);
}
@@ -2662,8 +2613,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
if (test_bit(RSCN_UPDATE, &save_flags)) {
set_bit(RSCN_UPDATE, &vha->dpc_flags);
- if (!IS_ALOGIO_CAPABLE(ha))
- vha->flags.rscn_queue_overflow = 1;
}
}
@@ -2699,7 +2648,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
found_devs = 0;
new_fcport = NULL;
- entries = MAX_FIBRE_DEVICES;
+ entries = MAX_FIBRE_DEVICES_LOOP;
ql_dbg(ql_dbg_disc, vha, 0x2016,
"Getting FCAL position map.\n");
@@ -2707,7 +2656,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
qla2x00_get_fcal_position_map(vha, NULL);
/* Get list of logged in devices. */
- memset(ha->gid_list, 0, GID_LIST_SIZE);
+ memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
&entries);
if (rval != QLA_SUCCESS)
@@ -2971,7 +2920,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
- int rval, rval2;
+ int rval;
fc_port_t *fcport, *fcptemp;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
@@ -2995,12 +2944,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
vha->device_flags |= SWITCH_FOUND;
- /* Mark devices that need re-synchronization. */
- rval2 = qla2x00_device_resync(vha);
- if (rval2 == QLA_RSCNS_HANDLED) {
- /* No point doing the scan, just continue. */
- return (QLA_SUCCESS);
- }
do {
/* FDMI support. */
if (ql2xfdmienable &&
@@ -3012,8 +2955,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
loop_id = NPH_SNS;
else
loop_id = SIMPLE_NAME_SERVER;
- ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
- 0xfc, mb, BIT_1 | BIT_0);
+ rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
+ 0xfc, mb, BIT_1|BIT_0);
+ if (rval != QLA_SUCCESS) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return rval;
+ }
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
"Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
@@ -3044,6 +2991,13 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
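Fabric reconciliation becomes mark-and-sweep: every known fcport is marked QLA_FCPORT_SCAN before the name server is queried, ports seen during the scan are flipped to QLA_FCPORT_FOUND in qla2x00_find_all_fabric_devs(), and the sweep below marks still-SCAN online ports lost. This replaces the RSCN-queue-driven qla2x00_device_resync() deleted further down. A compact sketch:

	#include <stdio.h>

	enum { FCPORT_SCAN = 1, FCPORT_FOUND = 2 };

	struct fcport {
		const char *name;
		int scan_state;
		int online;
	};

	int main(void)
	{
		struct fcport ports[] = {
			{ "port-a", 0, 1 },
			{ "port-b", 0, 1 },
		};
		int i, n = 2;

		/* Mark: assume nothing is still present. */
		for (i = 0; i < n; i++)
			ports[i].scan_state = FCPORT_SCAN;

		/* Scan: pretend the name server reported only port-a. */
		ports[0].scan_state = FCPORT_FOUND;

		/* Sweep: still-marked online ports have disappeared. */
		for (i = 0; i < n; i++)
			if (ports[i].scan_state == FCPORT_SCAN && ports[i].online)
				printf("%s: mark device lost\n", ports[i].name);
		return 0;
	}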
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
@@ -3059,7 +3013,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+ if (fcport->scan_state == QLA_FCPORT_SCAN &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3184,20 +3139,21 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
rval = QLA_SUCCESS;
/* Try GID_PT to get device list, else GAN. */
- swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
+ if (!ha->swl)
+ ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
+ GFP_KERNEL);
+ swl = ha->swl;
if (!swl) {
/*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2054,
"GID_PT allocations failed, fallback on GA_NXT.\n");
} else {
+ memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
- kfree(swl);
swl = NULL;
} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
- kfree(swl);
swl = NULL;
} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
- kfree(swl);
swl = NULL;
} else if (ql2xiidmaenable &&
qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
@@ -3215,7 +3171,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (new_fcport == NULL) {
ql_log(ql_log_warn, vha, 0x205e,
"Failed to allocate memory for fcport.\n");
- kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
@@ -3332,6 +3287,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+
found++;
/* Update port state. */
@@ -3368,6 +3325,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
fcport->flags |= FCF_LOGIN_NEEDED;
if (fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ (fcport->flags & FCF_ASYNC_SENT) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3390,14 +3348,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (new_fcport == NULL) {
ql_log(ql_log_warn, vha, 0x2066,
"Memory allocation failed for fcport.\n");
- kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
new_fcport->d_id.b24 = nxt_d_id.b24;
}
- kfree(swl);
kfree(new_fcport);
return (rval);
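The switch-info scratch list is now allocated once, cached as ha->swl, and cleared per scan, which is why the kfree(swl) calls disappear from every exit path above. A sketch of the cache-and-clear pattern, names illustrative:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct hw {
		void *swl;
		size_t swl_size;
	};

	/* Allocate once, clear per use; no free on the scan's exit paths. */
	static void *get_swl(struct hw *ha, size_t size)
	{
		if (!ha->swl) {
			ha->swl = calloc(1, size);
			ha->swl_size = size;
		} else {
			memset(ha->swl, 0, ha->swl_size);
		}
		return ha->swl;	/* NULL means fall back (GA_NXT path) */
	}

	int main(void)
	{
		struct hw ha = { 0 };

		if (!get_swl(&ha, 4096))
			printf("fallback scan\n");
		else
			printf("scan using cached swl\n");
		free(ha.swl);
		return 0;
	}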
@@ -3470,6 +3426,9 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
/* If not in use then it is free to use. */
if (!found) {
+ ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+ "Assigning new loopid=%x, portid=%x.\n",
+ dev->loop_id, dev->d_id.b24);
break;
}
@@ -3488,110 +3447,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
}
/*
- * qla2x00_device_resync
- * Marks devices in the database that needs resynchronization.
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Context:
- * Kernel context.
- */
-static int
-qla2x00_device_resync(scsi_qla_host_t *vha)
-{
- int rval;
- uint32_t mask;
- fc_port_t *fcport;
- uint32_t rscn_entry;
- uint8_t rscn_out_iter;
- uint8_t format;
- port_id_t d_id = {};
-
- rval = QLA_RSCNS_HANDLED;
-
- while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
- vha->flags.rscn_queue_overflow) {
-
- rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
- format = MSB(MSW(rscn_entry));
- d_id.b.domain = LSB(MSW(rscn_entry));
- d_id.b.area = MSB(LSW(rscn_entry));
- d_id.b.al_pa = LSB(LSW(rscn_entry));
-
- ql_dbg(ql_dbg_disc, vha, 0x2020,
- "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
- vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
- d_id.b.al_pa);
-
- vha->rscn_out_ptr++;
- if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
- vha->rscn_out_ptr = 0;
-
- /* Skip duplicate entries. */
- for (rscn_out_iter = vha->rscn_out_ptr;
- !vha->flags.rscn_queue_overflow &&
- rscn_out_iter != vha->rscn_in_ptr;
- rscn_out_iter = (rscn_out_iter ==
- (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
-
- if (rscn_entry != vha->rscn_queue[rscn_out_iter])
- break;
-
- ql_dbg(ql_dbg_disc, vha, 0x2021,
- "Skipping duplicate RSCN queue entry found at "
- "[%d].\n", rscn_out_iter);
-
- vha->rscn_out_ptr = rscn_out_iter;
- }
-
- /* Queue overflow, set switch default case. */
- if (vha->flags.rscn_queue_overflow) {
- ql_dbg(ql_dbg_disc, vha, 0x2022,
- "device_resync: rscn overflow.\n");
-
- format = 3;
- vha->flags.rscn_queue_overflow = 0;
- }
-
- switch (format) {
- case 0:
- mask = 0xffffff;
- break;
- case 1:
- mask = 0xffff00;
- break;
- case 2:
- mask = 0xff0000;
- break;
- default:
- mask = 0x0;
- d_id.b24 = 0;
- vha->rscn_out_ptr = vha->rscn_in_ptr;
- break;
- }
-
- rval = QLA_SUCCESS;
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->d_id.b24 & mask) != d_id.b24 ||
- fcport->port_type == FCT_BROADCAST)
- continue;
-
- if (atomic_read(&fcport->state) == FCS_ONLINE) {
- if (format != 3 ||
- fcport->port_type != FCT_INITIATOR) {
- qla2x00_mark_device_lost(vha, fcport,
- 0, 0);
- }
- }
- }
- }
- return (rval);
-}
-
-/*
* qla2x00_fabric_dev_login
* Login fabric target device and update FC port database.
*
@@ -3644,6 +3499,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
} else {
qla2x00_update_fcport(vha, fcport);
}
+ } else {
+ /* Retry Login. */
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
}
return (rval);
@@ -3684,9 +3542,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
/* Login fcport on switch. */
- ha->isp_ops->fabric_login(vha, fcport->loop_id,
+ rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb, BIT_0);
+ if (rval != QLA_SUCCESS) {
+ return rval;
+ }
if (mb[0] == MBS_PORT_ID_USED) {
/*
* Device has another loop ID. The firmware team
@@ -4100,15 +3961,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
ha->isp_abort_cnt = 0;
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
- if (IS_QLA81XX(ha))
- qla2x00_get_fw_version(vha,
- &ha->fw_major_version,
- &ha->fw_minor_version,
- &ha->fw_subminor_version,
- &ha->fw_attributes, &ha->fw_memory_size,
- ha->mpi_version, &ha->mpi_capabilities,
- ha->phy_version);
-
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha))
+ qla2x00_get_fw_version(vha);
if (ha->fce) {
ha->flags.fce_enabled = 1;
memset(ha->fce, 0,
@@ -4974,7 +4828,6 @@ try_blob_fw:
ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
ha->flags.running_gold_fw = 1;
-
return rval;
}
@@ -5009,6 +4862,7 @@ int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
+ int rval2;
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
@@ -5033,12 +4887,18 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
vha->flags.management_server_logged_in = 0;
/* Login to SNS first */
- ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
- if (mb[0] != MBS_COMMAND_COMPLETE) {
- ql_dbg(ql_dbg_init, vha, 0x0103,
- "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
- "mb[6]=%x mb[7]=%x.\n",
- NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
+ rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
+ BIT_1);
+ if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+ if (rval2 == QLA_MEMORY_ALLOC_FAILED)
+ ql_dbg(ql_dbg_init, vha, 0x0120,
+ "Failed SNS login: loop_id=%x, rval2=%d\n",
+ NPH_SNS, rval2);
+ else
+ ql_dbg(ql_dbg_init, vha, 0x0103,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
+ "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+ NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
return (QLA_FUNCTION_FAILED);
}
@@ -5214,10 +5074,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->reset_delay = 5;
nv->max_luns_per_target = __constant_cpu_to_le16(128);
nv->port_down_retry_count = __constant_cpu_to_le16(30);
- nv->link_down_timeout = __constant_cpu_to_le16(30);
+ nv->link_down_timeout = __constant_cpu_to_le16(180);
nv->enode_mac[0] = 0x00;
- nv->enode_mac[1] = 0x02;
- nv->enode_mac[2] = 0x03;
+ nv->enode_mac[1] = 0xC0;
+ nv->enode_mac[2] = 0xDD;
nv->enode_mac[3] = 0x04;
nv->enode_mac[4] = 0x05;
nv->enode_mac[5] = 0x06 + ha->port_no;
@@ -5248,9 +5108,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
- icb->enode_mac[0] = 0x01;
- icb->enode_mac[1] = 0x02;
- icb->enode_mac[2] = 0x03;
+ icb->enode_mac[0] = 0x00;
+ icb->enode_mac[1] = 0xC0;
+ icb->enode_mac[2] = 0xDD;
icb->enode_mac[3] = 0x04;
icb->enode_mac[4] = 0x05;
icb->enode_mac[5] = 0x06 + ha->port_no;
@@ -5353,6 +5213,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (ql2xloginretrycount)
ha->login_retry_count = ql2xloginretrycount;
+ /* If not running MSI-X, we need handshaking on interrupts. */
+ if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
+ icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+
/* Enable ZIO. */
if (!vha->flags.init_done) {
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 7cc4f36cd539..6e457643c639 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -72,16 +72,19 @@ static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
struct dsd_dma *dsd_ptr, *tdsd_ptr;
+ struct crc_context *ctx;
+
+ ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
/* clean up allocated prev pool */
list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
- &((struct crc_context *)sp->ctx)->dsd_list, list) {
+ &ctx->dsd_list, list) {
dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
dsd_ptr->dsd_list_dma);
list_del(&dsd_ptr->list);
kfree(dsd_ptr);
}
- INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
+ INIT_LIST_HEAD(&ctx->dsd_list);
}
static inline void
@@ -113,8 +116,7 @@ qla2x00_hba_err_chk_enabled(srb_t *sp)
return 0;
*
*/
-
- switch (scsi_get_prot_op(sp->cmd)) {
+ switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
if (ql2xenablehba_err_chk >= 1)
@@ -144,3 +146,44 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}
+
+static inline srb_t *
+qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
+{
+ srb_t *sp = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (unlikely(bail))
+ return NULL;
+
+ sp = mempool_alloc(ha->srb_mempool, flag);
+ if (!sp)
+ goto done;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+done:
+ if (!sp)
+ QLA_VHA_MARK_NOT_BUSY(vha);
+ return sp;
+}
+
+static inline void
+qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+{
+ init_timer(&sp->u.iocb_cmd.timer);
+ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+ sp->u.iocb_cmd.timer.data = (unsigned long)sp;
+ sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
+ add_timer(&sp->u.iocb_cmd.timer);
+ sp->free = qla2x00_sp_free;
+}
+
+static inline int
+qla2x00_gid_list_size(struct qla_hw_data *ha)
+{
+ return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+}
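qla2x00_init_timer() uses the init_timer-era interface, where the timer carries an opaque data word that the expiry handler casts back to the owning sp, and it also wires sp->free to the common release routine. A user-space sketch of the data/function pairing, fired by hand since there is no timer wheel here:

	#include <stdio.h>

	/* Stand-in for the init_timer-era kernel timer. */
	struct timer {
		unsigned long expires;
		unsigned long data;
		void (*function)(unsigned long);
	};

	struct srb {
		const char *name;
		struct timer timer;
	};

	static void sp_timeout(unsigned long data)
	{
		struct srb *sp = (struct srb *)data;

		printf("%s timed out\n", sp->name);
	}

	static void init_sp_timer(struct srb *sp, unsigned long tmo)
	{
		sp->timer.expires = tmo;	/* jiffies + tmo * HZ in-kernel */
		sp->timer.data = (unsigned long)sp;
		sp->timer.function = sp_timeout;
	}

	int main(void)
	{
		struct srb sp = { .name = "adisc" };

		init_sp_timer(&sp, 2);
		sp.timer.function(sp.timer.data);	/* simulate expiry */
		return 0;
	}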
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 55a96761b5a4..eac950924497 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -22,18 +22,19 @@ static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
cflags = 0;
/* Set transfer direction */
- if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(sp->cmd);
- } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(sp->cmd);
+ scsi_bufflen(cmd);
}
return (cflags);
}
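qla_iocb.c stops dereferencing sp->cmd directly and goes through GET_CMD_SP(sp), so the SCSI command's storage (now a union member of srb_t) is hidden behind a single accessor and call sites fetch it once into a local. A sketch of the accessor idea; the union layout here is an assumption for illustration:

	#include <stdio.h>

	struct scsi_cmnd {
		int sc_data_direction;
	};

	struct srb {
		union {
			struct {
				struct scsi_cmnd *cmd;
			} scmd;
		} u;
	};

	/* One accessor; call sites no longer care where cmd lives. */
	#define GET_CMD_SP(sp)	((sp)->u.scmd.cmd)

	int main(void)
	{
		struct scsi_cmnd c = { .sc_data_direction = 1 };
		struct srb sp = { .u.scmd.cmd = &c };
		struct scsi_cmnd *cmd = GET_CMD_SP(&sp);

		printf("direction=%d\n", cmd->sc_data_direction);
		return 0;
	}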
@@ -143,12 +144,13 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
- uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ uint8_t guard = scsi_host_get_guard(cmd->device->host);
/* We only support T10 DIF right now */
if (guard != SHOST_DIX_GUARD_CRC) {
ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
- "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
+ "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
return 0;
}
@@ -156,7 +158,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
*fw_prot_opts = 0;
/* Translate SCSI opcode to a protection opcode */
- switch (scsi_get_prot_op(sp->cmd)) {
+ switch (scsi_get_prot_op(cmd)) {
case SCSI_PROT_READ_STRIP:
*fw_prot_opts |= PO_MODE_DIF_REMOVE;
break;
@@ -180,7 +182,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
break;
}
- return scsi_prot_sg_count(sp->cmd);
+ return scsi_prot_sg_count(cmd);
}
/*
@@ -201,7 +203,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
struct scatterlist *sg;
int i;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 2 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -259,7 +261,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
struct scatterlist *sg;
int i;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -333,7 +335,7 @@ qla2x00_start_scsi(srb_t *sp)
vha = sp->fcport->vha;
ha = vha->hw;
reg = &ha->iobase->isp;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
req = ha->req_q_map[0];
rsp = ha->rsp_q_map[0];
/* So we know we haven't pci_map'ed anything yet */
@@ -391,7 +393,7 @@ qla2x00_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
cmd_pkt = (cmd_entry_t *)req->ring_ptr;
@@ -403,7 +405,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Set target ID and LUN number*/
SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
- cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
+ cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
/* Update tagged queuing modifier */
if (scsi_populate_tag_msg(cmd, tag)) {
@@ -473,7 +475,6 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
if (IS_QLA82XX(ha)) {
qla82xx_start_iocbs(vha);
@@ -487,9 +488,9 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable) {
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
+ if (ha->mqenable || IS_QLA83XX(ha)) {
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
} else if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@@ -609,7 +610,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
struct dsd_dma *dsd_ptr;
struct ct6_dsd *ctx;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -636,7 +637,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
}
cur_seg = scsi_sglist(cmd);
- ctx = sp->ctx;
+ ctx = GET_CMD_CTX_SP(sp);
while (tot_dsds) {
avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -725,7 +726,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
int i;
struct req_que *req;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
/* Update entry type to indicate Command Type 3 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -745,12 +746,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(sp->cmd);
+ scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(sp->cmd);
+ scsi_bufflen(cmd);
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -797,7 +798,7 @@ static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
unsigned int protcnt)
{
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
switch (scsi_get_prot_type(cmd)) {
@@ -952,16 +953,16 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
struct qla2_sgx sgx;
dma_addr_t sle_dma;
uint32_t sle_dma_len, tot_prot_dma_len = 0;
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
prot_int = cmd->device->sector_size;
memset(&sgx, 0, sizeof(struct qla2_sgx));
- sgx.tot_bytes = scsi_bufflen(sp->cmd);
- sgx.cur_sg = scsi_sglist(sp->cmd);
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
sgx.sp = sp;
- sg_prot = scsi_prot_sglist(sp->cmd);
+ sg_prot = scsi_prot_sglist(cmd);
while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
@@ -995,7 +996,7 @@ alloc_and_fill:
}
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->ctx)->dsd_list);
+ &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@@ -1044,11 +1045,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
- scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
uint8_t *cp;
- scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
@@ -1078,7 +1080,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
}
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->ctx)->dsd_list);
+ &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@@ -1091,17 +1093,16 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
sle_dma = sg_dma_address(sg);
ql_dbg(ql_dbg_io, vha, 0x300a,
"sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
- i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
- sp->cmd);
+ i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
- if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
ql_dbg(ql_dbg_io, vha, 0x300b,
- "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
+ "User data buffer=%p for cmd=%p.\n", cp, cmd);
}
}
/* Null termination */
@@ -1128,8 +1129,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
uint8_t *cp;
-
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
@@ -1160,7 +1160,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
}
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->ctx)->dsd_list);
+ &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
@@ -1171,7 +1171,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
- if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
ql_dbg(ql_dbg_io, vha, 0x3027,
"%s(): %p, sg_entry %d - "
"addr=0x%x0x%x, len=%d.\n",
@@ -1182,7 +1182,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
- if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
ql_dbg(ql_dbg_io, vha, 0x3028,
"%s(): Protection Data buffer = %p.\n", __func__,
@@ -1228,7 +1228,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
dma_addr_t crc_ctx_dma;
char tag[2];
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
sgc = 0;
/* Update entry type to indicate Command Type CRC_2 IOCB */
@@ -1256,15 +1256,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_READ_DATA);
}
- if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
- (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
- (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
- (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
bundling = 0;
/* Allocate CRC context from global pool */
- crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
- GFP_ATOMIC, &crc_ctx_dma);
+ crc_ctx_pkt = sp->u.scmd.ctx =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
if (!crc_ctx_pkt)
goto crc_queuing_error;
@@ -1310,7 +1310,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
fcp_cmnd->additional_cdb_len |= 2;
- int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
+ int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1345,7 +1345,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
blk_size = cmd->device->sector_size;
dif_bytes = (data_bytes / blk_size) * 8;
- switch (scsi_get_prot_op(sp->cmd)) {
+ switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
total_bytes = data_bytes;
@@ -1445,7 +1445,7 @@ qla24xx_start_scsi(srb_t *sp)
uint16_t tot_dsds;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
char tag[2];
@@ -1510,7 +1510,7 @@ qla24xx_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
@@ -1529,7 +1529,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1611,7 +1611,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
uint16_t fw_prot_opts = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct cmd_type_crc_2 *cmd_pkt;
@@ -1728,7 +1728,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
/* Fill-in common area */
@@ -1744,7 +1744,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* Total Data and protection segment(s) */
@@ -1797,7 +1797,7 @@ queuing_error:
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct qla_hw_data *ha = sp->fcport->vha->hw;
int affinity = cmd->request->cpu;
@@ -1818,7 +1818,6 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
- struct srb_ctx *ctx;
pkt = NULL;
req_cnt = 1;
@@ -1848,15 +1847,13 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
sp->handle = handle;
/* Adjust entry-counts as needed. */
- if (sp->ctx) {
- ctx = sp->ctx;
- req_cnt = ctx->iocbs;
- }
+ if (sp->type != SRB_SCSI_CMD)
+ req_cnt = sp->iocbs;
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
- if (ha->mqenable)
+ if (ha->mqenable || IS_QLA83XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_QLA82XX(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -1889,8 +1886,7 @@ queuing_error:
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
@@ -1909,8 +1905,7 @@ static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *lio = ctx->u.iocb_cmd;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
uint16_t opts;
mbx->entry_type = MBX_IOCB_TYPE;
@@ -1999,8 +1994,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
struct fc_port *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct srb_ctx *ctx = sp->ctx;
- struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct srb_iocb *iocb = &sp->u.iocb_cmd;
struct req_que *req = vha->req;
flags = iocb->u.tmf.flags;
@@ -2027,7 +2021,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2041,7 +2035,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
els_iocb->opcode =
- (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
+ sp->type == SRB_ELS_CMD_RPT ?
bsg_job->request->rqst_data.r_els.els_code :
bsg_job->request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2078,7 +2072,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
@@ -2155,7 +2149,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
+ struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
@@ -2245,12 +2239,12 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
- char tag[2];
+ char tag[2];
/* Setup device pointers. */
ret = 0;
reg = &ha->iobase->isp82;
- cmd = sp->cmd;
+ cmd = GET_CMD_SP(sp);
req = vha->req;
rsp = ha->rsp_q_map[0];
@@ -2354,12 +2348,14 @@ sufficient_dsds:
if (req->cnt < (req_cnt + 2))
goto queuing_error;
- ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!sp->ctx) {
+ ctx = sp->u.scmd.ctx =
+ mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!ctx) {
ql_log(ql_log_fatal, vha, 0x3010,
"Failed to allocate ctx for cmd=%p.\n", cmd);
goto queuing_error;
}
+
memset(ctx, 0, sizeof(struct ct6_dsd));
ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
GFP_ATOMIC, &ctx->fcp_cmnd_dma);
@@ -2410,12 +2406,12 @@ sufficient_dsds:
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
goto queuing_error_fcp_cmnd;
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* build FCP_CMND IU */
memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
if (cmd->sc_data_direction == DMA_TO_DEVICE)
@@ -2495,9 +2491,9 @@ sufficient_dsds:
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
- sizeof(cmd_pkt->lun));
+ sizeof(cmd_pkt->lun));
/*
* Update tagged queuing modifier -- default is TSK_SIMPLE (0).
@@ -2538,7 +2534,7 @@ sufficient_dsds:
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
wmb();
@@ -2584,9 +2580,9 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- if (sp->ctx) {
- mempool_free(sp->ctx, ha->ctx_mempool);
- sp->ctx = NULL;
+ if (sp->u.scmd.ctx) {
+ mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
+ sp->u.scmd.ctx = NULL;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2599,7 +2595,6 @@ qla2x00_start_sp(srb_t *sp)
int rval;
struct qla_hw_data *ha = sp->fcport->vha->hw;
void *pkt;
- struct srb_ctx *ctx = sp->ctx;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
@@ -2612,7 +2607,7 @@ qla2x00_start_sp(srb_t *sp)
}
rval = QLA_SUCCESS;
- switch (ctx->type) {
+ switch (sp->type) {
case SRB_LOGIN_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_login_iocb(sp, pkt) :
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 349843ea32f6..f79844ce7122 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -44,8 +44,8 @@ qla2100_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x505d,
+ "%s: NULL response queue pointer.\n", __func__);
return (IRQ_NONE);
}
@@ -141,8 +141,8 @@ qla2300_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x5058,
+ "%s: NULL response queue pointer.\n", __func__);
return (IRQ_NONE);
}
@@ -289,7 +289,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
mb[cnt] = RD_REG_WORD(wptr);
ql_dbg(ql_dbg_async, vha, 0x5021,
- "Inter-Driver Commucation %s -- "
+ "Inter-Driver Communication %s -- "
"%04x %04x %04x %04x %04x %04x %04x.\n",
event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
mb[4], mb[5], mb[6]);
@@ -318,7 +318,7 @@ void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN 2
- static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
+ static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
char *link_speed;
uint16_t handle_cnt;
uint16_t cnt, mbx;
@@ -328,12 +328,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
uint32_t rscn_entry, host_pid;
- uint8_t rscn_queue_index;
unsigned long flags;
/* Setup to process RIO completion. */
handle_cnt = 0;
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha))
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
@@ -405,7 +404,8 @@ skip_rio:
break;
case MBA_SYSTEM_ERR: /* System Error */
- mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
+ mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+ RD_REG_WORD(&reg24->mailbox7) : 0;
ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
@@ -418,6 +418,7 @@ skip_rio:
"Unrecoverable Hardware Error: adapter "
"marked OFFLINE!\n");
vha->flags.online = 0;
+ vha->device_flags |= DFLG_DEV_FAILED;
} else {
/* Check to see if MPI timeout occurred */
if ((mbx & MBX_3) && (ha->flags.port0))
@@ -431,6 +432,7 @@ skip_rio:
"Unrecoverable Hardware Error: adapter marked "
"OFFLINE!\n");
vha->flags.online = 0;
+ vha->device_flags |= DFLG_DEV_FAILED;
} else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
@@ -482,10 +484,10 @@ skip_rio:
ha->link_data_rate = PORT_SPEED_1GB;
} else {
link_speed = link_speeds[LS_UNKNOWN];
- if (mb[1] < 5)
+ if (mb[1] < 6)
link_speed = link_speeds[mb[1]];
else if (mb[1] == 0x13)
- link_speed = link_speeds[5];
+ link_speed = link_speeds[6];
ha->link_data_rate = mb[1];
}
@@ -497,7 +499,8 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
- mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
+ mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
+ ? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
ql_dbg(ql_dbg_async, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
@@ -547,7 +550,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- if (IS_QLA8XXX_TYPE(ha)) {
+ if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
ql_dbg(ql_dbg_async, vha, 0x500d,
"DCBX Completed -- %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
@@ -681,8 +684,6 @@ skip_rio:
qla2x00_mark_all_devices_lost(vha, 1);
- vha->flags.rscn_queue_overflow = 1;
-
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
break;
@@ -711,15 +712,6 @@ skip_rio:
/* Ignore reserved bits from RSCN-payload. */
rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
- rscn_queue_index = vha->rscn_in_ptr + 1;
- if (rscn_queue_index == MAX_RSCN_COUNT)
- rscn_queue_index = 0;
- if (rscn_queue_index != vha->rscn_out_ptr) {
- vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
- vha->rscn_in_ptr = rscn_queue_index;
- } else {
- vha->flags.rscn_queue_overflow = 1;
- }
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -809,6 +801,10 @@ skip_rio:
case MBA_IDC_TIME_EXT:
qla81xx_idc_event(vha, mb[0], mb[1]);
break;
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x5057,
+ "Unknown AEN:%04x %04x %04x %04x\n",
+ mb[0], mb[1], mb[2], mb[3]);
}
if (!vha->vp_idx && ha->num_vhosts)
@@ -845,8 +841,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
req->outstanding_cmds[index] = NULL;
/* Save ISP completion status */
- sp->cmd->result = DID_OK << 16;
- qla2x00_sp_compl(ha, sp);
+ sp->done(ha, sp, DID_OK << 16);
} else {
ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
@@ -903,7 +898,6 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *lio;
- struct srb_ctx *ctx;
uint16_t *data;
uint16_t status;
@@ -911,9 +905,8 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
- ctx = sp->ctx;
- lio = ctx->u.iocb_cmd;
- type = ctx->name;
+ lio = &sp->u.iocb_cmd;
+ type = sp->name;
fcport = sp->fcport;
data = lio->u.logio.data;
@@ -937,7 +930,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
status = le16_to_cpu(mbx->status);
- if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
+ if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
status = 0;
if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
@@ -948,7 +941,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb1));
data[0] = MBS_COMMAND_COMPLETE;
- if (ctx->type == SRB_LOGIN_CMD) {
+ if (sp->type == SRB_LOGIN_CMD) {
fcport->port_type = FCT_TARGET;
if (le16_to_cpu(mbx->mb1) & BIT_0)
fcport->port_type = FCT_INITIATOR;
@@ -979,7 +972,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb7));
logio_done:
- lio->done(sp);
+ sp->done(vha, sp, 0);
}
static void
@@ -988,29 +981,18 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "CT_IOCB";
const char *type;
- struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_ctx *sp_bsg;
struct fc_bsg_job *bsg_job;
uint16_t comp_status;
+ int res;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
- sp_bsg = sp->ctx;
- bsg_job = sp_bsg->u.bsg_job;
+ bsg_job = sp->u.bsg_job;
- type = NULL;
- switch (sp_bsg->type) {
- case SRB_CT_CMD:
- type = "ct pass-through";
- break;
- default:
- ql_log(ql_log_warn, vha, 0x5047,
- "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
- return;
- }
+ type = "ct pass-through";
comp_status = le16_to_cpu(pkt->comp_status);
@@ -1022,7 +1004,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
- bsg_job->reply->result = DID_OK << 16;
+ res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
@@ -1035,30 +1017,19 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
ql_log(ql_log_warn, vha, 0x5049,
"CT pass-through-%s error "
"comp_status-status=0x%x.\n", type, comp_status);
- bsg_job->reply->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
}
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
- bsg_job->reply->result = DID_OK << 16;
+ res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
-
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
- if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
- kfree(sp->fcport);
-
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- bsg_job->job_done(bsg_job);
+ sp->done(vha, sp, res);
}
static void
@@ -1067,22 +1038,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "ELS_CT_IOCB";
const char *type;
- struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_ctx *sp_bsg;
struct fc_bsg_job *bsg_job;
uint16_t comp_status;
uint32_t fw_status[3];
uint8_t* fw_sts_ptr;
+ int res;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
- sp_bsg = sp->ctx;
- bsg_job = sp_bsg->u.bsg_job;
+ bsg_job = sp->u.bsg_job;
type = NULL;
- switch (sp_bsg->type) {
+ switch (sp->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
type = "els";
@@ -1091,8 +1060,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type = "ct pass-through";
break;
default:
- ql_log(ql_log_warn, vha, 0x503e,
- "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
+ ql_dbg(ql_dbg_user, vha, 0x503e,
+ "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
return;
}
@@ -1108,11 +1077,11 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status != CS_COMPLETE) {
if (comp_status == CS_DATA_UNDERRUN) {
- bsg_job->reply->result = DID_OK << 16;
+ res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
- le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
+ le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
- ql_log(ql_log_info, vha, 0x503f,
+ ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
@@ -1122,7 +1091,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
else {
- ql_log(ql_log_info, vha, 0x5040,
+ ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
type, sp->handle, comp_status,
@@ -1130,32 +1099,21 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt)->error_subcode_1),
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_2));
- bsg_job->reply->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
+ ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
(uint8_t *)pkt, sizeof(*pkt));
}
else {
- bsg_job->reply->result = DID_OK << 16;
+ res = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
- (sp_bsg->type == SRB_CT_CMD))
- kfree(sp->fcport);
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- bsg_job->job_done(bsg_job);
+ sp->done(vha, sp, res);
}
static void
@@ -1167,7 +1125,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *lio;
- struct srb_ctx *ctx;
uint16_t *data;
uint32_t iop[2];
@@ -1175,9 +1132,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
- ctx = sp->ctx;
- lio = ctx->u.iocb_cmd;
- type = ctx->name;
+ lio = &sp->u.iocb_cmd;
+ type = sp->name;
fcport = sp->fcport;
data = lio->u.logio.data;
@@ -1185,7 +1141,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
- ql_log(ql_log_warn, vha, 0x5034,
+ ql_log(ql_log_warn, fcport->vha, 0x5034,
"Async-%s error entry - hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
type, sp->handle, fcport->d_id.b.domain,
@@ -1198,14 +1154,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
- ql_dbg(ql_dbg_async, vha, 0x5036,
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
"Async-%s complete - hdl=%x portid=%02x%02x%02x "
"iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
data[0] = MBS_COMMAND_COMPLETE;
- if (ctx->type != SRB_LOGIN_CMD)
+ if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
iop[0] = le32_to_cpu(logio->io_parameter[0]);
@@ -1239,7 +1195,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- ql_dbg(ql_dbg_async, vha, 0x5037,
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
"Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
"iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
@@ -1248,7 +1204,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[1]));
logio_done:
- lio->done(sp);
+ sp->done(vha, sp, 0);
}
static void
@@ -1260,7 +1216,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fc_port_t *fcport;
srb_t *sp;
struct srb_iocb *iocb;
- struct srb_ctx *ctx;
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
int error = 1;
@@ -1268,30 +1223,29 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
- ctx = sp->ctx;
- iocb = ctx->u.iocb_cmd;
- type = ctx->name;
+ iocb = &sp->u.iocb_cmd;
+ type = sp->name;
fcport = sp->fcport;
if (sts->entry_status) {
- ql_log(ql_log_warn, vha, 0x5038,
+ ql_log(ql_log_warn, fcport->vha, 0x5038,
"Async-%s error - hdl=%x entry-status(%x).\n",
type, sp->handle, sts->entry_status);
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- ql_log(ql_log_warn, vha, 0x5039,
+ ql_log(ql_log_warn, fcport->vha, 0x5039,
"Async-%s error - hdl=%x completion status(%x).\n",
type, sp->handle, sts->comp_status);
} else if (!(le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
- ql_log(ql_log_warn, vha, 0x503a,
+ ql_log(ql_log_warn, fcport->vha, 0x503a,
"Async-%s error - hdl=%x no response info(%x).\n",
type, sp->handle, sts->scsi_status);
} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- ql_log(ql_log_warn, vha, 0x503b,
+ ql_log(ql_log_warn, fcport->vha, 0x503b,
"Async-%s error - hdl=%x not enough response(%d).\n",
type, sp->handle, sts->rsp_data_len);
} else if (sts->data[3]) {
- ql_log(ql_log_warn, vha, 0x503c,
+ ql_log(ql_log_warn, fcport->vha, 0x503c,
"Async-%s error - hdl=%x response(%x).\n",
type, sp->handle, sts->data[3]);
} else {
@@ -1304,7 +1258,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
(uint8_t *)sts, sizeof(*sts));
}
- iocb->done(sp);
+ sp->done(vha, sp, 0);
}
/**
@@ -1390,25 +1344,32 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
- uint32_t sense_len, struct rsp_que *rsp)
+ uint32_t sense_len, struct rsp_que *rsp, int res)
{
struct scsi_qla_host *vha = sp->fcport->vha;
- struct scsi_cmnd *cp = sp->cmd;
+ struct scsi_cmnd *cp = GET_CMD_SP(sp);
+ uint32_t track_sense_len;
if (sense_len >= SCSI_SENSE_BUFFERSIZE)
sense_len = SCSI_SENSE_BUFFERSIZE;
- sp->request_sense_length = sense_len;
- sp->request_sense_ptr = cp->sense_buffer;
- if (sp->request_sense_length > par_sense_len)
+ SET_CMD_SENSE_LEN(sp, sense_len);
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+ track_sense_len = sense_len;
+
+ if (sense_len > par_sense_len)
sense_len = par_sense_len;
memcpy(cp->sense_buffer, sense_data, sense_len);
- sp->request_sense_ptr += sense_len;
- sp->request_sense_length -= sense_len;
- if (sp->request_sense_length != 0)
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+ track_sense_len -= sense_len;
+ SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+ if (track_sense_len != 0) {
rsp->status_srb = sp;
+ cp->result = res;
+ }
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
@@ -1436,7 +1397,7 @@ static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
struct scsi_qla_host *vha = sp->fcport->vha;
- struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
uint8_t *ap = &sts24->data[12];
uint8_t *ep = &sts24->data[20];
uint32_t e_ref_tag, a_ref_tag;
@@ -1580,6 +1541,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
uint16_t que;
struct req_que *req;
int logit = 1;
+ int res = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -1619,7 +1581,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
qla2xxx_wake_dpc(vha);
return;
}
- cp = sp->cmd;
+ cp = GET_CMD_SP(sp);
if (cp == NULL) {
ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
@@ -1668,11 +1630,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
par_sense_len -= rsp_info_len;
}
if (rsp_info_len > 3 && rsp_info[3]) {
- ql_dbg(ql_dbg_io, vha, 0x3019,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
"FCP I/O protocol failure (0x%x/0x%x).\n",
rsp_info_len, rsp_info[3]);
- cp->result = DID_BUS_BUSY << 16;
+ res = DID_BUS_BUSY << 16;
goto out;
}
}
@@ -1689,7 +1651,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
case CS_COMPLETE:
case CS_QUEUE_FULL:
if (scsi_status == 0) {
- cp->result = DID_OK << 16;
+ res = DID_OK << 16;
break;
}
if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
@@ -1699,19 +1661,19 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_dbg(ql_dbg_io, vha, 0x301a,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
- cp->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
break;
}
}
- cp->result = DID_OK << 16 | lscsi_status;
+ res = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_dbg(ql_dbg_io, vha, 0x301b,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
"QUEUE FULL detected.\n");
break;
}
@@ -1724,7 +1686,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break;
qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
- rsp);
+ rsp, res);
break;
case CS_DATA_UNDERRUN:
@@ -1733,36 +1695,36 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- ql_dbg(ql_dbg_io, vha, 0x301d,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
"Dropped frame(s) detected "
"(0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
- cp->result = DID_ERROR << 16 | lscsi_status;
+ res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
}
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_dbg(ql_dbg_io, vha, 0x301e,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
- cp->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
break;
}
} else {
- ql_dbg(ql_dbg_io, vha, 0x301f,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
"Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", resid, scsi_bufflen(cp));
- cp->result = DID_ERROR << 16 | lscsi_status;
+ res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
}
- cp->result = DID_OK << 16 | lscsi_status;
+ res = DID_OK << 16 | lscsi_status;
logit = 0;
check_scsi_status:
@@ -1772,7 +1734,7 @@ check_scsi_status:
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_dbg(ql_dbg_io, vha, 0x3020,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
"QUEUE FULL detected.\n");
logit = 1;
break;
@@ -1785,7 +1747,7 @@ check_scsi_status:
break;
qla2x00_handle_sense(sp, sense_data, par_sense_len,
- sense_len, rsp);
+ sense_len, rsp, res);
}
break;
@@ -1802,7 +1764,7 @@ check_scsi_status:
* while we try to recover so instruct the mid layer
* to requeue until the class decides how to handle this.
*/
- cp->result = DID_TRANSPORT_DISRUPTED << 16;
+ res = DID_TRANSPORT_DISRUPTED << 16;
if (comp_status == CS_TIMEOUT) {
if (IS_FWI2_CAPABLE(ha))
@@ -1812,7 +1774,7 @@ check_scsi_status:
break;
}
- ql_dbg(ql_dbg_io, vha, 0x3021,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
"Port down status: port-state=0x%x.\n",
atomic_read(&fcport->state));
@@ -1821,25 +1783,25 @@ check_scsi_status:
break;
case CS_ABORTED:
- cp->result = DID_RESET << 16;
+ res = DID_RESET << 16;
break;
case CS_DIF_ERROR:
logit = qla2x00_handle_dif_error(sp, sts24);
break;
default:
- cp->result = DID_ERROR << 16;
+ res = DID_ERROR << 16;
break;
}
out:
if (logit)
- ql_dbg(ql_dbg_io, vha, 0x3022,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) "
"nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
"cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
- comp_status, scsi_status, cp->result, vha->host_no,
+ comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
@@ -1848,7 +1810,7 @@ out:
resid_len, fw_resid_len);
if (rsp->status_srb == NULL)
- qla2x00_sp_compl(ha, sp);
+ sp->done(ha, sp, res);
}
/**
@@ -1861,84 +1823,52 @@ out:
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
- uint8_t sense_sz = 0;
+ uint8_t sense_sz = 0;
struct qla_hw_data *ha = rsp->hw;
struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
- srb_t *sp = rsp->status_srb;
+ srb_t *sp = rsp->status_srb;
struct scsi_cmnd *cp;
+ uint32_t sense_len;
+ uint8_t *sense_ptr;
- if (sp != NULL && sp->request_sense_length != 0) {
- cp = sp->cmd;
- if (cp == NULL) {
- ql_log(ql_log_warn, vha, 0x3025,
- "cmd is NULL: already returned to OS (sp=%p).\n",
- sp);
+ if (!sp || !GET_CMD_SENSE_LEN(sp))
+ return;
- rsp->status_srb = NULL;
- return;
- }
+ sense_len = GET_CMD_SENSE_LEN(sp);
+ sense_ptr = GET_CMD_SENSE_PTR(sp);
- if (sp->request_sense_length > sizeof(pkt->data)) {
- sense_sz = sizeof(pkt->data);
- } else {
- sense_sz = sp->request_sense_length;
- }
+ cp = GET_CMD_SP(sp);
+ if (cp == NULL) {
+ ql_log(ql_log_warn, vha, 0x3025,
+ "cmd is NULL: already returned to OS (sp=%p).\n", sp);
- /* Move sense data. */
- if (IS_FWI2_CAPABLE(ha))
- host_to_fcp_swap(pkt->data, sizeof(pkt->data));
- memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
- ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
- sp->request_sense_ptr, sense_sz);
-
- sp->request_sense_ptr += sense_sz;
- sp->request_sense_length -= sense_sz;
-
- /* Place command on done queue. */
- if (sp->request_sense_length == 0) {
- rsp->status_srb = NULL;
- qla2x00_sp_compl(ha, sp);
- }
+ rsp->status_srb = NULL;
+ return;
}
-}
-static int
-qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
-{
- struct qla_hw_data *ha = vha->hw;
- struct srb_ctx *ctx;
+ if (sense_len > sizeof(pkt->data))
+ sense_sz = sizeof(pkt->data);
+ else
+ sense_sz = sense_len;
- if (!sp->ctx)
- return 1;
+ /* Move sense data. */
+ if (IS_FWI2_CAPABLE(ha))
+ host_to_fcp_swap(pkt->data, sizeof(pkt->data));
+ memcpy(sense_ptr, pkt->data, sense_sz);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
+ sense_ptr, sense_sz);
- ctx = sp->ctx;
+ sense_len -= sense_sz;
+ sense_ptr += sense_sz;
- if (ctx->type == SRB_LOGIN_CMD ||
- ctx->type == SRB_LOGOUT_CMD ||
- ctx->type == SRB_TM_CMD) {
- ctx->u.iocb_cmd->done(sp);
- return 0;
- } else if (ctx->type == SRB_ADISC_CMD) {
- ctx->u.iocb_cmd->free(sp);
- return 0;
- } else {
- struct fc_bsg_job *bsg_job;
-
- bsg_job = ctx->u.bsg_job;
- if (ctx->type == SRB_ELS_CMD_HST ||
- ctx->type == SRB_CT_CMD)
- kfree(sp->fcport);
-
- bsg_job->reply->reply_data.ctels_reply.status =
- FC_CTELS_STATUS_OK;
- bsg_job->reply->result = DID_ERROR << 16;
- bsg_job->reply->reply_payload_rcv_len = 0;
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- bsg_job->job_done(bsg_job);
- return 0;
+ SET_CMD_SENSE_PTR(sp, sense_ptr);
+ SET_CMD_SENSE_LEN(sp, sense_len);
+
+ /* Place command on done queue. */
+ if (sense_len == 0) {
+ rsp->status_srb = NULL;
+ sp->done(ha, sp, cp->result);
}
- return 1;
}
/**
@@ -1953,53 +1883,34 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
struct qla_hw_data *ha = vha->hw;
const char func[] = "ERROR-IOCB";
uint16_t que = MSW(pkt->handle);
- struct req_que *req = ha->req_q_map[que];
-
- if (pkt->entry_status & RF_INV_E_ORDER)
- ql_dbg(ql_dbg_async, vha, 0x502a,
- "Invalid Entry Order.\n");
- else if (pkt->entry_status & RF_INV_E_COUNT)
- ql_dbg(ql_dbg_async, vha, 0x502b,
- "Invalid Entry Count.\n");
- else if (pkt->entry_status & RF_INV_E_PARAM)
- ql_dbg(ql_dbg_async, vha, 0x502c,
- "Invalid Entry Parameter.\n");
- else if (pkt->entry_status & RF_INV_E_TYPE)
- ql_dbg(ql_dbg_async, vha, 0x502d,
- "Invalid Entry Type.\n");
- else if (pkt->entry_status & RF_BUSY)
- ql_dbg(ql_dbg_async, vha, 0x502e,
- "Busy.\n");
- else
- ql_dbg(ql_dbg_async, vha, 0x502f,
- "UNKNOWN flag error.\n");
+ struct req_que *req = NULL;
+ int res = DID_ERROR << 16;
+
+ ql_dbg(ql_dbg_async, vha, 0x502a,
+ "type of error status in response: 0x%x\n", pkt->entry_status);
+
+ if (que >= ha->max_req_queues || !ha->req_q_map[que])
+ goto fatal;
+
+ req = ha->req_q_map[que];
+
+ if (pkt->entry_status & RF_BUSY)
+ res = DID_BUS_BUSY << 16;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- if (qla2x00_free_sp_ctx(vha, sp)) {
- if (pkt->entry_status &
- (RF_INV_E_ORDER | RF_INV_E_COUNT |
- RF_INV_E_PARAM | RF_INV_E_TYPE)) {
- sp->cmd->result = DID_ERROR << 16;
- } else if (pkt->entry_status & RF_BUSY) {
- sp->cmd->result = DID_BUS_BUSY << 16;
- } else {
- sp->cmd->result = DID_ERROR << 16;
- }
- qla2x00_sp_compl(ha, sp);
- }
- } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
- COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
- || pkt->entry_type == COMMAND_TYPE_6) {
- ql_log(ql_log_warn, vha, 0x5030,
- "Error entry - invalid handle.\n");
-
- if (IS_QLA82XX(ha))
- set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
- else
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ sp->done(ha, sp, res);
+ return;
}
+fatal:
+ ql_log(ql_log_warn, vha, 0x5030,
+ "Error entry - invalid handle/queue.\n");
+
+ if (IS_QLA82XX(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
}
/**
@@ -2127,7 +2038,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return;
rval = QLA_SUCCESS;
@@ -2168,7 +2079,7 @@ done:
}
/**
- * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
+ * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
* @irq:
* @dev_id: SCSI driver HA context
*
@@ -2192,8 +2103,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x5059,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@@ -2276,8 +2187,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x505a,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2306,8 +2217,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2340,8 +2251,8 @@ qla24xx_msix_default(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0x505c,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2530,8 +2441,14 @@ msix_failed:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
- ha->mqenable = 1;
+ if (IS_QLA83XX(ha)) {
+ if (ha->msixbase && ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ ha->mqenable = 1;
+ } else if (ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+ ha->mqenable = 1;
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
@@ -2552,8 +2469,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
- if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
- !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
+ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2615,7 +2532,7 @@ clear_risc_ints:
* FIXME: Noted that 8014s were being dropped during NK testing.
* Timing deltas during MSI-X/INTa transitions?
*/
- if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
goto fail;
spin_lock_irq(&ha->hardware_lock);
if (IS_FWI2_CAPABLE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 08f1d01bdc1c..b4a23394a7bd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,17 +46,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
if (ha->pdev->error_state > pci_channel_io_frozen) {
- ql_log(ql_log_warn, base_vha, 0x1001,
+ ql_log(ql_log_warn, vha, 0x1001,
"error_state is greater than pci_channel_io_frozen, "
"exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
if (vha->device_flags & DFLG_DEV_FAILED) {
- ql_log(ql_log_warn, base_vha, 0x1002,
+ ql_log(ql_log_warn, vha, 0x1002,
"Device in failed state, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -69,7 +69,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.pci_channel_io_perm_failure) {
- ql_log(ql_log_warn, base_vha, 0x1003,
+ ql_log(ql_log_warn, vha, 0x1003,
"Perm failure on EEH timeout MBX, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -77,7 +77,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.isp82xx_fw_hung) {
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
- ql_log(ql_log_warn, base_vha, 0x1004,
+ ql_log(ql_log_warn, vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
return QLA_FUNCTION_TIMEOUT;
}
@@ -89,8 +89,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
*/
if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
- ql_log(ql_log_warn, base_vha, 0x1005,
- "Cmd access timeout, Exiting.\n");
+ ql_log(ql_log_warn, vha, 0x1005,
+ "Cmd access timeout, cmd=0x%x, Exiting.\n",
+ mcp->mb[0]);
return QLA_FUNCTION_TIMEOUT;
}
@@ -98,7 +99,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Save mailbox command for debug */
ha->mcp = mcp;
- ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
+ ql_dbg(ql_dbg_mbx, vha, 0x1006,
"Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -127,28 +128,28 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
iptr++;
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
"Loaded MBX registers (displayed in bytes) =.\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
(uint8_t *)mcp->mb, 16);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
".\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
((uint8_t *)mcp->mb + 0x10), 16);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
".\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
((uint8_t *)mcp->mb + 0x20), 8);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
"I/O Address = %p.\n", optr);
- ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Unlock mbx registers and wait for interrupt */
- ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
+ ql_dbg(ql_dbg_mbx, vha, 0x100f,
"Going to unlock irq & waiting for interrupts. "
"jiffies=%lx.\n", jiffies);
@@ -163,7 +164,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
- ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
+ ql_dbg(ql_dbg_mbx, vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
@@ -180,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
} else {
- ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
+ ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
if (IS_QLA82XX(ha)) {
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
- ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
+ ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
@@ -214,7 +215,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
command == MBC_LOAD_RISC_RAM_EXTENDED))
msleep(10);
} /* while */
- ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
+ ql_dbg(ql_dbg_mbx, vha, 0x1013,
"Waited %d sec.\n",
(uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
}
@@ -223,7 +224,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.mbox_int) {
uint16_t *iptr2;
- ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
+ ql_dbg(ql_dbg_mbx, vha, 0x1014,
"Cmd=%x completed.\n", command);
/* Got interrupt. Clear the flag. */
@@ -236,7 +237,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
rval = QLA_FUNCTION_FAILED;
- ql_log(ql_log_warn, base_vha, 0x1015,
+ ql_log(ql_log_warn, vha, 0x1015,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
goto premature_exit;
}
@@ -268,13 +269,19 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
"MBX Command timeout for cmd %x.\n", command);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
"iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
"mb[0] = 0x%x.\n", mb0);
- ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
+
+ /*
+ * Attempt to capture a firmware dump for further analysis
+ * of the current firmware state.
+ */
+ ha->isp_ops->fw_dump(vha, 0);
rval = QLA_FUNCTION_TIMEOUT;
}
@@ -285,7 +292,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->mcp = NULL;
if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
- ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
+ ql_dbg(ql_dbg_mbx, vha, 0x101a,
"Checking for additional resp interrupt.\n");
/* polling mode for non isp_abort commands. */
@@ -297,7 +304,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
ha->flags.eeh_busy) {
/* not in dpc. schedule it for dpc to take over. */
- ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
+ ql_dbg(ql_dbg_mbx, vha, 0x101b,
"Timeout, schedule isp_abort_needed.\n");
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
@@ -313,15 +320,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101c,
- "Mailbox cmd timeout occured. "
- "Scheduling ISP abort eeh_busy=0x%x.\n",
- ha->flags.eeh_busy);
+ "Mailbox cmd timeout occured, cmd=0x%x, "
+ "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+ "abort.\n", command, mcp->mb[0],
+ ha->flags.eeh_busy);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
} else if (!abort_active) {
/* call abort directly since we are in the DPC thread */
- ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
+ ql_dbg(ql_dbg_mbx, vha, 0x101d,
"Timeout, calling abort_isp.\n");
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
@@ -337,9 +345,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101e,
- "Mailbox cmd timeout occured. "
- "Scheduling ISP abort.\n");
-
+ "Mailbox cmd timeout occured, cmd=0x%x, "
+ "mb[0]=0x%x. Scheduling ISP abort ",
+ command, mcp->mb[0]);
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
/* Allow next mbx cmd to come in. */
@@ -350,7 +358,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
&vha->dpc_flags);
}
clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
- ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
+ ql_dbg(ql_dbg_mbx, vha, 0x101f,
"Finished abort_isp.\n");
goto mbx_done;
}
@@ -364,8 +372,8 @@ premature_exit:
mbx_done:
if (rval) {
ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
- "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
- mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
+ "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
@@ -455,7 +463,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
- if (IS_QLA81XX(ha)) {
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
struct nvram_81xx *nv = ha->nvram;
mcp->mb[4] = (nv->enhanced_features &
EXTENDED_BB_CREDITS);
@@ -508,21 +516,22 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
* Kernel context.
*/
int
-qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
- uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
- uint32_t *mpi_caps, uint8_t *phy)
+qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
+ if (IS_QLA83XX(vha->hw))
+ mcp->in_mb |= MBX_17|MBX_16|MBX_15;
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -530,23 +539,37 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
goto failed;
/* Return mailbox data. */
- *major = mcp->mb[1];
- *minor = mcp->mb[2];
- *subminor = mcp->mb[3];
- *attributes = mcp->mb[6];
+ ha->fw_major_version = mcp->mb[1];
+ ha->fw_minor_version = mcp->mb[2];
+ ha->fw_subminor_version = mcp->mb[3];
+ ha->fw_attributes = mcp->mb[6];
if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
- *memory = 0x1FFFF; /* Defaults to 128KB. */
+ ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
else
- *memory = (mcp->mb[5] << 16) | mcp->mb[4];
- if (IS_QLA81XX(vha->hw)) {
- mpi[0] = mcp->mb[10] & 0xff;
- mpi[1] = mcp->mb[11] >> 8;
- mpi[2] = mcp->mb[11] & 0xff;
- *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
- phy[0] = mcp->mb[8] & 0xff;
- phy[1] = mcp->mb[9] >> 8;
- phy[2] = mcp->mb[9] & 0xff;
+ ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
+ ha->mpi_version[0] = mcp->mb[10] & 0xff;
+ ha->mpi_version[1] = mcp->mb[11] >> 8;
+ ha->mpi_version[2] = mcp->mb[11] & 0xff;
+ ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
+ ha->phy_version[0] = mcp->mb[8] & 0xff;
+ ha->phy_version[1] = mcp->mb[9] >> 8;
+ ha->phy_version[2] = mcp->mb[9] & 0xff;
+ }
+ if (IS_QLA83XX(ha)) {
+ if (mcp->mb[6] & BIT_15) {
+ ha->fw_attributes_h = mcp->mb[15];
+ ha->fw_attributes_ext[0] = mcp->mb[16];
+ ha->fw_attributes_ext[1] = mcp->mb[17];
+ ql_dbg(ql_dbg_mbx, vha, 0x1139,
+ "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
+ __func__, mcp->mb[15], mcp->mb[6]);
+ } else
+ ql_dbg(ql_dbg_mbx, vha, 0x112f,
+ "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
+ __func__, mcp->mb[6]);
}
+
failed:
if (rval != QLA_SUCCESS) {
/*EMPTY*/
@@ -859,6 +882,7 @@ qla2x00_abort_command(srb_t *sp)
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
@@ -881,7 +905,7 @@ qla2x00_abort_command(srb_t *sp)
mcp->mb[1] = fcport->loop_id << 8;
mcp->mb[2] = (uint16_t)handle;
mcp->mb[3] = (uint16_t)(handle >> 16);
- mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
+ mcp->mb[6] = (uint16_t)cmd->device->lun;
mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -1028,7 +1052,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_0;
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA8XXX_TYPE(vha->hw))
+ if (IS_CNA_CAPABLE(vha->hw))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -1052,7 +1076,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
- if (IS_QLA8XXX_TYPE(vha->hw)) {
+ if (IS_CNA_CAPABLE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
vha->fcoe_fcf_idx = mcp->mb[10];
vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
@@ -1163,7 +1187,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) {
+ if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
mcp->mb[1] = BIT_0;
mcp->mb[10] = MSW(ha->ex_init_cb_dma);
mcp->mb[11] = LSW(ha->ex_init_cb_dma);
@@ -1172,7 +1196,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mcp->mb[14] = sizeof(*ha->ex_init_cb);
mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
}
- mcp->in_mb = MBX_0;
+ /* 1 and 2 should normally be captured. */
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA83XX(ha))
+ /* mb3 is additional info about the installed SFP. */
+ mcp->in_mb |= MBX_3;
mcp->buf_size = size;
mcp->flags = MBX_DMA_OUT;
mcp->tov = MBX_TOV_SECONDS;
@@ -1181,7 +1209,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x104d,
- "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
} else {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
@@ -1260,6 +1289,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
goto gpd_error_out;
if (IS_FWI2_CAPABLE(ha)) {
+ uint64_t zero = 0;
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
@@ -1273,6 +1303,14 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
goto gpd_error_out;
}
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd24->port_name, 8))) {
+ /* We lost the device midway. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
/* Names are little-endian. */
memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
@@ -1289,6 +1327,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
else
fcport->port_type = FCT_TARGET;
} else {
+ uint64_t zero = 0;
+
/* Check for logged in state. */
if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
@@ -1301,6 +1341,14 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
goto gpd_error_out;
}
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device midway. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
/* Names are little-endian. */
memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
@@ -1481,7 +1529,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
- if (IS_QLA8XXX_TYPE(vha->hw)) {
+ if (IS_CNA_CAPABLE(vha->hw)) {
/* Logout across all FCFs. */
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = BIT_1;
@@ -1622,7 +1670,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->port_id[1] = area;
lg->port_id[2] = domain;
lg->vp_index = vha->vp_idx;
- rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
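+ /* ha->r_a_tov is in 100 ms units: wait ~2 * R_A_TOV seconds plus slack. */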
+ rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+ (ha->r_a_tov / 10 * 2) + 2);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1063,
"Failed to issue login IOCB (%x).\n", rval);
@@ -1885,8 +1934,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->port_id[1] = area;
lg->port_id[2] = domain;
lg->vp_index = vha->vp_idx;
-
- rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
+ rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+ (ha->r_a_tov / 10 * 2) + 2);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x106f,
"Failed to issue logout IOCB (%x).\n", rval);
@@ -2094,7 +2143,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
mcp->in_mb |= MBX_12;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -2121,7 +2170,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
*orig_iocb_cnt = mcp->mb[10];
if (vha->hw->flags.npiv_supported && max_npiv_vports)
*max_npiv_vports = mcp->mb[11];
- if (IS_QLA81XX(vha->hw) && max_fcfs)
+ if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
*max_fcfs = mcp->mb[12];
}
@@ -2686,7 +2735,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
- if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
+ if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
+ !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -2828,7 +2878,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_QLA8XXX_TYPE(vha->hw))
+ if (IS_CNA_CAPABLE(vha->hw))
mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
else
mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
@@ -3298,6 +3348,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
mcp->mb[12] = req->qos;
mcp->mb[11] = req->vp_idx;
mcp->mb[13] = req->rid;
+ if (IS_QLA83XX(ha))
+ mcp->mb[15] = 0;
reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
QLA_QUE_PAGE * req->id);
@@ -3311,12 +3363,21 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->flags = MBX_DMA_OUT;
- mcp->tov = 60;
+ mcp->tov = MBX_TOV_SECONDS * 2;
+
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+ mcp->in_mb |= MBX_1;
+ if (IS_QLA83XX(ha)) {
+ mcp->out_mb |= MBX_15;
+ /* Capture extra status mailboxes to debug a queue-create issue seen with SR-IOV. */
+ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+ }
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
WRT_REG_DWORD(&reg->req_q_in, 0);
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ if (!IS_QLA83XX(ha))
+ WRT_REG_DWORD(&reg->req_q_out, 0);
}
req->req_q_in = &reg->req_q_in;
req->req_q_out = &reg->req_q_out;
@@ -3354,6 +3415,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mcp->mb[5] = rsp->length;
mcp->mb[14] = rsp->msix->entry;
mcp->mb[13] = rsp->rid;
+ if (IS_QLA83XX(ha))
+ mcp->mb[15] = 0;
reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
QLA_QUE_PAGE * rsp->id);
@@ -3367,12 +3430,23 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->flags = MBX_DMA_OUT;
- mcp->tov = 60;
+ mcp->tov = MBX_TOV_SECONDS * 2;
+
+ if (IS_QLA81XX(ha)) {
+ mcp->out_mb |= MBX_12|MBX_11|MBX_10;
+ mcp->in_mb |= MBX_1;
+ } else if (IS_QLA83XX(ha)) {
+ mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
+ mcp->in_mb |= MBX_1;
+ /* Capture extra status mailboxes to debug a queue-create issue seen with SR-IOV. */
+ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+ }
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
WRT_REG_DWORD(&reg->rsp_q_out, 0);
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
+ if (!IS_QLA83XX(ha))
+ WRT_REG_DWORD(&reg->rsp_q_in, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3424,7 +3498,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3454,7 +3528,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
@@ -3486,7 +3560,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
@@ -3641,7 +3715,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
- if (!IS_QLA8XXX_TYPE(vha->hw))
+ if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_XGMAC_STATS;
@@ -3680,7 +3754,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
- if (!IS_QLA8XXX_TYPE(vha->hw))
+ if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_DCBX_PARAMS;
@@ -3775,7 +3849,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA8XXX_TYPE(vha->hw))
+ if (IS_CNA_CAPABLE(vha->hw))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3813,7 +3887,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
- if (IS_QLA8XXX_TYPE(ha)) {
+ if (IS_CNA_CAPABLE(ha)) {
mcp->mb[1] |= BIT_15;
mcp->mb[2] = vha->fcoe_fcf_idx;
}
@@ -3831,13 +3905,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA8XXX_TYPE(ha))
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
@@ -3976,6 +4051,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
return rval;
}
+
int
qla2x00_get_data_rate(scsi_qla_host_t *vha)
{
@@ -3993,6 +4069,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->mb[1] = 0;
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA83XX(ha))
+ mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -4018,7 +4096,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_PORT_CONFIG;
mcp->out_mb = MBX_0;
@@ -4299,6 +4377,90 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
}
int
+qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_SET_LED_CONFIG;
+ mcp->mb[1] = led_cfg[0];
+ mcp->mb[2] = led_cfg[1];
+ if (IS_QLA8031(ha)) {
+ mcp->mb[3] = led_cfg[2];
+ mcp->mb[4] = led_cfg[3];
+ mcp->mb[5] = led_cfg[4];
+ mcp->mb[6] = led_cfg[5];
+ }
+
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA8031(ha))
+ mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1134,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_GET_LED_CONFIG;
+
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ if (IS_QLA8031(ha))
+ mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1137,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ led_cfg[0] = mcp->mb[1];
+ led_cfg[1] = mcp->mb[2];
+ if (IS_QLA8031(ha)) {
+ led_cfg[2] = mcp->mb[3];
+ led_cfg[3] = mcp->mb[4];
+ led_cfg[4] = mcp->mb[5];
+ led_cfg[5] = mcp->mb[6];
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
{
int rval;
@@ -4321,7 +4483,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
mcp->out_mb = MBX_7|MBX_0;
mcp->in_mb = MBX_0;
- mcp->tov = 30;
+ mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -4335,3 +4497,75 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
return rval;
}
+
+int
+qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA83XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_WRITE_REMOTE_REG;
+ mcp->mb[1] = LSW(reg);
+ mcp->mb[2] = MSW(reg);
+ mcp->mb[3] = LSW(data);
+ mcp->mb[4] = MSW(data);
+ mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1131,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1132,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x113b,
+ "Implicit LOGO Unsupported.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_mbx, vha, 0x113c, "Entered %s.\n", __func__);
+
+ /* Perform Implicit LOGO. */
+ mcp->mb[0] = MBC_PORT_LOGOUT;
+ mcp->mb[1] = fcport->loop_id;
+ mcp->mb[10] = BIT_15;
+ mcp->out_mb = MBX_10|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS)
+ ql_dbg(ql_dbg_mbx, vha, 0x113d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ else
+ ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+
+ return rval;
+}
+
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f488cc69fc79..aa062a1b0ca4 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -479,7 +479,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->max_channel = MAX_BUSES - 1;
host->max_lun = ql2xmaxlun;
host->unique_id = host->host_no;
- host->max_id = MAX_TARGETS_2200;
+ host->max_id = ha->max_fibre_devices;
host->transportt = qla2xxx_transport_vport_template;
ql_dbg(ql_dbg_vport, vha, 0xa007,
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 270ba3130fde..f0528539bbbc 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -908,27 +908,37 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
return 0;
}
+int
+qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
+{
+ uint32_t off_value, rval = 0;
+
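+ /* The upper 16 bits of the offset select the 64K CRB window. */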
+ WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
+ (off & 0xFFFF0000));
+
+ /* Read back value to make sure write has gone through */
+ RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ off_value = (off & 0x0000FFFF);
+
+ if (flag)
+ WRT_REG_DWORD((void *)
+ (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
+ data);
+ else
+ rval = RD_REG_DWORD((void *)
+ (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
+
+ return rval;
+}
+
static int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ /* Dword reads to flash. */
+ qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
+ *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE +
+ (addr & 0x0000FFFF), 0, 0);
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
- qla82xx_wait_rom_busy(ha);
- if (qla82xx_wait_rom_done(ha)) {
- ql_log(ql_log_fatal, vha, 0x00ba,
- "Error waiting for rom done.\n");
- return -1;
- }
- /* Reset abyte_cnt and dummy_byte_cnt */
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- udelay(10);
- cond_resched();
- qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
- *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
return 0;
}
@@ -2040,8 +2050,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- printk(KERN_INFO
- "%s(): NULL response queue pointer.\n", __func__);
+ ql_log(ql_log_info, NULL, 0xb054,
+ "%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -3136,12 +3146,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
fw_minor_version = ha->fw_minor_version;
fw_subminor_version = ha->fw_subminor_version;
- rval = qla2x00_get_fw_version(vha, &ha->fw_major_version,
- &ha->fw_minor_version, &ha->fw_subminor_version,
- &ha->fw_attributes, &ha->fw_memory_size,
- ha->mpi_version, &ha->mpi_capabilities,
- ha->phy_version);
-
+ rval = qla2x00_get_fw_version(vha);
if (rval != QLA_SUCCESS)
return rval;
@@ -3150,7 +3155,6 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
if (fw_major_version != ha->fw_major_version ||
fw_minor_version != ha->fw_minor_version ||
fw_subminor_version != ha->fw_subminor_version) {
-
ql_log(ql_log_info, vha, 0xb02d,
"Firmware version differs "
"Previous version: %d:%d:%d - "
@@ -3614,7 +3618,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- if (!sp->ctx ||
+ if (!sp->u.scmd.ctx ||
(sp->flags & SRB_FCP_CMND_DMA_VALID)) {
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
@@ -3645,29 +3649,6 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
}
/* Minidump related functions */
-int
-qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
-{
- uint32_t off_value, rval = 0;
-
- WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
- (off & 0xFFFF0000));
-
- /* Read back value to make sure write has gone through */
- RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
- off_value = (off & 0x0000FFFF);
-
- if (flag)
- WRT_REG_DWORD((void *)
- (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
- data);
- else
- rval = RD_REG_DWORD((void *)
- (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
-
- return rval;
-}
-
static int
qla82xx_minidump_process_control(scsi_qla_host_t *vha,
qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
@@ -4117,8 +4098,9 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
data_ptr = (uint32_t *)ha->md_dump;
if (ha->fw_dumped) {
- ql_log(ql_log_info, vha, 0xb037,
- "Firmware dump available to retrive\n");
+ ql_log(ql_log_warn, vha, 0xb037,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n", ha->fw_dump);
goto md_failed;
}
@@ -4161,7 +4143,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
total_data_size = ha->md_dump_size;
- ql_dbg(ql_log_info, vha, 0xb03d,
+ ql_dbg(ql_dbg_p3p, vha, 0xb03d,
"Total minidump data_size 0x%x to be captured\n", total_data_size);
/* Check whether template obtained is valid */
@@ -4284,7 +4266,7 @@ skip_nxt_entry:
}
if (data_collected != total_data_size) {
- ql_dbg(ql_log_warn, vha, 0xb043,
+ ql_dbg(ql_dbg_p3p, vha, 0xb043,
"MiniDump data mismatch: Data collected: [0x%x],"
"total_data_size:[0x%x]\n",
data_collected, total_data_size);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57a226be339a..4ac50e274661 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -888,7 +888,8 @@ struct ct6_dsd {
};
#define MBC_TOGGLE_INTERRUPT 0x10
-#define MBC_SET_LED_CONFIG 0x125
+#define MBC_SET_LED_CONFIG 0x125 /* FCoE specific LED control */
+#define MBC_GET_LED_CONFIG 0x126 /* FCoE specific LED control */
/* Flash offset */
#define FLT_REG_BOOTLOAD_82XX 0x72
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 036030c95339..a2f999273a5f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -304,7 +304,6 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
-static void qla2x00_sp_free_dma(srb_t *);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
@@ -559,28 +558,75 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
return str;
}
-static inline srb_t *
-qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
- struct scsi_cmnd *cmd)
+void
+qla2x00_sp_free_dma(void *vha, void *ptr)
{
- srb_t *sp;
- struct qla_hw_data *ha = vha->hw;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ void *ctx = GET_CMD_CTX_SP(sp);
- sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
- if (!sp) {
- ql_log(ql_log_warn, vha, 0x3006,
- "Memory allocation failed for sp.\n");
- return sp;
+ if (sp->flags & SRB_DMA_VALID) {
+ scsi_dma_unmap(cmd);
+ sp->flags &= ~SRB_DMA_VALID;
}
- atomic_set(&sp->ref_count, 1);
- sp->fcport = fcport;
- sp->cmd = cmd;
- sp->flags = 0;
- CMD_SP(cmd) = (void *)sp;
- sp->ctx = NULL;
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
- return sp;
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* The DSD list is guaranteed to be non-empty here. */
+ qla2x00_clean_dsd_pool(ha, sp);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, ctx,
+ ((struct crc_context *)ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+ ctx1->fcp_cmnd_dma);
+ list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ mempool_free(ctx1, ha->ctx_mempool);
+ ctx1 = NULL;
+ }
+
+ CMD_SP(cmd) = NULL;
+ mempool_free(sp, ha->srb_mempool);
+}
+
+static void
+qla2x00_sp_compl(void *data, void *ptr, int res)
+{
+ struct qla_hw_data *ha = (struct qla_hw_data *)data;
+ srb_t *sp = (srb_t *)ptr;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+ cmd->result = res;
+
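+ /* Both the submit and abort paths hold a reference; only the
+ * final put frees the DMA resources and completes the command. */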
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, GET_CMD_SP(sp));
+ if (ql2xextended_error_logging & ql_dbg_io)
+ BUG();
+ return;
+ }
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ qla2x00_sp_free_dma(ha, sp);
+ cmd->scsi_done(cmd);
}
static int
@@ -644,10 +690,17 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
- sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
+ sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
+ sp->u.scmd.cmd = cmd;
+ sp->type = SRB_SCSI_CMD;
+ atomic_set(&sp->ref_count, 1);
+ CMD_SP(cmd) = (void *)sp;
+ sp->free = qla2x00_sp_free_dma;
+ sp->done = qla2x00_sp_compl;
+
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io, vha, 0x3013,
@@ -658,8 +711,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
qc24_host_busy_free_sp:
- qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_sp_free_dma(ha, sp);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -893,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- qla2x00_sp_compl(ha, sp);
+ sp->done(ha, sp, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Did the command return during mailbox execution? */
@@ -925,6 +977,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
srb_t *sp;
+ struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
@@ -935,28 +988,29 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
- if ((sp->ctx) && !IS_PROT_IO(sp))
+ if (sp->type != SRB_SCSI_CMD)
continue;
if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
match = 0;
+ cmd = GET_CMD_SP(sp);
switch (type) {
case WAIT_HOST:
match = 1;
break;
case WAIT_TARGET:
- match = sp->cmd->device->id == t;
+ match = cmd->device->id == t;
break;
case WAIT_LUN:
- match = (sp->cmd->device->id == t &&
- sp->cmd->device->lun == l);
+ match = (cmd->device->id == t &&
+ cmd->device->lun == l);
break;
}
if (!match)
continue;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- status = qla2x00_eh_wait_on_command(sp->cmd);
+ status = qla2x00_eh_wait_on_command(cmd);
spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1219,7 +1273,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
- if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
+ if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -1249,7 +1303,6 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
int que, cnt;
unsigned long flags;
srb_t *sp;
- struct srb_ctx *ctx;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
@@ -1262,31 +1315,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (!sp->ctx ||
- (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
- IS_PROT_IO(sp)) {
- sp->cmd->result = res;
- qla2x00_sp_compl(ha, sp);
- } else {
- ctx = sp->ctx;
- if (ctx->type == SRB_ELS_CMD_RPT ||
- ctx->type == SRB_ELS_CMD_HST ||
- ctx->type == SRB_CT_CMD) {
- struct fc_bsg_job *bsg_job =
- ctx->u.bsg_job;
- if (bsg_job->request->msgcode
- == FC_BSG_HST_CT)
- kfree(sp->fcport);
- bsg_job->req->errors = 0;
- bsg_job->reply->result = res;
- bsg_job->job_done(bsg_job);
- kfree(sp->ctx);
- mempool_free(sp,
- ha->srb_mempool);
- } else {
- ctx->u.iocb_cmd->free(sp);
- }
- }
+ sp->done(vha, sp, res);
}
}
}
@@ -1488,9 +1517,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
uint16_t msix;
int cpus;
- if (IS_QLA82XX(ha))
- return qla82xx_iospace_config(ha);
-
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
@@ -1593,6 +1619,96 @@ iospace_error_exit:
}
+static int
+qla83xx_iospace_config(struct qla_hw_data *ha)
+{
+ uint16_t msix;
+ int cpus;
+
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
+ "Invalid PCI mem region size (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* 64bit PCI BAR - BAR2 will correspond to region 4 */
+ /* 83XX/26XX always use MQ type access for queues:
+ * - mbar 2, a.k.a region 4 */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
+ pci_resource_len(ha->pdev, 4));
+
+ if (!ha->mqiobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
+ "BAR2/region4 not enabled\n");
+ goto mqiobase_exit;
+ }
+
+ ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
+ pci_resource_len(ha->pdev, 2));
+ if (ha->msixbase) {
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev,
+ QLA_83XX_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ }
+ ql_log_pci(ql_log_info, ha->pdev, 0x011c,
+ "MSI-X vector count: %d.\n", msix);
+ } else
+ ql_log_pci(ql_log_info, ha->pdev, 0x011e,
+ "BAR 1 not enabled.\n");
+
+mqiobase_exit:
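+ /* One MSI-X vector per response queue plus one for the base queue. */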
+ ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
+ "MSIX Count:%d.\n", ha->msix_count);
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
static struct isp_operations qla2100_isp_ops = {
.pci_config = qla2100_pci_config,
.reset_chip = qla2x00_reset_chip,
@@ -1769,7 +1885,7 @@ static struct isp_operations qla81xx_isp_ops = {
.fw_dump = qla81xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
- .beacon_blink = qla24xx_beacon_blink,
+ .beacon_blink = qla83xx_beacon_blink,
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
@@ -1815,6 +1931,43 @@ static struct isp_operations qla82xx_isp_ops = {
.iospace_config = qla82xx_iospace_config,
};
+static struct isp_operations qla83xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla81xx_update_fw_options,
+ .load_risc = qla81xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla83xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla83xx_beacon_blink,
+ .read_optrom = qla25xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla83xx_iospace_config,
+};
+
static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
@@ -1909,6 +2062,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
/* Initialize 82XX ISP flags */
qla82xx_init_flags(ha);
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2031:
+ ha->device_type |= DT_ISP2031;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8031:
+ ha->device_type |= DT_ISP8031;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
}
if (IS_QLA82XX(ha))
@@ -1966,7 +2135,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
char pci_info[30];
char fw_str[30];
struct scsi_host_template *sht;
- int bars, max_id, mem_only = 0;
+ int bars, mem_only = 0;
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
@@ -1980,7 +2149,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2020,9 +2191,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
qla2x00_set_isp_flags(ha);
/* Set EEH reset type to fundamental if required by hba */
- if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
pdev->needs_freset = 1;
- }
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
@@ -2030,9 +2200,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->optrom_size = OPTROM_SIZE_2300;
/* Assign ISP specific operations. */
- max_id = MAX_TARGETS_2200;
if (IS_QLA2100(ha)) {
- max_id = MAX_TARGETS_2100;
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
req_length = REQUEST_ENTRY_CNT_2100;
rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2044,6 +2213,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA2200(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
req_length = REQUEST_ENTRY_CNT_2200;
rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2055,6 +2225,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA23XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_2200;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2068,6 +2239,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2300_isp_ops;
} else if (IS_QLA24XX_TYPE(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2082,6 +2254,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA25XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2096,6 +2269,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA81XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2110,6 +2284,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
} else if (IS_QLA82XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_82XX;
rsp_length = RESPONSE_ENTRY_CNT_82XX;
@@ -2123,14 +2298,31 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA83XX(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla83xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
}
+
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
"mbx_count=%d, req_length=%d, "
"rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
- "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
+ "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
+ "max_fibre_devices=%d.\n",
ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
- ha->nvram_npiv_size);
+ ha->nvram_npiv_size, ha->max_fibre_devices);
ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
"isp_ops=%p, flash_conf_off=%d, "
"flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
@@ -2204,7 +2396,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- host->max_id = max_id;
+ host->max_id = ha->max_fibre_devices;
host->this_id = 255;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
@@ -2251,7 +2443,7 @@ que_init:
req->req_q_out = &ha->iobase->isp24.req_q_out;
rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
- if (ha->mqenable) {
+ if (ha->mqenable || IS_QLA83XX(ha)) {
req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2552,6 +2744,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
if (ha->mqiobase)
iounmap(ha->mqiobase);
+
+ if (IS_QLA83XX(ha) && ha->msixbase)
+ iounmap(ha->msixbase);
}
pci_release_selected_regions(ha->pdev, ha->bars);
@@ -2751,8 +2946,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->init_cb)
goto fail;
- ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
- &ha->gid_list_dma, GFP_KERNEL);
+ ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
goto fail_free_init_cb;
@@ -2893,7 +3088,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_QLA8XXX_TYPE(ha)) {
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
@@ -2967,7 +3162,8 @@ fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
- dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ ha->gid_list,
ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
@@ -3045,9 +3241,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->sfp_data)
dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
- if (ha->edc_data)
- dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
-
if (ha->ms_iocb)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
@@ -3062,8 +3255,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_pool_destroy(ha->s_dma_pool);
if (ha->gid_list)
- dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
- ha->gid_list_dma);
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ ha->gid_list, ha->gid_list_dma);
if (IS_QLA82XX(ha)) {
if (!list_empty(&ha->gbl_dsd_list)) {
@@ -3095,6 +3288,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
vfree(ha->optrom_buffer);
kfree(ha->nvram);
kfree(ha->npiv_info);
+ kfree(ha->swl);
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
@@ -3661,75 +3855,6 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
}
}
-static void
-qla2x00_sp_free_dma(srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
- struct qla_hw_data *ha = sp->fcport->vha->hw;
-
- if (sp->flags & SRB_DMA_VALID) {
- scsi_dma_unmap(cmd);
- sp->flags &= ~SRB_DMA_VALID;
- }
-
- if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
- dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
- scsi_prot_sg_count(cmd), cmd->sc_data_direction);
- sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
- }
-
- if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
- /* List assured to be having elements */
- qla2x00_clean_dsd_pool(ha, sp);
- sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
- }
-
- if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
- dma_pool_free(ha->dl_dma_pool, sp->ctx,
- ((struct crc_context *)sp->ctx)->crc_ctx_dma);
- sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
- }
-
- if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx = sp->ctx;
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
- ctx->fcp_cmnd_dma);
- list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx->dsd_use_cnt;
- mempool_free(sp->ctx, ha->ctx_mempool);
- sp->ctx = NULL;
- }
-
- CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
-
- qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
- cmd->scsi_done(cmd);
-}
-
-void
-qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- if (atomic_read(&sp->ref_count) == 0) {
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
- "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
- sp, sp->cmd);
- if (ql2xextended_error_logging & ql_dbg_io)
- BUG();
- return;
- }
- if (!atomic_dec_and_test(&sp->ref_count))
- return;
- qla2x00_sp_final_compl(ha, sp);
-}
-
/**************************************************************************
* qla2x00_timer
*
@@ -3800,7 +3925,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
- if (sp->ctx && !IS_PROT_IO(sp))
+ if (sp->type != SRB_SCSI_CMD)
continue;
sfcp = sp->fcport;
if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3889,7 +4014,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 8
+#define FW_BLOBS 10
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -3898,6 +4023,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP25XX 5
#define FW_ISP81XX 6
#define FW_ISP82XX 7
+#define FW_ISP2031 8
+#define FW_ISP8031 9
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3907,6 +4034,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP25XX "ql2500_fw.bin"
#define FW_FILE_ISP81XX "ql8100_fw.bin"
#define FW_FILE_ISP82XX "ql8200_fw.bin"
+#define FW_FILE_ISP2031 "ql2600_fw.bin"
+#define FW_FILE_ISP8031 "ql8300_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
@@ -3919,6 +4048,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP25XX, },
{ .name = FW_FILE_ISP81XX, },
{ .name = FW_FILE_ISP82XX, },
+ { .name = FW_FILE_ISP2031, },
+ { .name = FW_FILE_ISP8031, },
};
struct fw_blob *
@@ -3927,7 +4058,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct fw_blob *blob;
- blob = NULL;
if (IS_QLA2100(ha)) {
blob = &qla_fw_blobs[FW_ISP21XX];
} else if (IS_QLA2200(ha)) {
@@ -3944,6 +4074,12 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP81XX];
} else if (IS_QLA82XX(ha)) {
blob = &qla_fw_blobs[FW_ISP82XX];
+ } else if (IS_QLA2031(ha)) {
+ blob = &qla_fw_blobs[FW_ISP2031];
+ } else if (IS_QLA8031(ha)) {
+ blob = &qla_fw_blobs[FW_ISP8031];
+ } else {
+ return NULL;
}
mutex_lock(&qla_fw_lock);
@@ -4265,6 +4401,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ 0 },
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 16bc72844a97..3c13c0a6be63 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,6 +568,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
else if (IS_QLA82XX(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_82;
goto end;
+ } else if (IS_QLA83XX(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_83;
+ goto end;
}
/* Begin with first PCI expansion ROM header. */
buf = (uint8_t *)req->ring;
@@ -721,13 +724,22 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
le32_to_cpu(region->size));
switch (le32_to_cpu(region->code) & 0xff) {
+ case FLT_REG_FCOE_FW:
+ if (!IS_QLA8031(ha))
+ break;
+ ha->flt_region_fw = start;
+ break;
case FLT_REG_FW:
+ if (IS_QLA8031(ha))
+ break;
ha->flt_region_fw = start;
break;
case FLT_REG_BOOT_CODE:
ha->flt_region_boot = start;
break;
case FLT_REG_VPD_0:
+ if (IS_QLA8031(ha))
+ break;
ha->flt_region_vpd_nvram = start;
if (IS_QLA82XX(ha))
break;
@@ -735,16 +747,20 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
- if (IS_QLA82XX(ha))
+ if (IS_QLA82XX(ha) || IS_QLA8031(ha))
break;
if (!ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_NVRAM_0:
+ if (IS_QLA8031(ha))
+ break;
if (ha->flags.port0)
ha->flt_region_nvram = start;
break;
case FLT_REG_NVRAM_1:
+ if (IS_QLA8031(ha))
+ break;
if (!ha->flags.port0)
ha->flt_region_nvram = start;
break;
@@ -785,6 +801,31 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_VPD_82XX:
ha->flt_region_vpd = start;
break;
+ case FLT_REG_FCOE_VPD_0:
+ if (!IS_QLA8031(ha))
+ break;
+ ha->flt_region_vpd_nvram = start;
+ if (ha->flags.port0)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_FCOE_VPD_1:
+ if (!IS_QLA8031(ha))
+ break;
+ if (!ha->flags.port0)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_FCOE_NVRAM_0:
+ if (!IS_QLA8031(ha))
+ break;
+ if (ha->flags.port0)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_FCOE_NVRAM_1:
+ if (!IS_QLA8031(ha))
+ break;
+ if (!ha->flags.port0)
+ ha->flt_region_nvram = start;
+ break;
}
}
goto done;
@@ -804,15 +845,12 @@ no_flash_data:
def_npiv_conf0[def] : def_npiv_conf1[def];
done:
ql_dbg(ql_dbg_init, vha, 0x004a,
- "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
- loc, ha->flt_region_boot,
- ha->flt_region_fw, ha->flt_region_vpd_nvram,
- ha->flt_region_vpd);
- ql_dbg(ql_dbg_init, vha, 0x004b,
- "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
- ha->flt_region_nvram,
- ha->flt_region_fdt, ha->flt_region_flt,
- ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
+ "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x "
+ "fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
+ loc, ha->flt_region_boot, ha->flt_region_fw,
+ ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram,
+ ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf,
+ ha->flt_region_fcp_prio);
}
static void
@@ -948,7 +986,8 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
uint32_t flt_addr;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -974,7 +1013,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
struct qla_npiv_entry *entry;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
return;
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1144,8 +1184,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
struct qla_hw_data *ha = vha->hw;
/* Prepare burst-capable write on supported ISPs. */
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) &&
- dwords > OPTROM_BURST_DWORDS) {
+ if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+ !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
@@ -1619,6 +1659,71 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+void
+qla83xx_beacon_blink(struct scsi_qla_host *vha)
+{
+ uint32_t led_select_value;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t led_cfg[6];
+ uint16_t orig_led_cfg[6];
+
+ if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
+ return;
+
+ if (IS_QLA2031(ha) && ha->beacon_blink_led) {
+ if (ha->flags.port0)
+ led_select_value = 0x00201320;
+ else
+ led_select_value = 0x00201328;
+
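+ /* One blink per call: toggle the LED pair via remote register writes. */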
+ qla83xx_write_remote_reg(vha, led_select_value, 0x40002000);
+ qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40002000);
+ msleep(1000);
+ qla83xx_write_remote_reg(vha, led_select_value, 0x40004000);
+ qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40004000);
+ } else if ((IS_QLA8031(ha) || IS_QLA81XX(ha)) && ha->beacon_blink_led) {
+ int rval;
+
+ /* Save Current */
+ rval = qla81xx_get_led_config(vha, orig_led_cfg);
+ /* Do the blink */
+ if (rval == QLA_SUCCESS) {
+ if (IS_QLA81XX(ha)) {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0;
+ led_cfg[3] = 0;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0;
+ } else {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x4000;
+ led_cfg[2] = 0x4000;
+ led_cfg[3] = 0x2000;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0x2000;
+ }
+ rval = qla81xx_set_led_config(vha, led_cfg);
+ msleep(1000);
+ if (IS_QLA81XX(ha)) {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0;
+ } else {
+ led_cfg[0] = 0x4000;
+ led_cfg[1] = 0x2000;
+ led_cfg[2] = 0x4000;
+ led_cfg[3] = 0x4000;
+ led_cfg[4] = 0;
+ led_cfg[5] = 0x2000;
+ }
+ rval = qla81xx_set_led_config(vha, led_cfg);
+ }
+ /* On exit, restore original (presumes no status change) */
+ qla81xx_set_led_config(vha, orig_led_cfg);
+ }
+}
+
int
qla24xx_beacon_on(struct scsi_qla_host *vha)
{
@@ -1630,6 +1735,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
if (IS_QLA82XX(ha))
return QLA_SUCCESS;
+ if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+ goto skip_gpio; /* let blink handle it */
+
if (ha->beacon_blink_led == 0) {
/* Enable firmware for update */
ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
@@ -1644,6 +1752,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
return QLA_FUNCTION_FAILED;
}
+ if (IS_QLA2031(ha))
+ goto skip_gpio;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
gpio_data = RD_REG_DWORD(&reg->gpiod);
@@ -1658,6 +1769,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
/* So all colors blink together. */
ha->beacon_color_state = 0;
+skip_gpio:
/* Let the per HBA timer kick off the blinking process. */
ha->beacon_blink_led = 1;
@@ -1676,6 +1788,13 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
return QLA_SUCCESS;
ha->beacon_blink_led = 0;
+
+ if (IS_QLA2031(ha))
+ goto set_fw_options;
+
+ if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+ return QLA_SUCCESS;
+
ha->beacon_color_state = QLA_LED_ALL_ON;
ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */
@@ -1690,6 +1809,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
RD_REG_DWORD(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+set_fw_options:
ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index bfe68545203f..7f2492e88be7 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,8 +150,6 @@
#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
-#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
-#define LSW(x) ((uint16_t)(x))
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
@@ -223,6 +221,15 @@ struct srb {
uint16_t reserved2;
};
+/* Mailbox request block structure */
+struct mrb {
+ struct scsi_qla_host *ha;
+ struct mbox_cmd_iocb *mbox;
+ uint32_t mbox_cmd;
+ uint16_t iocb_cnt; /* Number of used iocbs */
+ uint32_t pid;
+};
+
/*
* Asynchronous Event Queue structure
*/
@@ -265,7 +272,7 @@ struct ddb_entry {
* retried */
uint32_t default_time2wait; /* Default Min time between
* relogins (+aens) */
-
+ uint16_t chap_tbl_idx;
};
struct qla_ddb_index {
@@ -284,6 +291,7 @@ struct ql4_tuple_ddb {
uint16_t options;
#define DDB_OPT_IPV6 0x0e0e
#define DDB_OPT_IPV4 0x0f0f
+ uint8_t isid[6];
};
/*
@@ -303,7 +311,28 @@ struct ql4_tuple_ddb {
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
+enum qla4_work_type {
+ QLA4_EVENT_AEN,
+ QLA4_EVENT_PING_STATUS,
+};
+struct qla4_work_evt {
+ struct list_head list;
+ enum qla4_work_type type;
+ union {
+ struct {
+ enum iscsi_host_event_code code;
+ uint32_t data_size;
+ uint8_t data[0];
+ } aen;
+ struct {
+ uint32_t status;
+ uint32_t pid;
+ uint32_t data_size;
+ uint8_t data[0];
+ } ping;
+ } u;
+};
struct ql82xx_hw_data {
/* Offsets for flash/nvram access (set to ~0 if not used). */
@@ -657,6 +686,7 @@ struct scsi_qla_host {
struct dma_pool *chap_dma_pool;
uint8_t *chap_list; /* CHAP table cache */
struct mutex chap_sem;
+
#define CHAP_DMA_BLOCK_SIZE 512
struct workqueue_struct *task_wq;
unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG];
@@ -674,6 +704,15 @@ struct scsi_qla_host {
uint16_t sec_ddb_idx;
int is_reset;
uint16_t temperature;
+
+ /* event work list */
+ struct list_head work_list;
+ spinlock_t work_lock;
+
+ /* mbox iocb */
+#define MAX_MRB 128
+ struct mrb *active_mrb_array[MAX_MRB];
+ uint32_t mrb_index;
};
struct ql4_task_data {
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 7825c141bc1a..210cd1d64475 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -331,6 +331,10 @@ struct qla_flt_region {
/* Mailbox command definitions */
#define MBOX_CMD_ABOUT_FW 0x0009
#define MBOX_CMD_PING 0x000B
+#define PING_IPV6_PROTOCOL_ENABLE 0x1
+#define PING_IPV6_LINKLOCAL_ADDR 0x4
+#define PING_IPV6_ADDR0 0x8
+#define PING_IPV6_ADDR1 0xC
#define MBOX_CMD_ENABLE_INTRS 0x0010
#define INTR_DISABLE 0
#define INTR_ENABLE 1
@@ -396,6 +400,10 @@ struct qla_flt_region {
#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
#define FW_ADDSTATE_LINK_UP 0x0010
#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
+#define FW_ADDSTATE_LINK_SPEED_10MBPS 0x0100
+#define FW_ADDSTATE_LINK_SPEED_100MBPS 0x0200
+#define FW_ADDSTATE_LINK_SPEED_1GBPS 0x0400
+#define FW_ADDSTATE_LINK_SPEED_10GBPS 0x0800
#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
#define IPV6_DEFAULT_DDB_ENTRY 0x0001
@@ -918,6 +926,8 @@ struct qla4_header {
#define ET_CMND_T3 0x19
#define ET_PASSTHRU0 0x3A
#define ET_PASSTHRU_STATUS 0x3C
+#define ET_MBOX_CMD 0x38
+#define ET_MBOX_STATUS 0x39
uint8_t entryStatus;
uint8_t systemDefined;
@@ -1118,6 +1128,20 @@ struct passthru_status {
uint8_t res4[16]; /* 30-3F */
};
+struct mbox_cmd_iocb {
+ struct qla4_header hdr; /* 00-03 */
+ uint32_t handle; /* 04-07 */
+ uint32_t in_mbox[8]; /* 08-27 */
+ uint32_t res1[6]; /* 28-3F */
+};
+
+struct mbox_status_iocb {
+ struct qla4_header hdr; /* 00-03 */
+ uint32_t handle; /* 04-07 */
+ uint32_t out_mbox[8]; /* 08-27 */
+ uint32_t res1[6]; /* 28-3F */
+};
+
/*
* ISP queue - response queue entry definition.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index d0dd4b330206..910536667cf5 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -81,6 +81,8 @@ int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
uint32_t offset, uint32_t length, uint32_t options);
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
+int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+ char *password, int bidi, uint16_t *chap_index);
void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
@@ -181,6 +183,13 @@ int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state);
void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha, uint32_t aen_code,
+ uint32_t data_size, uint8_t *data);
+int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
+ uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
+int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
+ uint32_t status, uint32_t pid,
+ uint32_t data_size, uint8_t *data);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90614f38b55d..90ee5d8fa731 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -86,6 +86,7 @@ static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha)
int qla4xxx_init_rings(struct scsi_qla_host *ha)
{
unsigned long flags = 0;
+ int i;
/* Initialize request queue. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -125,6 +126,10 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
qla4xxx_init_response_q_entries(ha);
+ /* Initialize mailbox active array */
+ for (i = 0; i < MAX_MRB; i++)
+ ha->active_mrb_array[i] = NULL;
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 410669351906..2a2022a6bb9b 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -445,3 +445,95 @@ queuing_error:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return ret;
}
+
+static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
+{
+ struct mrb *mrb;
+
+ mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
+ if (!mrb)
+ return mrb;
+
+ mrb->ha = ha;
+ return mrb;
+}
+
+static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
+ uint32_t *in_mbox)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t i;
+ unsigned long flags;
+ uint32_t index = 0;
+
+ /* Acquire hardware specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Get pointer to the queue entry for the marker */
+ rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
+ if (rval != QLA_SUCCESS)
+ goto exit_mbox_iocb;
+
+ index = ha->mrb_index;
+ /* get valid mrb index */
+ for (i = 0; i < MAX_MRB; i++) {
+ index++;
+ if (index == MAX_MRB)
+ index = 1;
+ if (ha->active_mrb_array[index] == NULL) {
+ ha->mrb_index = index;
+ break;
+ }
+ }
+
+ mrb->iocb_cnt = 1;
+ ha->active_mrb_array[index] = mrb;
+ mrb->mbox->handle = index;
+ mrb->mbox->hdr.entryType = ET_MBOX_CMD;
+ mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
+ memcpy(mrb->mbox->in_mbox, in_mbox, 32);
+ mrb->mbox_cmd = in_mbox[0];
+ wmb();
+
+ ha->isp_ops->queue_iocb(ha);
+exit_mbox_iocb:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return rval;
+}
+
+int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
+ uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
+{
+ uint32_t in_mbox[8];
+ struct mrb *mrb = NULL;
+ int rval = QLA_SUCCESS;
+
+ memset(in_mbox, 0, sizeof(in_mbox));
+
+ mrb = qla4xxx_get_new_mrb(ha);
+ if (!mrb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: failed to get new mrb\n",
+ __func__));
+ rval = QLA_ERROR;
+ goto exit_ping;
+ }
+
+ in_mbox[0] = MBOX_CMD_PING;
+ in_mbox[1] = options;
+ memcpy(&in_mbox[2], &ipaddr[0], 4);
+ memcpy(&in_mbox[3], &ipaddr[4], 4);
+ memcpy(&in_mbox[4], &ipaddr[8], 4);
+ memcpy(&in_mbox[5], &ipaddr[12], 4);
+ in_mbox[6] = payload_size;
+
+ mrb->pid = pid;
+ rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
+
+ if (rval != QLA_SUCCESS)
+ goto exit_ping;
+
+ return rval;
+exit_ping:
+ kfree(mrb);
+ return rval;
+}
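
For reference, the send path above tracks every outstanding mailbox IOCB in a fixed-size active array, choosing a slot by a circular scan that starts after the last index handed out and never uses slot 0 (a zero handle could not be distinguished on completion). A minimal user-space sketch of that allocator follows; MAX_MRB's real value lives in ql4_def.h and is not shown in this patch, so 128 is a placeholder, and unlike the hunk above the sketch reports failure explicitly when every slot is busy.

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_MRB 128                     /* placeholder; real bound is in ql4_def.h */

    static void *active_mrb_array[MAX_MRB];
    static unsigned int mrb_index;          /* last slot handed out */

    /* Circular scan for a free slot; slot 0 is never used as a handle. */
    static int alloc_mrb_slot(void *mrb)
    {
        unsigned int i, index = mrb_index;

        for (i = 0; i < MAX_MRB; i++) {
            index++;
            if (index == MAX_MRB)
                index = 1;                  /* wrap around, skipping 0 */
            if (active_mrb_array[index] == NULL) {
                active_mrb_array[index] = mrb;
                mrb_index = index;
                return (int)index;          /* becomes mbox->handle */
            }
        }
        return -1;                          /* every slot is busy */
    }

    int main(void)
    {
        int dummy;
        printf("first handle: %d\n", alloc_mrb_slot(&dummy));   /* prints 1 */
        return 0;
    }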
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 95828862eea0..fc542a9bb106 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -385,6 +385,71 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
queue_work(ha->task_wq, &task_data->task_work);
}
+static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
+ uint32_t index)
+{
+ struct mrb *mrb = NULL;
+
+ /* validate handle and remove from active array */
+ if (index >= MAX_MRB)
+ return mrb;
+
+ mrb = ha->active_mrb_array[index];
+ ha->active_mrb_array[index] = NULL;
+ if (!mrb)
+ return mrb;
+
+ /* update counters */
+ ha->req_q_count += mrb->iocb_cnt;
+ ha->iocb_cnt -= mrb->iocb_cnt;
+
+ return mrb;
+}
+
+static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
+ struct mbox_status_iocb *mbox_sts_entry)
+{
+ struct mrb *mrb;
+ uint32_t status;
+ uint32_t data_size;
+
+ mrb = qla4xxx_del_mrb_from_active_array(ha,
+ le32_to_cpu(mbox_sts_entry->handle));
+
+ if (mrb == NULL) {
+ ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
+ mbox_sts_entry->handle);
+ return;
+ }
+
+ switch (mrb->mbox_cmd) {
+ case MBOX_CMD_PING:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
+ "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
+ __func__, mrb->mbox_cmd,
+ mbox_sts_entry->out_mbox[0],
+ mbox_sts_entry->out_mbox[6]));
+
+ if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
+ status = ISCSI_PING_SUCCESS;
+ else
+ status = mbox_sts_entry->out_mbox[6];
+
+ data_size = sizeof(mbox_sts_entry->out_mbox);
+
+ qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
+ (uint8_t *) mbox_sts_entry->out_mbox);
+ break;
+
+ default:
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
+ "0x%x\n", __func__, mrb->mbox_cmd));
+ }
+
+ kfree(mrb);
+ return;
+}
+
/**
* qla4xxx_process_response_queue - process response queue completions
* @ha: Pointer to host adapter structure.
@@ -461,6 +526,13 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
"ignoring\n", ha->host_no, __func__));
break;
+ case ET_MBOX_STATUS:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: mbox status IOCB\n", __func__));
+ qla4xxx_mbox_status_entry(ha,
+ (struct mbox_status_iocb *)sts_entry);
+ break;
+
default:
/*
* Invalid entry in response queue, reset RISC
@@ -576,6 +648,9 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
+ qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
+ sizeof(mbox_sts),
+ (uint8_t *) mbox_sts);
break;
case MBOX_ASTS_LINK_DOWN:
@@ -584,6 +659,9 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
+ qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
+ sizeof(mbox_sts),
+ (uint8_t *) mbox_sts);
break;
case MBOX_ASTS_HEARTBEAT:
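
The completion side above is the mirror image: the firmware echoes the handle back in the status IOCB, and the driver validates it, detaches the MRB from the active array, and returns its IOCB slots to the request queue before dispatching on mbox_cmd. A self-contained sketch of just the lookup, with the queue-counter bookkeeping omitted:

    #include <stddef.h>

    #define MAX_MRB 128     /* placeholder, as in the previous sketch */

    static void *active_mrb_array[MAX_MRB];

    /* Validate a firmware-supplied handle and detach the request it names. */
    static void *del_mrb_from_active_array(unsigned int handle)
    {
        void *mrb;

        if (handle >= MAX_MRB)              /* out-of-range handle: ignore */
            return NULL;

        mrb = active_mrb_array[handle];
        active_mrb_array[handle] = NULL;    /* slot is free for reuse */
        return mrb;                         /* NULL if nothing was pending */
    }

    int main(void)
    {
        return del_mrb_from_active_array(5) ? 1 : 0;  /* NULL: nothing queued */
    }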
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index e1e66a45e4d0..7ac21dabbf22 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -622,7 +622,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
return QLA_ERROR;
}
- ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n",
+ ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
ha->host_no, mbox_sts[2]);
return QLA_SUCCESS;
@@ -661,6 +661,8 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
}
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+ if (fw_ddb_entry)
+ memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));
mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
mbox_cmd[1] = (uint32_t) fw_ddb_index;
@@ -1424,8 +1426,8 @@ exit_set_chap:
* match is found. If a match is not found then add the entry in FLASH and
* return the index at which entry is written in the FLASH.
**/
-static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
- char *password, int bidi, uint16_t *chap_index)
+int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+ char *password, int bidi, uint16_t *chap_index)
{
int i, rval;
int free_index = -1;
@@ -1444,6 +1446,11 @@ static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
return QLA_ERROR;
}
+ if (!username || !password) {
+ ql4_printk(KERN_ERR, ha, "Do not have username and psw\n");
+ return QLA_ERROR;
+ }
+
mutex_lock(&ha->chap_sem);
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
@@ -1600,7 +1607,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
char *ip;
uint16_t iscsi_opts = 0;
uint32_t options = 0;
- uint16_t idx;
+ uint16_t idx, *ptid;
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
@@ -1626,6 +1633,14 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
goto exit_set_param;
}
+ ptid = (uint16_t *)&fw_ddb_entry->isid[1];
+ *ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
+ fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
+ fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
+ fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
+
iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 65253dfbe962..e1e46b6dac75 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -841,11 +841,8 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
- if (timeout >= qla4_8xxx_rom_lock_timeout) {
- ql4_printk(KERN_WARNING, ha,
- "%s: Failed to acquire rom lock", __func__);
+ if (timeout >= qla4_8xxx_rom_lock_timeout)
return -1;
- }
timeout++;
@@ -996,18 +993,6 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
else
qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
- /* reset ms */
- val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
- val |= (1 << 1);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
-
- msleep(20);
- /* unreset ms */
- val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
- val &= ~(1 << 1);
- qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
- msleep(20);
-
qla4_8xxx_rom_unlock(ha);
/* Read the signature value from the flash.
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc45ac923691..dc7500e47b8b 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -623,6 +623,7 @@ struct crb_addr_pair {
#define ADDR_ERROR ((unsigned long) 0xffffffff)
#define MAX_CTL_CHECK 1000
+#define QLA82XX_FWERROR_CODE(code) ((code >> 8) & 0x1fffff)
/***************************************************************************
* PCI related defines.
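
The new QLA82XX_FWERROR_CODE() pulls the 21-bit firmware error code out of bits 8..28 of the halt status; the watchdog change later in this series uses it in place of LSW(MSB(halt_status)), which (assuming the usual qla byte/word helper macros) examined only the single byte at bits 8..15, so the equality test against 0x67 becomes stricter. Note the macro body does not parenthesize its argument, so callers should pass a plain identifier. A quick user-space check of the extraction:

    #include <assert.h>
    #include <stdint.h>

    /* Same shape as the new macro: a 21-bit field starting at bit 8. */
    #define FWERROR_CODE(code) (((code) >> 8) & 0x1fffff)

    int main(void)
    {
        /* Halt status carrying error code 0x67 ("firmware aborted 0x00006700"). */
        uint32_t halt_status = 0x67u << 8;

        assert(FWERROR_CODE(halt_status) == 0x67);
        return 0;
    }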
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ce6d3b7f0c61..ee47820c30a6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -32,14 +32,14 @@ static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
-int ql4xdisablesysfsboot = 1;
+static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
" Set to disable exporting boot targets to sysfs.\n"
"\t\t 0 - Export boot targets\n"
"\t\t 1 - Do not export boot targets (Default)");
-int ql4xdontresethba = 0;
+int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
" Don't reset the HBA for driver recovery.\n"
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
- "Target Session Recovery Timeout.\n"
+ " Target Session Recovery Timeout.\n"
"\t\t Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
@@ -83,6 +83,8 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
/*
* iSCSI template entry points
*/
+static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
+ enum iscsi_param param, char *buf);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
@@ -118,6 +120,13 @@ static void qla4xxx_task_cleanup(struct iscsi_task *);
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats);
+static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
+ uint32_t iface_type, uint32_t payload_size,
+ uint32_t pid, struct sockaddr *dst_addr);
+static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+ uint32_t *num_entries, char *buf);
+static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+
/*
* SCSI host template entry points
*/
@@ -179,7 +188,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.destroy_conn = qla4xxx_conn_destroy,
.set_param = iscsi_set_param,
.get_conn_param = qla4xxx_conn_get_param,
- .get_session_param = iscsi_session_get_param,
+ .get_session_param = qla4xxx_session_get_param,
.get_ep_param = qla4xxx_get_ep_param,
.ep_connect = qla4xxx_ep_connect,
.ep_poll = qla4xxx_ep_poll,
@@ -194,10 +203,93 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.set_iface_param = qla4xxx_iface_set_param,
.get_iface_param = qla4xxx_get_iface_param,
.bsg_request = qla4xxx_bsg_request,
+ .send_ping = qla4xxx_send_ping,
+ .get_chap = qla4xxx_get_chap_list,
+ .delete_chap = qla4xxx_delete_chap,
};
static struct scsi_transport_template *qla4xxx_scsi_transport;
+static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
+ uint32_t iface_type, uint32_t payload_size,
+ uint32_t pid, struct sockaddr *dst_addr)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ uint32_t options = 0;
+ uint8_t ipaddr[IPv6_ADDR_LEN];
+ int rval;
+
+ memset(ipaddr, 0, IPv6_ADDR_LEN);
+ /* IPv4 to IPv4 */
+ if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
+ (dst_addr->sa_family == AF_INET)) {
+ addr = (struct sockaddr_in *)dst_addr;
+ memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
+ "dest: %pI4\n", __func__,
+ &ha->ip_config.ip_address, ipaddr));
+ rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
+ ipaddr);
+ if (rval)
+ rval = -EINVAL;
+ } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
+ (dst_addr->sa_family == AF_INET6)) {
+ /* IPv6 to IPv6 */
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
+
+ options |= PING_IPV6_PROTOCOL_ENABLE;
+
+ /* Ping using LinkLocal address */
+ if ((iface_num == 0) || (iface_num == 1)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
+ "src: %pI6 dest: %pI6\n", __func__,
+ &ha->ip_config.ipv6_link_local_addr,
+ ipaddr));
+ options |= PING_IPV6_LINKLOCAL_ADDR;
+ rval = qla4xxx_ping_iocb(ha, options, payload_size,
+ pid, ipaddr);
+ } else {
+ ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
+ "not supported\n", __func__, iface_num);
+ rval = -ENOSYS;
+ goto exit_send_ping;
+ }
+
+ /*
+ * If ping using LinkLocal address fails, try ping using
+ * IPv6 address
+ */
+ if (rval != QLA_SUCCESS) {
+ options &= ~PING_IPV6_LINKLOCAL_ADDR;
+ if (iface_num == 0) {
+ options |= PING_IPV6_ADDR0;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
+ "Ping src: %pI6 "
+ "dest: %pI6\n", __func__,
+ &ha->ip_config.ipv6_addr0,
+ ipaddr));
+ } else if (iface_num == 1) {
+ options |= PING_IPV6_ADDR1;
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
+ "Ping src: %pI6 "
+ "dest: %pI6\n", __func__,
+ &ha->ip_config.ipv6_addr1,
+ ipaddr));
+ }
+ rval = qla4xxx_ping_iocb(ha, options, payload_size,
+ pid, ipaddr);
+ if (rval)
+ rval = -EINVAL;
+ }
+ } else
+ rval = -ENOSYS;
+exit_send_ping:
+ return rval;
+}
+
static umode_t ql4_attr_is_visible(int param_type, int param)
{
switch (param_type) {
@@ -206,6 +298,8 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
case ISCSI_HOST_PARAM_HWADDRESS:
case ISCSI_HOST_PARAM_IPADDRESS:
case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ case ISCSI_HOST_PARAM_PORT_STATE:
+ case ISCSI_HOST_PARAM_PORT_SPEED:
return S_IRUGO;
default:
return 0;
@@ -225,6 +319,12 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
case ISCSI_PARAM_MAX_RECV_DLENGTH:
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
case ISCSI_PARAM_IFACE_NAME:
+ case ISCSI_PARAM_CHAP_OUT_IDX:
+ case ISCSI_PARAM_CHAP_IN_IDX:
+ case ISCSI_PARAM_USERNAME:
+ case ISCSI_PARAM_PASSWORD:
+ case ISCSI_PARAM_USERNAME_IN:
+ case ISCSI_PARAM_PASSWORD_IN:
return S_IRUGO;
default:
return 0;
@@ -255,6 +355,189 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
return 0;
}
+static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+ uint32_t *num_entries, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct ql4_chap_table *chap_table;
+ struct iscsi_chap_rec *chap_rec;
+ int max_chap_entries = 0;
+ int valid_chap_entries = 0;
+ int ret = 0, i;
+
+ if (is_qla8022(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
+ __func__, *num_entries, chap_tbl_idx);
+
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit_get_chap_list;
+ }
+
+ chap_rec = (struct iscsi_chap_rec *) buf;
+ mutex_lock(&ha->chap_sem);
+ for (i = chap_tbl_idx; i < max_chap_entries; i++) {
+ chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+ if (chap_table->cookie !=
+ __constant_cpu_to_le16(CHAP_VALID_COOKIE))
+ continue;
+
+ chap_rec->chap_tbl_idx = i;
+ strncpy(chap_rec->username, chap_table->name,
+ ISCSI_CHAP_AUTH_NAME_MAX_LEN);
+ strncpy(chap_rec->password, chap_table->secret,
+ QL4_CHAP_MAX_SECRET_LEN);
+ chap_rec->password_length = chap_table->secret_len;
+
+ if (chap_table->flags & BIT_7) /* local */
+ chap_rec->chap_type = CHAP_TYPE_OUT;
+
+ if (chap_table->flags & BIT_6) /* peer */
+ chap_rec->chap_type = CHAP_TYPE_IN;
+
+ chap_rec++;
+
+ valid_chap_entries++;
+ if (valid_chap_entries == *num_entries)
+ break;
+ }
+ mutex_unlock(&ha->chap_sem);
+
+exit_get_chap_list:
+ ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
+ __func__, valid_chap_entries);
+ *num_entries = valid_chap_entries;
+ return ret;
+}
+
+static int __qla4xxx_is_chap_active(struct device *dev, void *data)
+{
+ int ret = 0;
+ uint16_t *chap_tbl_idx = (uint16_t *) data;
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+
+ if (!iscsi_is_session_dev(dev))
+ goto exit_is_chap_active;
+
+ cls_session = iscsi_dev_to_session(dev);
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+
+ if (iscsi_session_chkready(cls_session))
+ goto exit_is_chap_active;
+
+ if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
+ ret = 1;
+
+exit_is_chap_active:
+ return ret;
+}
+
+static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
+ uint16_t chap_tbl_idx)
+{
+ int ret = 0;
+
+ ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
+ __qla4xxx_is_chap_active);
+
+ return ret;
+}
+
+static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct ql4_chap_table *chap_table;
+ dma_addr_t chap_dma;
+ int max_chap_entries = 0;
+ uint32_t offset = 0;
+ uint32_t chap_size;
+ int ret = 0;
+
+ chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+ if (chap_table == NULL)
+ return -ENOMEM;
+
+ memset(chap_table, 0, sizeof(struct ql4_chap_table));
+
+ if (is_qla8022(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (chap_tbl_idx > max_chap_entries) {
+ ret = -EINVAL;
+ goto exit_delete_chap;
+ }
+
+ /* Check if the chap index is in use.
+ * If it is in use, don't delete the chap entry */
+ ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
+ if (ret) {
+ ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
+ "delete from flash\n", chap_tbl_idx);
+ ret = -EBUSY;
+ goto exit_delete_chap;
+ }
+
+ chap_size = sizeof(struct ql4_chap_table);
+ if (is_qla40XX(ha))
+ offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
+ else {
+ offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+ /* flt_chap_size is CHAP table size for both ports
+ * so divide it by 2 to calculate the offset for second port
+ */
+ if (ha->port_num == 1)
+ offset += (ha->hw.flt_chap_size / 2);
+ offset += (chap_tbl_idx * chap_size);
+ }
+
+ ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+ if (ret != QLA_SUCCESS) {
+ ret = -EINVAL;
+ goto exit_delete_chap;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
+ __le16_to_cpu(chap_table->cookie)));
+
+ if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
+ ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
+ goto exit_delete_chap;
+ }
+
+ chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
+
+ /* reuse the per-adapter flash offset computed above */
+ ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
+ FLASH_OPT_RMW_COMMIT);
+ if (ret == QLA_SUCCESS && ha->chap_list) {
+ mutex_lock(&ha->chap_sem);
+ /* Update ha chap_list cache */
+ memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
+ chap_table, sizeof(struct ql4_chap_table));
+ mutex_unlock(&ha->chap_sem);
+ }
+ if (ret != QLA_SUCCESS)
+ ret = -EINVAL;
+
+exit_delete_chap:
+ dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+ return ret;
+}
+
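The flash offset arithmetic in qla4xxx_delete_chap() is worth spelling out: on 40xx adapters the entry sits at FLASH_CHAP_OFFSET plus the index-scaled entry size, while on 8022 the FLT points at one CHAP region shared by both ports, with the second port's half starting at flt_chap_size / 2. A standalone sketch of the 8022 branch; the base address and entry size here are placeholders, not the values from ql4_fw.h:

    #include <stdint.h>
    #include <stdio.h>

    struct chap_entry { uint8_t raw[1024]; };   /* stand-in for ql4_chap_table */

    #define FLASH_RAW_ACCESS_ADDR 0x8000000u    /* placeholder base */

    /* 8022-style offset: one FLT region, split in half between the two ports. */
    static uint32_t chap_offset_8022(uint32_t flt_region_chap,
                                     uint32_t flt_chap_size,
                                     int port_num, uint16_t idx)
    {
        uint32_t offset = FLASH_RAW_ACCESS_ADDR + (flt_region_chap << 2);

        if (port_num == 1)
            offset += flt_chap_size / 2;        /* second port's half */
        return offset + idx * (uint32_t)sizeof(struct chap_entry);
    }

    int main(void)
    {
        printf("entry 3, port 1: 0x%x\n",
               (unsigned)chap_offset_8022(0x100, 0x10000, 1, 3));
        return 0;
    }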
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
enum iscsi_param_type param_type,
int param, char *buf)
@@ -548,6 +831,43 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
return ret;
}
+static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
+
+ qla4xxx_get_firmware_state(ha);
+
+ switch (ha->addl_fw_state & 0x0F00) {
+ case FW_ADDSTATE_LINK_SPEED_10MBPS:
+ speed = ISCSI_PORT_SPEED_10MBPS;
+ break;
+ case FW_ADDSTATE_LINK_SPEED_100MBPS:
+ speed = ISCSI_PORT_SPEED_100MBPS;
+ break;
+ case FW_ADDSTATE_LINK_SPEED_1GBPS:
+ speed = ISCSI_PORT_SPEED_1GBPS;
+ break;
+ case FW_ADDSTATE_LINK_SPEED_10GBPS:
+ speed = ISCSI_PORT_SPEED_10GBPS;
+ break;
+ }
+ ihost->port_speed = speed;
+}
+
+static void qla4xxx_set_port_state(struct Scsi_Host *shost)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ uint32_t state = ISCSI_PORT_STATE_DOWN;
+
+ if (test_bit(AF_LINK_UP, &ha->flags))
+ state = ISCSI_PORT_STATE_UP;
+
+ ihost->port_state = state;
+}
+
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
@@ -564,6 +884,14 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
case ISCSI_HOST_PARAM_INITIATOR_NAME:
len = sprintf(buf, "%s\n", ha->name_string);
break;
+ case ISCSI_HOST_PARAM_PORT_STATE:
+ qla4xxx_set_port_state(shost);
+ len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
+ break;
+ case ISCSI_HOST_PARAM_PORT_SPEED:
+ qla4xxx_set_port_speed(shost);
+ len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
+ break;
default:
return -ENOSYS;
}
@@ -968,6 +1296,41 @@ exit_init_fw_cb:
return rval;
}
+static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
+ enum iscsi_param param, char *buf)
+{
+ struct iscsi_session *sess = cls_sess->dd_data;
+ struct ddb_entry *ddb_entry = sess->dd_data;
+ struct scsi_qla_host *ha = ddb_entry->ha;
+ int rval, len;
+ uint16_t idx;
+
+ switch (param) {
+ case ISCSI_PARAM_CHAP_IN_IDX:
+ rval = qla4xxx_get_chap_index(ha, sess->username_in,
+ sess->password_in, BIDI_CHAP,
+ &idx);
+ if (rval)
+ return -EINVAL;
+
+ len = sprintf(buf, "%hu\n", idx);
+ break;
+ case ISCSI_PARAM_CHAP_OUT_IDX:
+ rval = qla4xxx_get_chap_index(ha, sess->username,
+ sess->password, LOCAL_CHAP,
+ &idx);
+ if (rval)
+ return -EINVAL;
+
+ len = sprintf(buf, "%hu\n", idx);
+ break;
+ default:
+ return iscsi_session_get_param(cls_sess, param, buf);
+ }
+
+ return len;
+}
+
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
@@ -1506,13 +1869,17 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
{
int buflen = 0;
struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
struct iscsi_conn *conn;
char ip_addr[DDB_IPADDR_LEN];
uint16_t options = 0;
sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
conn = cls_conn->dd_data;
+ ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1552,6 +1919,8 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
(char *)ha->name_string, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
(char *)ip_addr, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
+ (char *)fw_ddb_entry->iscsi_alias, buflen);
}
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -1638,6 +2007,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
/* Update params */
+ ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1666,6 +2036,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
+ (char *)fw_ddb_entry->iscsi_alias, 0);
+
exit_session_conn_param:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
@@ -2113,7 +2486,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
halt_status = qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- if (LSW(MSB(halt_status)) == 0x67)
+ if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
ql4_printk(KERN_ERR, ha, "%s:"
" Firmware aborted with"
" error code 0x00006700."
@@ -2230,6 +2603,10 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
}
}
+ /* Process any deferred work. */
+ if (!list_empty(&ha->work_list))
+ start_dpc++;
+
/* Wakeup the dpc routine for this adapter, if needed. */
if (start_dpc ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
@@ -2795,6 +3172,109 @@ void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
queue_work(ha->dpc_thread, &ha->dpc_work);
}
+static struct qla4_work_evt *
+qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
+ enum qla4_work_type type)
+{
+ struct qla4_work_evt *e;
+ uint32_t size = sizeof(struct qla4_work_evt) + data_size;
+
+ e = kzalloc(size, GFP_ATOMIC);
+ if (!e)
+ return NULL;
+
+ INIT_LIST_HEAD(&e->list);
+ e->type = type;
+ return e;
+}
+
+static void qla4xxx_post_work(struct scsi_qla_host *ha,
+ struct qla4_work_evt *e)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->work_lock, flags);
+ list_add_tail(&e->list, &ha->work_list);
+ spin_unlock_irqrestore(&ha->work_lock, flags);
+ qla4xxx_wake_dpc(ha);
+}
+
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
+ enum iscsi_host_event_code aen_code,
+ uint32_t data_size, uint8_t *data)
+{
+ struct qla4_work_evt *e;
+
+ e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
+ if (!e)
+ return QLA_ERROR;
+
+ e->u.aen.code = aen_code;
+ e->u.aen.data_size = data_size;
+ memcpy(e->u.aen.data, data, data_size);
+
+ qla4xxx_post_work(ha, e);
+
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
+ uint32_t status, uint32_t pid,
+ uint32_t data_size, uint8_t *data)
+{
+ struct qla4_work_evt *e;
+
+ e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
+ if (!e)
+ return QLA_ERROR;
+
+ e->u.ping.status = status;
+ e->u.ping.pid = pid;
+ e->u.ping.data_size = data_size;
+ memcpy(e->u.ping.data, data, data_size);
+
+ qla4xxx_post_work(ha, e);
+
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_do_work(struct scsi_qla_host *ha)
+{
+ struct qla4_work_evt *e, *tmp;
+ unsigned long flags;
+ LIST_HEAD(work);
+
+ spin_lock_irqsave(&ha->work_lock, flags);
+ list_splice_init(&ha->work_list, &work);
+ spin_unlock_irqrestore(&ha->work_lock, flags);
+
+ list_for_each_entry_safe(e, tmp, &work, list) {
+ list_del_init(&e->list);
+
+ switch (e->type) {
+ case QLA4_EVENT_AEN:
+ iscsi_post_host_event(ha->host_no,
+ &qla4xxx_iscsi_transport,
+ e->u.aen.code,
+ e->u.aen.data_size,
+ e->u.aen.data);
+ break;
+ case QLA4_EVENT_PING_STATUS:
+ iscsi_ping_comp_event(ha->host_no,
+ &qla4xxx_iscsi_transport,
+ e->u.ping.status,
+ e->u.ping.pid,
+ e->u.ping.data_size,
+ e->u.ping.data);
+ break;
+ default:
+ ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
+ "supported", e->type);
+ }
+ kfree(e);
+ }
+}
+
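qla4xxx_do_work() above uses the standard drain idiom: splice the shared list onto a private head while holding the lock, then walk and dispatch with the lock dropped, so netlink delivery never runs under work_lock. A user-space rendering of the same idiom (a simple LIFO stack here, whereas the kernel list preserves FIFO order):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct evt {
        struct evt *next;
        int type;
    };

    static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct evt *work_list;       /* producers push here */

    static void post_work(int type)
    {
        struct evt *e = calloc(1, sizeof(*e));

        if (!e)
            return;
        e->type = type;
        pthread_mutex_lock(&work_lock);
        e->next = work_list;
        work_list = e;
        pthread_mutex_unlock(&work_lock);
    }

    /* Detach the whole list under the lock, then dispatch without it. */
    static void do_work(void)
    {
        struct evt *e, *local;

        pthread_mutex_lock(&work_lock);
        local = work_list;
        work_list = NULL;               /* list_splice_init() equivalent */
        pthread_mutex_unlock(&work_lock);

        while ((e = local) != NULL) {
            local = e->next;
            printf("dispatching event type %d\n", e->type);
            free(e);
        }
    }

    int main(void)
    {
        post_work(1);
        post_work(2);
        do_work();
        return 0;
    }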
/**
* qla4xxx_do_dpc - dpc routine
* @data: in our case pointer to adapter structure
@@ -2826,6 +3306,9 @@ static void qla4xxx_do_dpc(struct work_struct *work)
return;
}
+ /* post events to application */
+ qla4xxx_do_work(ha);
+
if (is_qla8022(ha)) {
if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
qla4_8xxx_idc_lock(ha);
@@ -2962,7 +3445,6 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
{
int status = 0;
- uint8_t revision_id;
unsigned long mem_base, mem_len, db_base, db_len;
struct pci_dev *pdev = ha->pdev;
@@ -2974,10 +3456,9 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
goto iospace_error_exit;
}
- pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
- __func__, revision_id));
- ha->revision_id = revision_id;
+ __func__, pdev->revision));
+ ha->revision_id = pdev->revision;
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
@@ -3341,9 +3822,8 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
/* Check Boot Mode */
val = rd_nvram_byte(ha, addr);
if (!(val & 0x07)) {
- DEBUG2(ql4_printk(KERN_ERR, ha,
- "%s: Failed Boot options : 0x%x\n",
- __func__, val));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
+ "options : 0x%x\n", __func__, val));
ret = QLA_ERROR;
goto exit_boot_info;
}
@@ -3382,15 +3862,14 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
if (qla4xxx_get_flash(ha, buf_dma, addr,
13 * sizeof(uint8_t)) != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
- "failed\n", ha->host_no, __func__));
+ " failed\n", ha->host_no, __func__));
ret = QLA_ERROR;
goto exit_boot_info_free;
}
/* Check Boot Mode */
if (!(buf[1] & 0x07)) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Failed: Boot options : 0x%x\n",
- buf[1]));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
+ " : 0x%x\n", buf[1]));
ret = QLA_ERROR;
goto exit_boot_info_free;
}
@@ -3411,12 +3890,11 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
" target ID %d\n", __func__, ddb_index[0],
ddb_index[1]));
- ha->pri_ddb_idx = ddb_index[0];
- ha->sec_ddb_idx = ddb_index[1];
-
exit_boot_info_free:
dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
exit_boot_info:
+ ha->pri_ddb_idx = ddb_index[0];
+ ha->sec_ddb_idx = ddb_index[1];
return ret;
}
@@ -3497,8 +3975,8 @@ static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
fw_ddb_entry_dma, ddb_index)) {
- DEBUG2(ql4_printk(KERN_ERR, ha,
- "%s: Flash DDB read Failed\n", __func__));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
+ "index [%d]\n", __func__, ddb_index));
ret = QLA_ERROR;
goto exit_boot_target;
}
@@ -3576,8 +4054,8 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
ddb_index[1] = 0xffff;
ret = get_fw_boot_info(ha, ddb_index);
if (ret != QLA_SUCCESS) {
- DEBUG2(ql4_printk(KERN_ERR, ha,
- "%s: Failed to set boot info.\n", __func__));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: No boot target configured.\n", __func__));
return ret;
}
@@ -3590,8 +4068,8 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
ddb_index[0]);
if (rval != QLA_SUCCESS) {
- DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
- "primary target\n", __func__));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
+ "configured\n", __func__));
} else
ret = QLA_SUCCESS;
@@ -3602,8 +4080,8 @@ sec_target:
rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
ddb_index[1]);
if (rval != QLA_SUCCESS) {
- DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
- "secondary target\n", __func__));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
+ " configured\n", __func__));
} else
ret = QLA_SUCCESS;
@@ -3620,7 +4098,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
if (ql4xdisablesysfsboot) {
ql4_printk(KERN_INFO, ha,
- "%s: syfsboot disabled - driver will trigger login"
+ "%s: syfsboot disabled - driver will trigger login "
"and publish session for discovery .\n", __func__);
return QLA_SUCCESS;
}
@@ -3772,11 +4250,13 @@ static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
tddb->port = le16_to_cpu(fw_ddb_entry->port);
+ memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid));
}
static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
struct ql4_tuple_ddb *old_tddb,
- struct ql4_tuple_ddb *new_tddb)
+ struct ql4_tuple_ddb *new_tddb,
+ uint8_t is_isid_compare)
{
if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
return QLA_ERROR;
@@ -3787,6 +4267,26 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
if (old_tddb->port != new_tddb->port)
return QLA_ERROR;
+ /* For multi sessions, driver generates the ISID, so do not compare
+ * ISID in reset path since it would be a comparision between the
+ * driver generated ISID and firmware generated ISID. This could
+ * lead to adding duplicated DDBs in the list as driver generated
+ * ISID would not match firmware generated ISID.
+ */
+ if (is_isid_compare) {
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
+ "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
+ __func__, old_tddb->isid[5], old_tddb->isid[4],
+ old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
+ old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
+ new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
+ new_tddb->isid[0]));
+
+ if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
+ sizeof(old_tddb->isid)))
+ return QLA_ERROR;
+ }
+
DEBUG2(ql4_printk(KERN_INFO, ha,
"Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
@@ -3829,7 +4329,7 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
continue;
qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
- if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
ret = QLA_SUCCESS; /* found */
goto exit_check;
}
@@ -3872,7 +4372,7 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
- if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) {
ret = QLA_SUCCESS; /* found */
goto exit_check;
}
@@ -4038,6 +4538,10 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
if (ret == QLA_ERROR)
break;
+ /* Ignore DDB if invalid state (unassigned) */
+ if (state == DDB_DS_UNASSIGNED)
+ goto continue_next_st;
+
/* Check if ST, add to the list_st */
if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
goto continue_next_st;
@@ -4397,6 +4901,9 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
spin_lock_init(&ha->hardware_lock);
+ /* Initialize work list */
+ INIT_LIST_HEAD(&ha->work_list);
+
/* Allocate dma buffers */
if (qla4xxx_mem_alloc(ha)) {
ql4_printk(KERN_WARNING, ha,
@@ -4524,8 +5031,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->patch_number, ha->build_number);
if (qla4xxx_setup_boot_info(ha))
- ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
- __func__);
+ ql4_printk(KERN_ERR, ha,
+ "%s: No iSCSI boot target configured\n", __func__);
/* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 133989b3a9f4..97b30c108e36 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index e40dc1cb09a0..b191dd549207 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -35,7 +35,6 @@
#include "qlogicpti.h"
#include <asm/dma.h>
-#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2aeb2e9c4d3b..07322ecff90d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -782,12 +782,6 @@ static void scsi_done(struct scsi_cmnd *cmd)
blk_complete_request(cmd->request);
}
-/* Move this to a header if it becomes more generally useful */
-static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
-{
- return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
-}
-
/**
* scsi_finish_command - cleanup and pass command back to upper layer
* @cmd: the command
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6888b2ca5bfc..182d5a57ab74 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -101,6 +101,7 @@ static const char * scsi_debug_version_date = "20100324";
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
+#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
@@ -126,6 +127,7 @@ static const char * scsi_debug_version_date = "20100324";
#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
#define SCSI_DEBUG_OPT_DIF_ERR 32
#define SCSI_DEBUG_OPT_DIX_ERR 64
+#define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@@ -185,6 +187,7 @@ static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int scsi_debug_lbpu = DEF_LBPU;
static unsigned int scsi_debug_lbpws = DEF_LBPWS;
static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
+static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
@@ -774,10 +777,10 @@ static int inquiry_evpd_b1(unsigned char *arr)
return 0x3c;
}
-/* Thin provisioning VPD page (SBC-3) */
+/* Logical block provisioning VPD page (SBC-3) */
static int inquiry_evpd_b2(unsigned char *arr)
{
- memset(arr, 0, 0x8);
+ memset(arr, 0, 0x4);
arr[0] = 0; /* threshold exponent */
if (scsi_debug_lbpu)
@@ -789,7 +792,10 @@ static int inquiry_evpd_b2(unsigned char *arr)
if (scsi_debug_lbpws10)
arr[1] |= 1 << 5;
- return 0x8;
+ if (scsi_debug_lbprz)
+ arr[1] |= 1 << 2;
+
+ return 0x4;
}
#define SDEBUG_LONG_INQ_SZ 96
@@ -1070,8 +1076,11 @@ static int resp_readcap16(struct scsi_cmnd * scp,
arr[13] = scsi_debug_physblk_exp & 0xf;
arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
- if (scsi_debug_lbp())
+ if (scsi_debug_lbp()) {
arr[14] |= 0x80; /* LBPME */
+ if (scsi_debug_lbprz)
+ arr[14] |= 0x40; /* LBPRZ */
+ }
arr[15] = scsi_debug_lowest_aligned & 0xff;
@@ -1778,7 +1787,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
int len = min(psgl->length, resid);
- paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
+ paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
memcpy(paddr, dif_storep + dif_offset(sector), len);
sector += len >> 3;
@@ -1788,7 +1797,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
sector = do_div(tmp_sec, sdebug_store_sectors);
}
resid -= len;
- kunmap_atomic(paddr, KM_IRQ0);
+ kunmap_atomic(paddr);
}
dix_reads++;
@@ -1881,12 +1890,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
BUG_ON(scsi_sg_count(SCpnt) == 0);
BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
- paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
+ paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
ppage_offset = 0;
/* For each data page */
scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
- daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
+ daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
/* For each sector-sized chunk in data page */
for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
@@ -1895,10 +1904,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
* protection page advance to the next one
*/
if (ppage_offset >= psgl->length) {
- kunmap_atomic(paddr, KM_IRQ1);
+ kunmap_atomic(paddr);
psgl = sg_next(psgl);
BUG_ON(psgl == NULL);
- paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
+ paddr = kmap_atomic(sg_page(psgl))
+ psgl->offset;
ppage_offset = 0;
}
@@ -1971,10 +1980,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
ppage_offset += sizeof(struct sd_dif_tuple);
}
- kunmap_atomic(daddr, KM_IRQ0);
+ kunmap_atomic(daddr);
}
- kunmap_atomic(paddr, KM_IRQ1);
+ kunmap_atomic(paddr);
dix_writes++;
@@ -1982,8 +1991,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
out:
dif_errors++;
- kunmap_atomic(daddr, KM_IRQ0);
- kunmap_atomic(paddr, KM_IRQ1);
+ kunmap_atomic(daddr);
+ kunmap_atomic(paddr);
return ret;
}
@@ -2045,10 +2054,13 @@ static void unmap_region(sector_t lba, unsigned int len)
block = lba + alignment;
rem = do_div(block, granularity);
- if (rem == 0 && lba + granularity <= end &&
- block < map_size)
+ if (rem == 0 && lba + granularity <= end && block < map_size) {
clear_bit(block, map_storep);
-
+ if (scsi_debug_lbprz)
+ memset(fake_storep +
+ block * scsi_debug_sector_size, 0,
+ scsi_debug_sector_size);
+ }
lba += granularity - rem;
}
}
@@ -2220,7 +2232,7 @@ static int resp_get_lba_status(struct scsi_cmnd * scmd,
mapped = map_state(lba, &num);
memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
- put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
+ put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
put_unaligned_be64(lba, &arr[8]); /* LBA */
put_unaligned_be32(num, &arr[16]); /* Number of blocks */
arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
@@ -2303,7 +2315,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
offset = 0;
for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
- kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
+ kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
if (!kaddr)
goto out;
@@ -2311,7 +2323,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
*(kaddr + sg->offset + j) ^= *(buf + offset + j);
offset += sg->length;
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
ret = 0;
out:
@@ -2730,6 +2742,7 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
+module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
@@ -2771,6 +2784,7 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
@@ -3615,6 +3629,9 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
scsi_debug_every_nth = -1;
if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
return 0; /* ignore command causing timeout */
+ else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
+ scsi_medium_access_command(SCpnt))
+ return 0; /* time out reads and writes */
else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
inj_recovered = 1; /* to reads and writes below */
else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
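
The scsi_debug additions tie two things together: the new lbprz parameter sets the LBPRZ bit in both the logical block provisioning VPD page and READ CAPACITY(16) byte 14, and unmap_region() now backs the claim up by zero-filling fake_storep for each block whose map bit it clears, so unmapped blocks really do read back as zeros. A condensed sketch of the unmap side (bitmap helper hand-rolled; the kernel uses clear_bit()):

    #include <stdint.h>
    #include <string.h>

    #define SECTOR_SIZE 512u    /* scsi_debug's default sector size */

    /* Unmap one block: clear its provisioning bit and, when LBPRZ is on,
     * zero the backing store so a later read returns zeros. */
    void unmap_block(uint8_t *store, unsigned long *map,
                     unsigned long block, int lbprz)
    {
        map[block / (8 * sizeof(long))] &=
            ~(1UL << (block % (8 * sizeof(long))));
        if (lbprz)
            memset(store + block * SECTOR_SIZE, 0, SECTOR_SIZE);
    }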
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5f84a148eb14..386f0c53bea7 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -30,6 +30,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_host.h>
@@ -141,11 +142,11 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
else if (host->hostt->eh_timed_out)
rtn = host->hostt->eh_timed_out(scmd);
+ scmd->result |= DID_TIME_OUT << 16;
+
if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
- !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
- scmd->result |= DID_TIME_OUT << 16;
+ !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
rtn = BLK_EH_HANDLED;
- }
return rtn;
}
@@ -366,6 +367,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
return TARGET_ERROR;
case ILLEGAL_REQUEST:
+ if (sshdr.asc == 0x20 || /* Invalid command operation code */
+ sshdr.asc == 0x21 || /* Logical block address out of range */
+ sshdr.asc == 0x24 || /* Invalid field in cdb */
+ sshdr.asc == 0x26) { /* Parameter value invalid */
+ return TARGET_ERROR;
+ }
+ return SUCCESS;
+
default:
return SUCCESS;
}
@@ -770,6 +779,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
int cmnd_size, int timeout, unsigned sense_bytes)
{
struct scsi_device *sdev = scmd->device;
+ struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
struct Scsi_Host *shost = sdev->host;
DECLARE_COMPLETION_ONSTACK(done);
unsigned long timeleft;
@@ -824,6 +834,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
}
scsi_eh_restore_cmnd(scmd, &ses);
+
+ if (sdrv && sdrv->eh_action)
+ rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
+
return rtn;
}
@@ -1540,7 +1554,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* Need to modify host byte to signal a
* permanent target failure
*/
- scmd->result |= (DID_TARGET_FAILURE << 16);
+ set_host_byte(scmd, DID_TARGET_FAILURE);
rtn = SUCCESS;
}
/* if rtn == FAILED, we have no sense information;
@@ -1560,7 +1574,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
case RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
- scmd->result |= (DID_NEXUS_FAILURE << 16);
+ set_host_byte(scmd, DID_NEXUS_FAILURE);
return SUCCESS; /* causes immediate i/o error */
default:
return FAILED;
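
The scsi_check_sense() hunk above makes the policy explicit: four ILLEGAL REQUEST ASCs that a retry can never cure are promoted to TARGET_ERROR, which scsi_decide_disposition() then turns into DID_TARGET_FAILURE and the block layer ultimately reports as -EREMOTEIO. The classification boils down to:

    /* ASC values from the hunk above that now count as a hard target error. */
    int illegal_request_is_target_error(unsigned char asc)
    {
        switch (asc) {
        case 0x20:      /* invalid command operation code */
        case 0x21:      /* logical block address out of range */
        case 0x24:      /* invalid field in CDB */
        case 0x26:      /* parameter value invalid */
            return 1;
        default:
            return 0;   /* anything else stays SUCCESS */
        }
    }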
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2c95dbe9d65..ead6405f3e51 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -682,11 +682,11 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
error = -ENOLINK;
break;
case DID_TARGET_FAILURE:
- cmd->result |= (DID_OK << 16);
+ set_host_byte(cmd, DID_OK);
error = -EREMOTEIO;
break;
case DID_NEXUS_FAILURE:
- cmd->result |= (DID_OK << 16);
+ set_host_byte(cmd, DID_OK);
error = -EBADE;
break;
default:
@@ -880,6 +880,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
cmd->cmnd[0] == WRITE_SAME)) {
description = "Discard failure";
action = ACTION_FAIL;
+ error = -EREMOTEIO;
} else
action = ACTION_FAIL;
break;
@@ -2567,7 +2568,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
if (*len > sg_len)
*len = sg_len;
- return kmap_atomic(page, KM_BIO_SRC_IRQ);
+ return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
@@ -2577,6 +2578,6 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg);
*/
void scsi_kunmap_atomic_sg(void *virt)
{
- kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 29c4c0480976..01b03744f1f9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1295,6 +1295,7 @@ EXPORT_SYMBOL(int_to_scsilun);
* LUNs even if it's older than SCSI-3.
* If BLIST_NOREPORTLUN is set, return 1 always.
* If BLIST_NOLUN is set, return 0 always.
+ * If starget->no_report_luns is set, return 1 always.
*
* Return:
* 0: scan completed (or no memory, so further scanning is futile)
@@ -1321,6 +1322,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
* Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
* Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
* support more than 8 LUNs.
+ * Don't attempt if the target doesn't support REPORT LUNS.
*/
if (bflags & BLIST_NOREPORTLUN)
return 1;
@@ -1332,6 +1334,8 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
return 1;
if (bflags & BLIST_NOLUN)
return 0;
+ if (starget->no_report_luns)
+ return 1;
if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
sdev = scsi_alloc_sdev(starget, 0, NULL);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f59d4a05ecd7..80fbe2ac0b47 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -313,7 +313,7 @@ static void fc_scsi_scan_rport(struct work_struct *work);
#define FC_STARGET_NUM_ATTRS 3
#define FC_RPORT_NUM_ATTRS 10
#define FC_VPORT_NUM_ATTRS 9
-#define FC_HOST_NUM_ATTRS 22
+#define FC_HOST_NUM_ATTRS 29
struct fc_internal {
struct scsi_transport_template t;
@@ -399,6 +399,20 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->max_npiv_vports = 0;
memset(fc_host->serial_number, 0,
sizeof(fc_host->serial_number));
+ memset(fc_host->manufacturer, 0,
+ sizeof(fc_host->manufacturer));
+ memset(fc_host->model, 0,
+ sizeof(fc_host->model));
+ memset(fc_host->model_description, 0,
+ sizeof(fc_host->model_description));
+ memset(fc_host->hardware_version, 0,
+ sizeof(fc_host->hardware_version));
+ memset(fc_host->driver_version, 0,
+ sizeof(fc_host->driver_version));
+ memset(fc_host->firmware_version, 0,
+ sizeof(fc_host->firmware_version));
+ memset(fc_host->optionrom_version, 0,
+ sizeof(fc_host->optionrom_version));
fc_host->port_id = -1;
fc_host->port_type = FC_PORTTYPE_UNKNOWN;
@@ -1513,6 +1527,13 @@ fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
+fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
+fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
/* Dynamic Host Attributes */
@@ -2208,6 +2229,13 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
}
SETUP_HOST_ATTRIBUTE_RD(serial_number);
+ SETUP_HOST_ATTRIBUTE_RD(manufacturer);
+ SETUP_HOST_ATTRIBUTE_RD(model);
+ SETUP_HOST_ATTRIBUTE_RD(model_description);
+ SETUP_HOST_ATTRIBUTE_RD(hardware_version);
+ SETUP_HOST_ATTRIBUTE_RD(driver_version);
+ SETUP_HOST_ATTRIBUTE_RD(firmware_version);
+ SETUP_HOST_ATTRIBUTE_RD(optionrom_version);
SETUP_HOST_ATTRIBUTE_RD(port_id);
SETUP_HOST_ATTRIBUTE_RD(port_type);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index cfd491437239..1cf640e575da 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -727,10 +727,11 @@ static void iscsi_session_release(struct device *dev)
kfree(session);
}
-static int iscsi_is_session_dev(const struct device *dev)
+int iscsi_is_session_dev(const struct device *dev)
{
return dev->release == iscsi_session_release;
}
+EXPORT_SYMBOL_GPL(iscsi_is_session_dev);
static int iscsi_iter_session_fn(struct device *dev, void *data)
{
@@ -1476,6 +1477,66 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
}
EXPORT_SYMBOL_GPL(iscsi_conn_login_event);
+void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
+ enum iscsi_host_event_code code, uint32_t data_size,
+ uint8_t *data)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+ skb = alloc_skb(len, GFP_NOIO);
+ if (!skb) {
+ printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n",
+ host_no, code);
+ return;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = NLMSG_DATA(nlh);
+ ev->transport_handle = iscsi_handle(transport);
+ ev->type = ISCSI_KEVENT_HOST_EVENT;
+ ev->r.host_event.host_no = host_no;
+ ev->r.host_event.code = code;
+ ev->r.host_event.data_size = data_size;
+
+ if (data_size)
+ memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
+}
+EXPORT_SYMBOL_GPL(iscsi_post_host_event);
+
+void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
+ uint32_t status, uint32_t pid, uint32_t data_size,
+ uint8_t *data)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ struct iscsi_uevent *ev;
+ int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+ skb = alloc_skb(len, GFP_NOIO);
+ if (!skb) {
+ printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
+ return;
+ }
+
+ nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = NLMSG_DATA(nlh);
+ ev->transport_handle = iscsi_handle(transport);
+ ev->type = ISCSI_KEVENT_PING_COMP;
+ ev->r.ping_comp.host_no = host_no;
+ ev->r.ping_comp.status = status;
+ ev->r.ping_comp.pid = pid;
+ ev->r.ping_comp.data_size = data_size;
+ memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+ iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
+}
+EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
+
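Both new transport events use the same wire layout: a struct iscsi_uevent immediately followed by data_size raw payload bytes, with the skb sized up front as NLMSG_SPACE(sizeof(*ev) + data_size). The arithmetic can be checked from user space with the real macros from <linux/netlink.h>; the event struct below is only a stand-in, since the actual iscsi_uevent union is far larger:

    #include <linux/netlink.h>
    #include <stdio.h>

    struct fake_uevent { unsigned int type, data_size; };  /* stand-in */

    int main(void)
    {
        unsigned int data_size = 8 * sizeof(unsigned int); /* e.g. out_mbox[8] */

        /* Aligned netlink header + event header + trailing payload:
         * the same "len" both new event helpers compute. */
        printf("skb len = %zu\n",
               (size_t)NLMSG_SPACE(sizeof(struct fake_uevent) + data_size));
        return 0;
    }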
static int
iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
void *payload, int size)
@@ -1915,6 +1976,123 @@ iscsi_set_iface_params(struct iscsi_transport *transport,
}
static int
+iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct sockaddr *dst_addr;
+ int err;
+
+ if (!transport->send_ping)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.iscsi_ping.host_no);
+ if (!shost) {
+ printk(KERN_ERR "iscsi_ping could not find host no %u\n",
+ ev->u.iscsi_ping.host_no);
+ return -ENODEV;
+ }
+
+ dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev));
+ err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num,
+ ev->u.iscsi_ping.iface_type,
+ ev->u.iscsi_ping.payload_size,
+ ev->u.iscsi_ping.pid,
+ dst_addr);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+ struct Scsi_Host *shost = NULL;
+ struct iscsi_chap_rec *chap_rec;
+ struct iscsi_internal *priv;
+ struct sk_buff *skbchap;
+ struct nlmsghdr *nlhchap;
+ struct iscsi_uevent *evchap;
+ uint32_t chap_buf_size;
+ int len, err = 0;
+ char *buf;
+
+ if (!transport->get_chap)
+ return -EINVAL;
+
+ priv = iscsi_if_transport_lookup(transport);
+ if (!priv)
+ return -EINVAL;
+
+ chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
+ len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+
+ shost = scsi_host_lookup(ev->u.get_chap.host_no);
+ if (!shost) {
+ printk(KERN_ERR "%s: failed. Cound not find host no %u\n",
+ __func__, ev->u.get_chap.host_no);
+ return -ENODEV;
+ }
+
+ do {
+ int actual_size;
+
+ skbchap = alloc_skb(len, GFP_KERNEL);
+ if (!skbchap) {
+ printk(KERN_ERR "can not deliver chap: OOM\n");
+ err = -ENOMEM;
+ goto exit_get_chap;
+ }
+
+ nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
+ (len - sizeof(*nlhchap)), 0);
+ evchap = NLMSG_DATA(nlhchap);
+ memset(evchap, 0, sizeof(*evchap));
+ evchap->transport_handle = iscsi_handle(transport);
+ evchap->type = nlh->nlmsg_type;
+ evchap->u.get_chap.host_no = ev->u.get_chap.host_no;
+ evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx;
+ evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries;
+ buf = (char *) ((char *)evchap + sizeof(*evchap));
+ memset(buf, 0, chap_buf_size);
+
+ err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
+ &evchap->u.get_chap.num_entries, buf);
+
+ actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+ skb_trim(skbchap, NLMSG_ALIGN(actual_size));
+ nlhchap->nlmsg_len = actual_size;
+
+ err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID,
+ GFP_KERNEL);
+ } while (err < 0 && err != -ECONNREFUSED);
+
+exit_get_chap:
+ scsi_host_put(shost);
+ return err;
+}
+
+static int iscsi_delete_chap(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ int err = 0;
+
+ if (!transport->delete_chap)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.delete_chap.host_no);
+ if (!shost) {
+ printk(KERN_ERR "%s could not find host no %u\n",
+ __func__, ev->u.delete_chap.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
@@ -1941,7 +2119,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
- NETLINK_CREDS(skb)->pid,
+ NETLINK_CB(skb).pid,
ev->u.c_session.initial_cmdsn,
ev->u.c_session.cmds_max,
ev->u.c_session.queue_depth);
@@ -1954,7 +2132,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
}
err = iscsi_if_create_session(priv, ep, ev,
- NETLINK_CREDS(skb)->pid,
+ NETLINK_CB(skb).pid,
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
@@ -2059,6 +2237,15 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = iscsi_set_iface_params(transport, ev,
nlmsg_attrlen(nlh, sizeof(*ev)));
break;
+ case ISCSI_UEVENT_PING:
+ err = iscsi_send_ping(transport, ev);
+ break;
+ case ISCSI_UEVENT_GET_CHAP:
+ err = iscsi_get_chap(transport, nlh);
+ break;
+ case ISCSI_UEVENT_DELETE_CHAP:
+ err = iscsi_delete_chap(transport, ev);
+ break;
default:
err = -ENOSYS;
break;
@@ -2108,9 +2295,11 @@ iscsi_if_rx(struct sk_buff *skb)
*/
if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
break;
+ if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
+ break;
err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
- } while (err < 0 && err != -ECONNREFUSED);
+ } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
skb_pull(skb, rlen);
}
mutex_unlock(&rx_queue_mutex);
@@ -2286,6 +2475,8 @@ iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1);
+iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1);
iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
@@ -2382,6 +2573,8 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_priv_sess_recovery_tmo.attr,
&dev_attr_priv_sess_state.attr,
&dev_attr_priv_sess_creator.attr,
+ &dev_attr_sess_chap_out_idx.attr,
+ &dev_attr_sess_chap_in_idx.attr,
NULL,
};
@@ -2413,6 +2606,10 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_TARGET_NAME;
else if (attr == &dev_attr_sess_tpgt.attr)
param = ISCSI_PARAM_TPGT;
+ else if (attr == &dev_attr_sess_chap_in_idx.attr)
+ param = ISCSI_PARAM_CHAP_IN_IDX;
+ else if (attr == &dev_attr_sess_chap_out_idx.attr)
+ param = ISCSI_PARAM_CHAP_OUT_IDX;
else if (attr == &dev_attr_sess_password.attr)
param = ISCSI_PARAM_USERNAME;
else if (attr == &dev_attr_sess_password_in.attr)
@@ -2476,12 +2673,16 @@ iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE);
+iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED);
static struct attribute *iscsi_host_attrs[] = {
&dev_attr_host_netdev.attr,
&dev_attr_host_hwaddress.attr,
&dev_attr_host_ipaddress.attr,
&dev_attr_host_initiatorname.attr,
+ &dev_attr_host_port_state.attr,
+ &dev_attr_host_port_speed.attr,
NULL,
};
@@ -2501,6 +2702,10 @@ static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
param = ISCSI_HOST_PARAM_IPADDRESS;
else if (attr == &dev_attr_host_initiatorname.attr)
param = ISCSI_HOST_PARAM_INITIATOR_NAME;
+ else if (attr == &dev_attr_host_port_state.attr)
+ param = ISCSI_HOST_PARAM_PORT_STATE;
+ else if (attr == &dev_attr_host_port_speed.attr)
+ param = ISCSI_HOST_PARAM_PORT_SPEED;
else {
WARN_ONCE(1, "Invalid host attr");
return 0;
@@ -2514,6 +2719,61 @@ static struct attribute_group iscsi_host_group = {
.is_visible = iscsi_host_attr_is_visible,
};
+/* convert iscsi_port_speed values to ascii string name */
+static const struct {
+ enum iscsi_port_speed value;
+ char *name;
+} iscsi_port_speed_names[] = {
+ {ISCSI_PORT_SPEED_UNKNOWN, "Unknown" },
+ {ISCSI_PORT_SPEED_10MBPS, "10 Mbps" },
+ {ISCSI_PORT_SPEED_100MBPS, "100 Mbps" },
+ {ISCSI_PORT_SPEED_1GBPS, "1 Gbps" },
+ {ISCSI_PORT_SPEED_10GBPS, "10 Gbps" },
+};
+
+char *iscsi_get_port_speed_name(struct Scsi_Host *shost)
+{
+ int i;
+ char *speed = "Unknown!";
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ uint32_t port_speed = ihost->port_speed;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) {
+ if (iscsi_port_speed_names[i].value & port_speed) {
+ speed = iscsi_port_speed_names[i].name;
+ break;
+ }
+ }
+ return speed;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name);
+
+/* convert iscsi_port_state values to ascii string name */
+static const struct {
+ enum iscsi_port_state value;
+ char *name;
+} iscsi_port_state_names[] = {
+ {ISCSI_PORT_STATE_DOWN, "LINK DOWN" },
+ {ISCSI_PORT_STATE_UP, "LINK UP" },
+};
+
+char *iscsi_get_port_state_name(struct Scsi_Host *shost)
+{
+ int i;
+ char *state = "Unknown!";
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ uint32_t port_state = ihost->port_state;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) {
+ if (iscsi_port_state_names[i].value & port_state) {
+ state = iscsi_port_state_names[i].name;
+ break;
+ }
+ }
+ return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_port_state_name);
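
A hedged usage sketch for the two helpers above: an iSCSI LLD's
.get_host_param callback might format its sysfs output with them. The
function name below is illustrative and not part of this patch; the
helpers assume shost->shost_data is the iscsi_cls_host set up by the
transport class.

    static int example_get_host_param(struct Scsi_Host *shost,
                                      enum iscsi_host_param param, char *buf)
    {
            switch (param) {
            case ISCSI_HOST_PARAM_PORT_STATE:
                    return sprintf(buf, "%s\n",
                                   iscsi_get_port_state_name(shost));
            case ISCSI_HOST_PARAM_PORT_SPEED:
                    return sprintf(buf, "%s\n",
                                   iscsi_get_port_speed_name(shost));
            default:
                    return -ENOSYS;
            }
    }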
+
static int iscsi_session_match(struct attribute_container *cont,
struct device *dev)
{
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 9d9330ae4213..f7565fc4f0e3 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -615,6 +615,7 @@ do_sas_phy_reset(struct device *dev, size_t count, int hard_reset)
error = i->f->phy_reset(phy, hard_reset);
if (error)
return error;
+ phy->enabled = 1;
return count;
};
@@ -652,9 +653,21 @@ sas_phy_linkerror_attr(running_disparity_error_count);
sas_phy_linkerror_attr(loss_of_dword_sync_count);
sas_phy_linkerror_attr(phy_reset_problem_count);
+static int sas_phy_setup(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct sas_phy *phy = dev_to_phy(dev);
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+
+ if (i->f->phy_setup)
+ i->f->phy_setup(phy);
+
+ return 0;
+}
static DECLARE_TRANSPORT_CLASS(sas_phy_class,
- "sas_phy", NULL, NULL, NULL);
+ "sas_phy", sas_phy_setup, NULL, NULL);
static int sas_phy_match(struct attribute_container *cont, struct device *dev)
{
@@ -678,7 +691,11 @@ static int sas_phy_match(struct attribute_container *cont, struct device *dev)
static void sas_phy_release(struct device *dev)
{
struct sas_phy *phy = dev_to_phy(dev);
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+ if (i->f->phy_release)
+ i->f->phy_release(phy);
put_device(dev->parent);
kfree(phy);
}
@@ -1044,6 +1061,29 @@ int scsi_is_sas_port(const struct device *dev)
EXPORT_SYMBOL(scsi_is_sas_port);
/**
+ * sas_port_get_phy - try to take a reference on a port member
+ * @port: port to check
+ */
+struct sas_phy *sas_port_get_phy(struct sas_port *port)
+{
+ struct sas_phy *phy;
+
+ mutex_lock(&port->phy_list_mutex);
+ if (list_empty(&port->phy_list))
+ phy = NULL;
+ else {
+ struct list_head *ent = port->phy_list.next;
+
+ phy = list_entry(ent, typeof(*phy), port_siblings);
+ get_device(&phy->dev);
+ }
+ mutex_unlock(&port->phy_list_mutex);
+
+ return phy;
+}
+EXPORT_SYMBOL(sas_port_get_phy);
+
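A minimal usage sketch for sas_port_get_phy(): the reference it takes
with get_device() is the caller's to drop (a bare put_device() is shown
here; any matching put helper would do the same):

    struct sas_phy *phy = sas_port_get_phy(port);

    if (phy) {
            /* ... inspect phy->identify, negotiated rates, etc. ... */
            put_device(&phy->dev);  /* balances get_device() in the helper */
    }
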
+/**
* sas_port_add_phy - add another phy to a port to form a wide port
* @port: port to add the phy to
* @phy: phy to add
@@ -1603,6 +1643,20 @@ sas_rphy_delete(struct sas_rphy *rphy)
EXPORT_SYMBOL(sas_rphy_delete);
/**
+ * sas_rphy_unlink - unlink SAS remote PHY
+ * @rphy: SAS remote phy to unlink from its parent port
+ *
+ * Removes port reference to an rphy
+ */
+void sas_rphy_unlink(struct sas_rphy *rphy)
+{
+ struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
+
+ parent->rphy = NULL;
+}
+EXPORT_SYMBOL(sas_rphy_unlink);
+
+/**
* sas_rphy_remove - remove SAS remote PHY
* @rphy: SAS remote phy to remove
*
@@ -1612,7 +1666,6 @@ void
sas_rphy_remove(struct sas_rphy *rphy)
{
struct device *dev = &rphy->dev;
- struct sas_port *parent = dev_to_sas_port(dev->parent);
switch (rphy->identify.device_type) {
case SAS_END_DEVICE:
@@ -1626,10 +1679,9 @@ sas_rphy_remove(struct sas_rphy *rphy)
break;
}
+ sas_rphy_unlink(rphy);
transport_remove_device(dev);
device_del(dev);
-
- parent->rphy = NULL;
}
EXPORT_SYMBOL(sas_rphy_remove);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c691fb50e6cb..5ba5c2a9e8e9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -107,6 +107,7 @@ static int sd_suspend(struct device *, pm_message_t state);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *);
+static int sd_eh_action(struct scsi_cmnd *, unsigned char *, int, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
@@ -346,6 +347,31 @@ sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
return count;
}
+static ssize_t
+sd_show_max_medium_access_timeouts(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts);
+}
+
+static ssize_t
+sd_store_max_medium_access_timeouts(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
+
+ return err ? err : count;
+}
+
static struct device_attribute sd_disk_attrs[] = {
__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
sd_store_cache_type),
@@ -360,6 +386,9 @@ static struct device_attribute sd_disk_attrs[] = {
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
__ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
sd_store_provisioning_mode),
+ __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
+ sd_show_max_medium_access_timeouts,
+ sd_store_max_medium_access_timeouts),
__ATTR_NULL,
};
@@ -382,6 +411,7 @@ static struct scsi_driver sd_template = {
},
.rescan = sd_rescan,
.done = sd_done,
+ .eh_action = sd_eh_action,
};
/*
@@ -497,6 +527,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
max(sdkp->physical_block_size,
sdkp->unmap_granularity * logical_block_size);
+ sdkp->provisioning_mode = mode;
+
switch (mode) {
case SD_LBP_DISABLE:
@@ -524,8 +556,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
-
- sdkp->provisioning_mode = mode;
}
/**
@@ -634,7 +664,7 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
}
/**
- * sd_init_command - build a scsi (read or write) command from
+ * sd_prep_fn - build a scsi (read or write) command from
* information in the request structure.
* @SCpnt: pointer to mid-level's per scsi command structure that
* contains request and into which the scsi command is written
@@ -681,7 +711,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
ret = BLKPREP_KILL;
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
- "sd_init_command: block=%llu, "
+ "sd_prep_fn: block=%llu, "
"count=%d\n",
(unsigned long long)block,
this_count));
@@ -1182,9 +1212,14 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
retval = -ENODEV;
if (scsi_block_when_processing_errors(sdp)) {
+ retval = scsi_autopm_get_device(sdp);
+ if (retval)
+ goto out;
+
sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
sshdr);
+ scsi_autopm_put_device(sdp);
}
/* failed to execute TUR, assume media not present */
@@ -1313,6 +1348,55 @@ static const struct block_device_operations sd_fops = {
.unlock_native_capacity = sd_unlock_native_capacity,
};
+/**
+ * sd_eh_action - error handling callback
+ * @scmd: sd-issued command that has failed
+ * @eh_cmnd: The command that was sent during error handling
+ * @eh_cmnd_len: Length of eh_cmnd in bytes
+ * @eh_disp: The recovery disposition suggested by the midlayer
+ *
+ * This function is called by the SCSI midlayer upon completion of
+ * an error handling command (TEST UNIT READY, START STOP UNIT,
+ * etc.) The command sent to the device by the error handler is
+ * stored in eh_cmnd. The result of sending the eh command is
+ * passed in eh_disp.
+ **/
+static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,
+ int eh_cmnd_len, int eh_disp)
+{
+ struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+
+ if (!scsi_device_online(scmd->device) ||
+ !scsi_medium_access_command(scmd))
+ return eh_disp;
+
+ /*
+ * The device has timed out executing a medium access command.
+ * However, the TEST UNIT READY command sent during error
+ * handling completed successfully. Either the device is in the
+ * process of recovering or it has suffered an internal failure
+ * that prevents access to the storage medium.
+ */
+ if (host_byte(scmd->result) == DID_TIME_OUT && eh_disp == SUCCESS &&
+ eh_cmnd_len && eh_cmnd[0] == TEST_UNIT_READY)
+ sdkp->medium_access_timed_out++;
+
+ /*
+ * If the device keeps failing read/write commands but TEST UNIT
+ * READY always completes successfully we assume that medium
+ * access is no longer possible and take the device offline.
+ */
+ if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
+ scmd_printk(KERN_ERR, scmd,
+ "Medium access timeout failure. Offlining disk!\n");
+ scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+
+ return FAILED;
+ }
+
+ return eh_disp;
+}
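
In operation: sd_done() clears medium_access_timed_out once a completed
command makes it past its early error checks (see the hunk below), the
threshold defaults to SD_MAX_MEDIUM_TIMEOUTS (2, defined in sd.h later
in this patch), and an administrator with CAP_SYS_ADMIN can tune it
through the scsi_disk max_medium_access_timeouts attribute added above.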
+
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
u64 start_lba = blk_rq_pos(scmd->request);
@@ -1402,6 +1486,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
(!sense_valid || sense_deferred))
goto out;
+ sdkp->medium_access_timed_out = 0;
+
switch (sshdr.sense_key) {
case HARDWARE_ERROR:
case MEDIUM_ERROR:
@@ -2349,7 +2435,7 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
* some USB ones crash on receiving them, and the pages
* we currently ask for are for SPC-3 and beyond
*/
- if (sdp->scsi_level > SCSI_SPC_2)
+ if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
return 1;
return 0;
}
@@ -2523,6 +2609,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sdkp->RCD = 0;
sdkp->ATO = 0;
sdkp->first_scan = 1;
+ sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
sd_revalidate_disk(gd);
@@ -2562,8 +2649,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
* (e.g. /dev/sda). More precisely it is the block device major
* and minor number that is chosen here.
*
- * Assume sd_attach is not re-entrant (for time being)
- * Also think about sd_attach() and sd_remove() running coincidentally.
+ * Assume sd_probe is not re-entrant (for time being)
+ * Also think about sd_probe() and sd_remove() running coincidentally.
**/
static int sd_probe(struct device *dev)
{
@@ -2578,7 +2665,7 @@ static int sd_probe(struct device *dev)
goto out;
SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
- "sd_attach\n"));
+ "sd_probe\n"));
error = -ENOMEM;
sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 4163f2910e3d..f703f4827b6f 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -20,6 +20,7 @@
*/
#define SD_MAX_RETRIES 5
#define SD_PASSTHROUGH_RETRIES 1
+#define SD_MAX_MEDIUM_TIMEOUTS 2
/*
* Size of the initial data buffer for mode and read capacity data
@@ -59,6 +60,8 @@ struct scsi_disk {
u32 unmap_alignment;
u32 index;
unsigned int physical_block_size;
+ unsigned int max_medium_access_timeouts;
+ unsigned int medium_access_timed_out;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
@@ -88,6 +91,38 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
(sdsk)->disk->disk_name, ##a) : \
sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case SYNCHRONIZE_CACHE:
+ case VERIFY:
+ case VERIFY_12:
+ case VERIFY_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_SAME:
+ case WRITE_SAME_16:
+ case UNMAP:
+ return 1;
+ case VARIABLE_LENGTH_CMD:
+ switch (scmd->cmnd[9]) {
+ case READ_32:
+ case VERIFY_32:
+ case WRITE_32:
+ case WRITE_SAME_32:
+ return 1;
+ }
+ }
+
+ return 0;
+}
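
For concreteness, how the helper classifies a few opcodes (values from
scsi.h):

    /* returns 1 (medium access):                              */
    /*   READ_10           = 0x28                              */
    /*   SYNCHRONIZE_CACHE = 0x35                              */
    /*   WRITE_SAME_16     = 0x93                              */
    /* returns 0 for non-medium commands such as INQUIRY       */
    /* (0x12) or TEST UNIT READY (0x00), so timeouts on those  */
    /* never count toward the sd_eh_action() offline threshold */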
+
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 0cb39ff21171..e52d5bc42bc4 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -392,7 +392,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
virt = bio->bi_integrity->bip_sector & 0xffffffff;
bip_for_each_vec(iv, bio->bi_integrity, i) {
- sdt = kmap_atomic(iv->bv_page, KM_USER0)
+ sdt = kmap_atomic(iv->bv_page)
+ iv->bv_offset;
for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
@@ -405,16 +405,16 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
phys++;
}
- kunmap_atomic(sdt, KM_USER0);
+ kunmap_atomic(sdt);
}
- bio->bi_flags |= BIO_MAPPED_INTEGRITY;
+ bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
}
return 0;
error:
- kunmap_atomic(sdt, KM_USER0);
+ kunmap_atomic(sdt);
sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
__func__, virt, phys, be32_to_cpu(sdt->ref_tag),
be16_to_cpu(sdt->app_tag));
@@ -453,13 +453,13 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
virt = bio->bi_integrity->bip_sector & 0xffffffff;
bip_for_each_vec(iv, bio->bi_integrity, i) {
- sdt = kmap_atomic(iv->bv_page, KM_USER0)
+ sdt = kmap_atomic(iv->bv_page)
+ iv->bv_offset;
for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
if (sectors == 0) {
- kunmap_atomic(sdt, KM_USER0);
+ kunmap_atomic(sdt);
return;
}
@@ -474,7 +474,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
sectors--;
}
- kunmap_atomic(sdt, KM_USER0);
+ kunmap_atomic(sdt);
}
}
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9b28f39bac26..e41998cb098e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -42,7 +42,6 @@ static const char *verstr = "20101219";
#include <asm/uaccess.h>
#include <asm/dma.h>
-#include <asm/system.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
@@ -1106,6 +1105,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
STp->drv_buffer));
}
STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
+ if (!STp->drv_buffer && STp->immediate_filemark) {
+ printk(KERN_WARNING
+ "%s: non-buffered tape: disabling writing immediate filemarks\n",
+ name);
+ STp->immediate_filemark = 0;
+ }
}
st_release_request(SRpnt);
SRpnt = NULL;
@@ -1177,6 +1182,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
static int st_open(struct inode *inode, struct file *filp)
{
int i, retval = (-EIO);
+ int resumed = 0;
struct scsi_tape *STp;
struct st_partstat *STps;
int dev = TAPE_NR(inode);
@@ -1211,6 +1217,11 @@ static int st_open(struct inode *inode, struct file *filp)
write_unlock(&st_dev_arr_lock);
STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
+ if (scsi_autopm_get_device(STp->device) < 0) {
+ retval = -EIO;
+ goto err_out;
+ }
+ resumed = 1;
if (!scsi_block_when_processing_errors(STp->device)) {
retval = (-ENXIO);
goto err_out;
@@ -1258,6 +1269,8 @@ static int st_open(struct inode *inode, struct file *filp)
normalize_buffer(STp->buffer);
STp->in_use = 0;
scsi_tape_put(STp);
+ if (resumed)
+ scsi_autopm_put_device(STp->device);
mutex_unlock(&st_mutex);
return retval;
@@ -1306,6 +1319,8 @@ static int st_flush(struct file *filp, fl_owner_t id)
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = WRITE_FILEMARKS;
+ if (STp->immediate_filemark)
+ cmd[1] = 1;
cmd[4] = 1 + STp->two_fm;
SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
@@ -1391,6 +1406,7 @@ static int st_release(struct inode *inode, struct file *filp)
write_lock(&st_dev_arr_lock);
STp->in_use = 0;
write_unlock(&st_dev_arr_lock);
+ scsi_autopm_put_device(STp->device);
scsi_tape_put(STp);
return result;
@@ -2172,8 +2188,9 @@ static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char
name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
STp->scsi2_logical);
printk(KERN_INFO
- "%s: sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate,
- STp->sili);
+ "%s: sysv: %d nowait: %d sili: %d nowait_filemark: %d\n",
+ name, STm->sysv, STp->immediate, STp->sili,
+ STp->immediate_filemark);
printk(KERN_INFO "%s: debugging: %d\n",
name, debugging);
}
@@ -2215,6 +2232,7 @@ static int st_set_options(struct scsi_tape *STp, long options)
STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
STp->immediate = (options & MT_ST_NOWAIT) != 0;
+ STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0;
STm->sysv = (options & MT_ST_SYSV) != 0;
STp->sili = (options & MT_ST_SILI) != 0;
DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
@@ -2246,6 +2264,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
STp->scsi2_logical = value;
if ((options & MT_ST_NOWAIT) != 0)
STp->immediate = value;
+ if ((options & MT_ST_NOWAIT_EOF) != 0)
+ STp->immediate_filemark = value;
if ((options & MT_ST_SYSV) != 0)
STm->sysv = value;
if ((options & MT_ST_SILI) != 0)
@@ -2705,7 +2725,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[0] = WRITE_FILEMARKS;
if (cmd_in == MTWSM)
cmd[1] = 2;
- if (cmd_in == MTWEOFI)
+ if (cmd_in == MTWEOFI ||
+ (cmd_in == MTWEOF && STp->immediate_filemark))
cmd[1] |= 1;
cmd[2] = (arg >> 16);
cmd[3] = (arg >> 8);
@@ -4084,6 +4105,7 @@ static int st_probe(struct device *dev)
tpnt->scsi2_logical = ST_SCSI2LOGICAL;
tpnt->sili = ST_SILI;
tpnt->immediate = ST_NOWAIT;
+ tpnt->immediate_filemark = 0;
tpnt->default_drvbuffer = 0xff; /* No forced buffering */
tpnt->partition = 0;
tpnt->new_partition = 0;
@@ -4154,6 +4176,7 @@ static int st_probe(struct device *dev)
if (error)
goto out_free_tape;
}
+ scsi_autopm_put_device(SDp);
sdev_printk(KERN_NOTICE, SDp,
"Attached scsi tape %s\n", tape_name(tpnt));
@@ -4201,6 +4224,7 @@ static int st_remove(struct device *dev)
struct scsi_tape *tpnt;
int i, j, mode;
+ scsi_autopm_get_device(SDp);
write_lock(&st_dev_arr_lock);
for (i = 0; i < st_dev_max; i++) {
tpnt = scsi_tapes[i];
@@ -4467,6 +4491,7 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0;
options |= STm->sysv ? MT_ST_SYSV : 0;
options |= STp->immediate ? MT_ST_NOWAIT : 0;
+ options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0;
options |= STp->sili ? MT_ST_SILI : 0;
l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index f91a67c6d968..ea35632b986c 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -120,6 +120,7 @@ struct scsi_tape {
unsigned char c_algo; /* compression algorithm */
unsigned char pos_unknown; /* after reset position unknown */
unsigned char sili; /* use SILI when reading in variable b mode */
+ unsigned char immediate_filemark; /* write filemark immediately */
int tape_type;
int long_timeout; /* timeout for commands known to take long time */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
new file mode 100644
index 000000000000..83a1972a1999
--- /dev/null
+++ b/drivers/scsi/storvsc_drv.c
@@ -0,0 +1,1548 @@
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/hyperv.h>
+#include <linux/mempool.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_devinfo.h>
+#include <scsi/scsi_dbg.h>
+
+/*
+ * All wire protocol details (storage protocol between the guest and the host)
+ * are consolidated here.
+ *
+ * Begin protocol definitions.
+ */
+
+/*
+ * Version history:
+ * V1 Beta: 0.1
+ * V1 RC < 2008/1/31: 1.0
+ * V1 RC > 2008/1/31: 2.0
+ * Win7: 4.2
+ */
+
+#define VMSTOR_CURRENT_MAJOR 4
+#define VMSTOR_CURRENT_MINOR 2
+
+
+/* Packet structure describing virtual storage requests. */
+enum vstor_packet_operation {
+ VSTOR_OPERATION_COMPLETE_IO = 1,
+ VSTOR_OPERATION_REMOVE_DEVICE = 2,
+ VSTOR_OPERATION_EXECUTE_SRB = 3,
+ VSTOR_OPERATION_RESET_LUN = 4,
+ VSTOR_OPERATION_RESET_ADAPTER = 5,
+ VSTOR_OPERATION_RESET_BUS = 6,
+ VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
+ VSTOR_OPERATION_END_INITIALIZATION = 8,
+ VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
+ VSTOR_OPERATION_QUERY_PROPERTIES = 10,
+ VSTOR_OPERATION_ENUMERATE_BUS = 11,
+ VSTOR_OPERATION_MAXIMUM = 11
+};
+
+/*
+ * Platform neutral description of a scsi request -
+ * this remains the same across the wire regardless of 32/64 bit
+ * note: it's patterned after the SCSI_PASS_THROUGH structure
+ */
+#define STORVSC_MAX_CMD_LEN 0x10
+#define STORVSC_SENSE_BUFFER_SIZE 0x12
+#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
+
+struct vmscsi_request {
+ u16 length;
+ u8 srb_status;
+ u8 scsi_status;
+
+ u8 port_number;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+
+ u8 cdb_length;
+ u8 sense_info_length;
+ u8 data_in;
+ u8 reserved;
+
+ u32 data_transfer_length;
+
+ union {
+ u8 cdb[STORVSC_MAX_CMD_LEN];
+ u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
+ u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
+ };
+} __packed;
+
+
+/*
+ * This structure is sent during the initialization phase to get the different
+ * properties of the channel.
+ */
+struct vmstorage_channel_properties {
+ u16 protocol_version;
+ u8 path_id;
+ u8 target_id;
+
+ /* Note: port number is only really known on the client side */
+ u32 port_number;
+ u32 flags;
+ u32 max_transfer_bytes;
+
+ /*
+ * This id is unique for each channel and will correspond with
+ * vendor specific data in the inquiry data.
+ */
+
+ u64 unique_id;
+} __packed;
+
+/* This structure is sent during the storage protocol negotiations. */
+struct vmstorage_protocol_version {
+ /* Major (MSW) and minor (LSW) version numbers. */
+ u16 major_minor;
+
+ /*
+ * Revision number is auto-incremented whenever this file is changed
+ * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
+ * definitely indicate incompatibility--but it does indicate mismatched
+ * builds.
+ * This is only used on the windows side. Just set it to 0.
+ */
+ u16 revision;
+} __packed;
+
+/* Channel Property Flags */
+#define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
+#define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
+
+struct vstor_packet {
+ /* Requested operation type */
+ enum vstor_packet_operation operation;
+
+ /* Flags - see below for values */
+ u32 flags;
+
+ /* Status of the request returned from the server side. */
+ u32 status;
+
+ /* Data payload area */
+ union {
+ /*
+ * Structure used to forward SCSI commands from the
+ * client to the server.
+ */
+ struct vmscsi_request vm_srb;
+
+ /* Structure used to query channel properties. */
+ struct vmstorage_channel_properties storage_channel_properties;
+
+ /* Used during version negotiations. */
+ struct vmstorage_protocol_version version;
+ };
+} __packed;
+
+/*
+ * Packet Flags:
+ *
+ * This flag indicates that the server should send back a completion for this
+ * packet.
+ */
+
+#define REQUEST_COMPLETION_FLAG 0x1
+
+/* Matches Windows-end */
+enum storvsc_request_type {
+ WRITE_TYPE = 0,
+ READ_TYPE,
+ UNKNOWN_TYPE,
+};
+
+/*
+ * SRB status codes and masks; a subset of the codes used here.
+ */
+
+#define SRB_STATUS_AUTOSENSE_VALID 0x80
+#define SRB_STATUS_INVALID_LUN 0x20
+#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ERROR 0x04
+
+/*
+ * This is the end of Protocol specific defines.
+ */
+
+
+/*
+ * We setup a mempool to allocate request structures for this driver
+ * on a per-lun basis. The following define specifies the number of
+ * elements in the pool.
+ */
+
+#define STORVSC_MIN_BUF_NR 64
+static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);
+
+module_param(storvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
+
+#define STORVSC_MAX_IO_REQUESTS 128
+
+/*
+ * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
+ * reality, the path/target is not used (i.e. always set to 0) so our
+ * scsi host adapter essentially has 1 bus with 1 target that contains
+ * up to STORVSC_MAX_LUNS_PER_TARGET luns.
+ */
+#define STORVSC_MAX_LUNS_PER_TARGET 64
+#define STORVSC_MAX_TARGETS 1
+#define STORVSC_MAX_CHANNELS 1
+
+
+
+struct storvsc_cmd_request {
+ struct list_head entry;
+ struct scsi_cmnd *cmd;
+
+ unsigned int bounce_sgl_count;
+ struct scatterlist *bounce_sgl;
+
+ struct hv_device *device;
+
+ /* Synchronize the request/response if needed */
+ struct completion wait_event;
+
+ unsigned char *sense_buffer;
+ struct hv_multipage_buffer data_buffer;
+ struct vstor_packet vstor_packet;
+};
+
+
+/* A storvsc device is a device object that contains a vmbus channel */
+struct storvsc_device {
+ struct hv_device *device;
+
+ bool destroy;
+ bool drain_notify;
+ atomic_t num_outstanding_req;
+ struct Scsi_Host *host;
+
+ wait_queue_head_t waiting_to_drain;
+
+ /*
+ * Each unique Port/Path/Target represents one channel, i.e. one scsi
+ * controller. In reality, the pathid and targetid are always 0
+ * and the port is set by us
+ */
+ unsigned int port_number;
+ unsigned char path_id;
+ unsigned char target_id;
+
+ /* Used for vsc/vsp channel reset process */
+ struct storvsc_cmd_request init_request;
+ struct storvsc_cmd_request reset_request;
+};
+
+struct stor_mem_pools {
+ struct kmem_cache *request_pool;
+ mempool_t *request_mempool;
+};
+
+struct hv_host_device {
+ struct hv_device *dev;
+ unsigned int port;
+ unsigned char path;
+ unsigned char target;
+};
+
+struct storvsc_scan_work {
+ struct work_struct work;
+ struct Scsi_Host *host;
+ uint lun;
+};
+
+static void storvsc_bus_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ int id, order_id;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ for (id = 0; id < wrk->host->max_id; ++id) {
+ if (wrk->host->reverse_ordering)
+ order_id = wrk->host->max_id - id - 1;
+ else
+ order_id = id;
+
+ scsi_scan_target(&wrk->host->shost_gendev, 0,
+ order_id, SCAN_WILD_CARD, 1);
+ }
+ kfree(wrk);
+}
+
+static void storvsc_remove_lun(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ if (!scsi_host_get(wrk->host))
+ goto done;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ scsi_host_put(wrk->host);
+
+done:
+ kfree(wrk);
+}
+
+/*
+ * Major/minor macros. Minor version is in LSB, meaning that earlier flat
+ * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
+ */
+
+static inline u16 storvsc_get_version(u8 major, u8 minor)
+{
+ u16 version;
+
+ version = ((major << 8) | minor);
+ return version;
+}
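
A worked example with the constants above: the negotiated Win7-era
protocol version is

    storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR)
        == storvsc_get_version(4, 2)
        == (4 << 8) | 2
        == 0x0402     /* major in the MSB, minor in the LSB */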
+
+/*
+ * We can get incoming messages from the host that are not in response to
+ * messages that we have sent out. An example of this would be messages
+ * received by the guest to notify dynamic addition/removal of LUNs. To
+ * deal with potential race conditions where the driver may be in the
+ * midst of being unloaded when we might receive an unsolicited message
+ * from the host, we have implemented a mechanism to guarantee sequential
+ * consistency:
+ *
+ * 1) Once the device is marked as being destroyed, we will fail all
+ * outgoing messages.
+ * 2) We permit incoming messages when the device is being destroyed,
+ * only to properly account for messages already sent out.
+ */
+
+static inline struct storvsc_device *get_out_stor_device(
+ struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+
+ stor_device = hv_get_drvdata(device);
+
+ if (stor_device && stor_device->destroy)
+ stor_device = NULL;
+
+ return stor_device;
+}
+
+
+static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
+{
+ dev->drain_notify = true;
+ wait_event(dev->waiting_to_drain,
+ atomic_read(&dev->num_outstanding_req) == 0);
+ dev->drain_notify = false;
+}
+
+static inline struct storvsc_device *get_in_stor_device(
+ struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+
+ stor_device = hv_get_drvdata(device);
+
+ if (!stor_device)
+ goto get_in_err;
+
+ /*
+ * If the device is being destroyed, allow incoming
+ * traffic only to clean up outstanding requests.
+ */
+
+ if (stor_device->destroy &&
+ (atomic_read(&stor_device->num_outstanding_req) == 0))
+ stor_device = NULL;
+
+get_in_err:
+ return stor_device;
+
+}
+
+static void destroy_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count)
+{
+ int i;
+ struct page *page_buf;
+
+ for (i = 0; i < sg_count; i++) {
+ page_buf = sg_page((&sgl[i]));
+ if (page_buf != NULL)
+ __free_page(page_buf);
+ }
+
+ kfree(sgl);
+}
+
+static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
+{
+ int i;
+
+ /* A single-entry sgl cannot have an internal hole; no need to check */
+ if (sg_count < 2)
+ return -1;
+
+ /* We have at least 2 sg entries */
+ for (i = 0; i < sg_count; i++) {
+ if (i == 0) {
+ /* make sure 1st one does not have hole */
+ if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
+ return i;
+ } else if (i == sg_count - 1) {
+ /* make sure last one does not have hole */
+ if (sgl[i].offset != 0)
+ return i;
+ } else {
+ /* make sure no hole in the middle */
+ if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
+ return i;
+ }
+ }
+ return -1;
+}
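
Two illustrative layouts, assuming 4096-byte pages: the walk accepts an
sgl only if the entries form one gap-free byte range when the pages are
laid end to end, and returns the index of the first offending entry
otherwise.

    accepted (returns -1, no bounce buffer needed):
        sg[0]: offset 2048, length 2048   /* ends on the page boundary */
        sg[1]: offset    0, length 4096   /* full page in the middle   */
        sg[2]: offset    0, length 1000   /* starts on a page boundary */

    rejected (returns 1, must bounce):
        sg[0]: offset 2048, length 2048
        sg[1]: offset  512, length 3584   /* last entry offset != 0    */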
+
+static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
+ unsigned int sg_count,
+ unsigned int len,
+ int write)
+{
+ int i;
+ int num_pages;
+ struct scatterlist *bounce_sgl;
+ struct page *page_buf;
+ unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
+
+ num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
+
+ bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!bounce_sgl)
+ return NULL;
+
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
+ goto cleanup;
+ sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
+ }
+
+ return bounce_sgl;
+
+cleanup:
+ destroy_bounce_buffer(bounce_sgl, num_pages);
+ return NULL;
+}
+
+/* Disgusting wrapper functions */
+static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
+{
+ void *addr = kmap_atomic(sg_page(sgl + idx));
+ return (unsigned long)addr;
+}
+
+static inline void sg_kunmap_atomic(unsigned long addr)
+{
+ kunmap_atomic((void *)addr);
+}
+
+
+/* Assume the original sgl has enough room */
+static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count,
+ unsigned int bounce_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long dest_addr = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < orig_sgl_count; i++) {
+ dest_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
+ dest = dest_addr;
+ destlen = orig_sgl[i].length;
+
+ if (bounce_addr == 0)
+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+
+ while (destlen) {
+ src = bounce_addr + bounce_sgl[j].offset;
+ srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ bounce_sgl[j].offset += copylen;
+ destlen -= copylen;
+ dest += copylen;
+
+ if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+ /* full */
+ sg_kunmap_atomic(bounce_addr);
+ j++;
+
+ /*
+ * It is possible that the number of elements
+ * in the bounce buffer may not be equal to
+ * the number of elements in the original
+ * scatter list. Handle this correctly.
+ */
+
+ if (j == bounce_sgl_count) {
+ /*
+ * We are done; cleanup and return.
+ */
+ sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+ local_irq_restore(flags);
+ return total_copied;
+ }
+
+ /* if we need to use another bounce buffer */
+ if (destlen || i != orig_sgl_count - 1)
+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+ } else if (destlen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
+ sg_kunmap_atomic(bounce_addr);
+ }
+ }
+
+ sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
+/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
+static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ struct scatterlist *bounce_sgl,
+ unsigned int orig_sgl_count)
+{
+ int i;
+ int j = 0;
+ unsigned long src, dest;
+ unsigned int srclen, destlen, copylen;
+ unsigned int total_copied = 0;
+ unsigned long bounce_addr = 0;
+ unsigned long src_addr = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < orig_sgl_count; i++) {
+ src_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
+ src = src_addr;
+ srclen = orig_sgl[i].length;
+
+ if (bounce_addr == 0)
+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+
+ while (srclen) {
+ /* assume bounce offset always == 0 */
+ dest = bounce_addr + bounce_sgl[j].length;
+ destlen = PAGE_SIZE - bounce_sgl[j].length;
+
+ copylen = min(srclen, destlen);
+ memcpy((void *)dest, (void *)src, copylen);
+
+ total_copied += copylen;
+ bounce_sgl[j].length += copylen;
+ srclen -= copylen;
+ src += copylen;
+
+ if (bounce_sgl[j].length == PAGE_SIZE) {
+ /* full..move to next entry */
+ sg_kunmap_atomic(bounce_addr);
+ j++;
+
+ /* if we need to use another bounce buffer */
+ if (srclen || i != orig_sgl_count - 1)
+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+
+ } else if (srclen == 0 && i == orig_sgl_count - 1) {
+ /* unmap the last bounce that is < PAGE_SIZE */
+ sg_kunmap_atomic(bounce_addr);
+ }
+ }
+
+ sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ }
+
+ local_irq_restore(flags);
+
+ return total_copied;
+}
+
+static int storvsc_channel_init(struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+ struct storvsc_cmd_request *request;
+ struct vstor_packet *vstor_packet;
+ int ret, t;
+
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return -ENODEV;
+
+ request = &stor_device->init_request;
+ vstor_packet = &request->vstor_packet;
+
+ /*
+ * Now, initiate the vsc/vsp initialization protocol on the open
+ * channel
+ */
+ memset(request, 0, sizeof(struct storvsc_cmd_request));
+ init_completion(&request->wait_event);
+ vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+ /* reuse the packet for version range supported */
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ vstor_packet->version.major_minor =
+ storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);
+
+ /*
+ * The revision number is only used in Windows; set it to 0.
+ */
+ vstor_packet->version.revision = 0;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->storage_channel_properties.port_number =
+ stor_device->port_number;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+ stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
+ stor_device->target_id
+ = vstor_packet->storage_channel_properties.target_id;
+
+ memset(vstor_packet, 0, sizeof(struct vstor_packet));
+ vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+ vstor_packet->status != 0)
+ goto cleanup;
+
+
+cleanup:
+ return ret;
+}
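
The handshake above, condensed: each step sends a single vstor_packet,
waits up to five seconds for the host's answer, and bails out unless
the reply is COMPLETE_IO with status 0.

    1. BEGIN_INITIALIZATION        /* open the negotiation           */
    2. QUERY_PROTOCOL_VERSION      /* propose major_minor 4.2        */
    3. QUERY_PROPERTIES            /* host returns path_id/target_id */
    4. END_INITIALIZATION          /* commit the negotiated state    */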
+
+
+static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
+{
+ struct scsi_cmnd *scmnd = cmd_request->cmd;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+ void (*scsi_done_fn)(struct scsi_cmnd *);
+ struct scsi_sense_hdr sense_hdr;
+ struct vmscsi_request *vm_srb;
+ struct storvsc_scan_work *wrk;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
+
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
+ if (cmd_request->bounce_sgl_count) {
+ if (vm_srb->data_in == READ_TYPE)
+ copy_from_bounce_buffer(scsi_sglist(scmnd),
+ cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd),
+ cmd_request->bounce_sgl_count);
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+ }
+
+ /*
+ * If there is an error, offline the device since all
+ * error recovery strategies would have already been
+ * deployed on the host side.
+ */
+ if (vm_srb->srb_status == SRB_STATUS_ERROR)
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ else
+ scmnd->result = vm_srb->scsi_status;
+
+ /*
+ * If the LUN is invalid; remove the device.
+ */
+ if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+ struct Scsi_Host *host;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
+
+ wrk = kmalloc(sizeof(struct storvsc_scan_work),
+ GFP_ATOMIC);
+ if (!wrk) {
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ } else {
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, storvsc_remove_lun);
+ schedule_work(&wrk->work);
+ }
+ }
+
+ if (scmnd->result) {
+ if (scsi_normalize_sense(scmnd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, &sense_hdr))
+ scsi_print_sense_hdr("storvsc", &sense_hdr);
+ }
+
+ scsi_set_resid(scmnd,
+ cmd_request->data_buffer.len -
+ vm_srb->data_transfer_length);
+
+ scsi_done_fn = scmnd->scsi_done;
+
+ scmnd->host_scribble = NULL;
+ scmnd->scsi_done = NULL;
+
+ scsi_done_fn(scmnd);
+
+ mempool_free(cmd_request, memp->request_mempool);
+}
+
+static void storvsc_on_io_completion(struct hv_device *device,
+ struct vstor_packet *vstor_packet,
+ struct storvsc_cmd_request *request)
+{
+ struct storvsc_device *stor_device;
+ struct vstor_packet *stor_pkt;
+
+ stor_device = hv_get_drvdata(device);
+ stor_pkt = &request->vstor_packet;
+
+ /*
+ * The current SCSI handling on the host side does
+ * not correctly handle:
+ * INQUIRY command with page code parameter set to 0x80
+ * MODE_SENSE command with cmd[2] == 0x1c
+ *
+ * Setup srb and scsi status so this won't be fatal.
+ * We do this so we can distinguish truly fatal failures
+ * (srb status == 0x4) and off-line the device in that case.
+ */
+
+ if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
+ (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
+ vstor_packet->vm_srb.scsi_status = 0;
+ vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
+ }
+
+
+ /* Copy over the status...etc */
+ stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
+ stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
+ stor_pkt->vm_srb.sense_info_length =
+ vstor_packet->vm_srb.sense_info_length;
+
+ if (vstor_packet->vm_srb.scsi_status != 0 ||
+ vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){
+ dev_warn(&device->device,
+ "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
+ stor_pkt->vm_srb.cdb[0],
+ vstor_packet->vm_srb.scsi_status,
+ vstor_packet->vm_srb.srb_status);
+ }
+
+ if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
+ /* CHECK_CONDITION */
+ if (vstor_packet->vm_srb.srb_status &
+ SRB_STATUS_AUTOSENSE_VALID) {
+ /* autosense data available */
+ dev_warn(&device->device,
+ "stor pkt %p autosense data valid - len %d\n",
+ request,
+ vstor_packet->vm_srb.sense_info_length);
+
+ memcpy(request->sense_buffer,
+ vstor_packet->vm_srb.sense_data,
+ vstor_packet->vm_srb.sense_info_length);
+
+ }
+ }
+
+ stor_pkt->vm_srb.data_transfer_length =
+ vstor_packet->vm_srb.data_transfer_length;
+
+ storvsc_command_completion(request);
+
+ if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
+ stor_device->drain_notify)
+ wake_up(&stor_device->waiting_to_drain);
+
+
+}
+
+static void storvsc_on_receive(struct hv_device *device,
+ struct vstor_packet *vstor_packet,
+ struct storvsc_cmd_request *request)
+{
+ struct storvsc_scan_work *work;
+ struct storvsc_device *stor_device;
+
+ switch (vstor_packet->operation) {
+ case VSTOR_OPERATION_COMPLETE_IO:
+ storvsc_on_io_completion(device, vstor_packet, request);
+ break;
+
+ case VSTOR_OPERATION_REMOVE_DEVICE:
+ case VSTOR_OPERATION_ENUMERATE_BUS:
+ stor_device = get_in_stor_device(device);
+ work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, storvsc_bus_scan);
+ work->host = stor_device->host;
+ schedule_work(&work->work);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void storvsc_on_channel_callback(void *context)
+{
+ struct hv_device *device = (struct hv_device *)context;
+ struct storvsc_device *stor_device;
+ u32 bytes_recvd;
+ u64 request_id;
+ unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
+ struct storvsc_cmd_request *request;
+ int ret;
+
+
+ stor_device = get_in_stor_device(device);
+ if (!stor_device)
+ return;
+
+ do {
+ ret = vmbus_recvpacket(device->channel, packet,
+ ALIGN(sizeof(struct vstor_packet), 8),
+ &bytes_recvd, &request_id);
+ if (ret == 0 && bytes_recvd > 0) {
+
+ request = (struct storvsc_cmd_request *)
+ (unsigned long)request_id;
+
+ if ((request == &stor_device->init_request) ||
+ (request == &stor_device->reset_request)) {
+
+ memcpy(&request->vstor_packet, packet,
+ sizeof(struct vstor_packet));
+ complete(&request->wait_event);
+ } else {
+ storvsc_on_receive(device,
+ (struct vstor_packet *)packet,
+ request);
+ }
+ } else {
+ break;
+ }
+ } while (1);
+
+ return;
+}
+
+static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
+{
+ struct vmstorage_channel_properties props;
+ int ret;
+
+ memset(&props, 0, sizeof(struct vmstorage_channel_properties));
+
+ ret = vmbus_open(device->channel,
+ ring_size,
+ ring_size,
+ (void *)&props,
+ sizeof(struct vmstorage_channel_properties),
+ storvsc_on_channel_callback, device);
+
+ if (ret != 0)
+ return ret;
+
+ ret = storvsc_channel_init(device);
+
+ return ret;
+}
+
+static int storvsc_dev_remove(struct hv_device *device)
+{
+ struct storvsc_device *stor_device;
+ unsigned long flags;
+
+ stor_device = hv_get_drvdata(device);
+
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ stor_device->destroy = true;
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /*
+ * At this point, all outbound traffic should be disabled. We
+ * only allow inbound traffic (responses) to proceed so that
+ * outstanding requests can be completed.
+ */
+
+ storvsc_wait_to_drain(stor_device);
+
+ /*
+ * Since we have already drained, we don't need to busy wait
+ * as was done in final_release_stor_device()
+ * Note that we cannot set the ext pointer to NULL until
+ * we have drained - to drain the outgoing packets, we need to
+ * allow incoming packets.
+ */
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ hv_set_drvdata(device, NULL);
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /* Close the channel */
+ vmbus_close(device->channel);
+
+ kfree(stor_device);
+ return 0;
+}
+
+static int storvsc_do_io(struct hv_device *device,
+ struct storvsc_cmd_request *request)
+{
+ struct storvsc_device *stor_device;
+ struct vstor_packet *vstor_packet;
+ int ret = 0;
+
+ vstor_packet = &request->vstor_packet;
+ stor_device = get_out_stor_device(device);
+
+ if (!stor_device)
+ return -ENODEV;
+
+
+ request->device = device;
+
+
+ vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
+
+ vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
+
+
+ vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
+
+
+ vstor_packet->vm_srb.data_transfer_length =
+ request->data_buffer.len;
+
+ vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
+
+ if (request->data_buffer.len) {
+ ret = vmbus_sendpacket_multipagebuffer(device->channel,
+ &request->data_buffer,
+ vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request);
+ } else {
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ }
+
+ if (ret != 0)
+ return ret;
+
+ atomic_inc(&stor_device->num_outstanding_req);
+
+ return ret;
+}
+
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+ struct stor_mem_pools *memp;
+ int number = STORVSC_MIN_BUF_NR;
+
+ memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
+ if (!memp)
+ return -ENOMEM;
+
+ memp->request_pool =
+ kmem_cache_create(dev_name(&sdevice->sdev_dev),
+ sizeof(struct storvsc_cmd_request), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+
+ if (!memp->request_pool)
+ goto err0;
+
+ memp->request_mempool = mempool_create(number, mempool_alloc_slab,
+ mempool_free_slab,
+ memp->request_pool);
+
+ if (!memp->request_mempool)
+ goto err1;
+
+ sdevice->hostdata = memp;
+
+ return 0;
+
+err1:
+ kmem_cache_destroy(memp->request_pool);
+
+err0:
+ kfree(memp);
+ return -ENOMEM;
+}
+
+static void storvsc_device_destroy(struct scsi_device *sdevice)
+{
+ struct stor_mem_pools *memp = sdevice->hostdata;
+
+ mempool_destroy(memp->request_mempool);
+ kmem_cache_destroy(memp->request_pool);
+ kfree(memp);
+ sdevice->hostdata = NULL;
+}
+
+static int storvsc_device_configure(struct scsi_device *sdevice)
+{
+ scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
+ STORVSC_MAX_IO_REQUESTS);
+
+ blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
+
+ blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
+
+ return 0;
+}
+
+static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int *info)
+{
+ sector_t nsect = capacity;
+ sector_t cylinders = nsect;
+ int heads, sectors_pt;
+
+ /*
+ * We are making up these values; let us keep it simple.
+ */
+ heads = 0xff;
+ sectors_pt = 0x3f; /* Sectors per track */
+ sector_div(cylinders, heads * sectors_pt);
+ if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
+ cylinders = 0xffff;
+
+ info[0] = heads;
+ info[1] = sectors_pt;
+ info[2] = (int)cylinders;
+
+ return 0;
+}
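
A worked example with the made-up geometry above: a 16 GiB virtual disk
has 33,554,432 512-byte sectors; with 255 heads and 63 sectors per
track, sector_div() yields 33,554,432 / (255 * 63) = 2,088 cylinders,
well under the 0xffff clamp, so info[] becomes {255, 63, 2088}.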
+
+static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+{
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
+ struct hv_device *device = host_dev->dev;
+
+ struct storvsc_device *stor_device;
+ struct storvsc_cmd_request *request;
+ struct vstor_packet *vstor_packet;
+ int ret, t;
+
+
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return FAILED;
+
+ request = &stor_device->reset_request;
+ vstor_packet = &request->vstor_packet;
+
+ init_completion(&request->wait_event);
+
+ vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
+ vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+ vstor_packet->vm_srb.path_id = stor_device->path_id;
+
+ ret = vmbus_sendpacket(device->channel, vstor_packet,
+ sizeof(struct vstor_packet),
+ (unsigned long)&stor_device->reset_request,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0)
+ return FAILED;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0)
+ return TIMEOUT_ERROR;
+
+
+ /*
+ * At this point, all outstanding requests in the adapter
+ * should have been flushed out and returned to us
+ */
+
+ return SUCCESS;
+}
+
+static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
+{
+ bool allowed = true;
+ u8 scsi_op = scmnd->cmnd[0];
+
+ switch (scsi_op) {
+ /*
+ * smartd sends this command and the host does not handle
+ * this. So, don't send it.
+ */
+ case SET_WINDOW:
+ scmnd->result = ILLEGAL_REQUEST << 16;
+ allowed = false;
+ break;
+ default:
+ break;
+ }
+ return allowed;
+}
+
+static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+{
+ int ret;
+ struct hv_host_device *host_dev = shost_priv(host);
+ struct hv_device *dev = host_dev->dev;
+ struct storvsc_cmd_request *cmd_request;
+ unsigned int request_size = 0;
+ int i;
+ struct scatterlist *sgl;
+ unsigned int sg_count = 0;
+ struct vmscsi_request *vm_srb;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
+
+ if (!storvsc_scsi_cmd_ok(scmnd)) {
+ scmnd->scsi_done(scmnd);
+ return 0;
+ }
+
+ request_size = sizeof(struct storvsc_cmd_request);
+
+ cmd_request = mempool_alloc(memp->request_mempool,
+ GFP_ATOMIC);
+
+ /*
+ * We might be invoked in an interrupt context; hence
+ * mempool_alloc() can fail.
+ */
+ if (!cmd_request)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
+ memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
+
+ /* Setup the cmd request */
+ cmd_request->cmd = scmnd;
+
+ scmnd->host_scribble = (unsigned char *)cmd_request;
+
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
+
+
+ /* Build the SRB */
+ switch (scmnd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ vm_srb->data_in = WRITE_TYPE;
+ break;
+ case DMA_FROM_DEVICE:
+ vm_srb->data_in = READ_TYPE;
+ break;
+ default:
+ vm_srb->data_in = UNKNOWN_TYPE;
+ break;
+ }
+
+
+ vm_srb->port_number = host_dev->port;
+ vm_srb->path_id = scmnd->device->channel;
+ vm_srb->target_id = scmnd->device->id;
+ vm_srb->lun = scmnd->device->lun;
+
+ vm_srb->cdb_length = scmnd->cmd_len;
+
+ memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
+
+ cmd_request->sense_buffer = scmnd->sense_buffer;
+
+
+ cmd_request->data_buffer.len = scsi_bufflen(scmnd);
+ if (scsi_sg_count(scmnd)) {
+ sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
+
+ /* check if we need to bounce the sgl */
+ if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
+ cmd_request->bounce_sgl =
+ create_bounce_buffer(sgl, scsi_sg_count(scmnd),
+ scsi_bufflen(scmnd),
+ vm_srb->data_in);
+ if (!cmd_request->bounce_sgl) {
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ goto queue_error;
+ }
+
+ cmd_request->bounce_sgl_count =
+ ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
+ PAGE_SHIFT;
+
+ if (vm_srb->data_in == WRITE_TYPE)
+ copy_to_bounce_buffer(sgl,
+ cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd));
+
+ sgl = cmd_request->bounce_sgl;
+ sg_count = cmd_request->bounce_sgl_count;
+ }
+
+ cmd_request->data_buffer.offset = sgl[0].offset;
+
+ for (i = 0; i < sg_count; i++)
+ cmd_request->data_buffer.pfn_array[i] =
+ page_to_pfn(sg_page((&sgl[i])));
+
+ } else if (scsi_sglist(scmnd)) {
+ cmd_request->data_buffer.offset =
+ virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
+ cmd_request->data_buffer.pfn_array[0] =
+ virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
+ }
+
+ /* Invokes the vsc to start an IO */
+ ret = storvsc_do_io(dev, cmd_request);
+
+ if (ret == -EAGAIN) {
+ /* no more space */
+
+ if (cmd_request->bounce_sgl_count) {
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
+ cmd_request->bounce_sgl_count);
+
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto queue_error;
+ }
+ }
+
+ return 0;
+
+queue_error:
+ mempool_free(cmd_request, memp->request_mempool);
+ scmnd->host_scribble = NULL;
+ return ret;
+}
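+
+/*
+ * A worked example of the bounce accounting above, assuming 4 KiB pages
+ * (illustrative numbers only): a 10 KiB request yields
+ * bounce_sgl_count = ALIGN(10240, 4096) >> PAGE_SHIFT = 12288 >> 12 = 3,
+ * i.e. three bounce pages shadow the original scatterlist.
+ */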
+
+static struct scsi_host_template scsi_driver = {
+ .module = THIS_MODULE,
+ .name = "storvsc_host_t",
+ .bios_param = storvsc_get_chs,
+ .queuecommand = storvsc_queuecommand,
+ .eh_host_reset_handler = storvsc_host_reset_handler,
+ .slave_alloc = storvsc_device_alloc,
+ .slave_destroy = storvsc_device_destroy,
+ .slave_configure = storvsc_device_configure,
+ .cmd_per_lun = 1,
+ /* 64 max_queue * 1 target */
+ .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
+ .this_id = -1,
+ /* no use setting to 0 since ll_blk_rw reset it to 1 */
+ /* currently 32 */
+ .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
+ .use_clustering = DISABLE_CLUSTERING,
+	/* Make sure we don't get a sg segment that crosses a page boundary */
+ .dma_boundary = PAGE_SIZE-1,
+};
+
+enum {
+ SCSI_GUID,
+ IDE_GUID,
+};
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* SCSI guid */
+ { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
+ 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
+ .driver_data = SCSI_GUID },
+ /* IDE guid */
+ { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
+ 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
+ .driver_data = IDE_GUID },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static int storvsc_probe(struct hv_device *device,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ int ret;
+ struct Scsi_Host *host;
+ struct hv_host_device *host_dev;
+ bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
+ int target = 0;
+ struct storvsc_device *stor_device;
+
+ host = scsi_host_alloc(&scsi_driver,
+ sizeof(struct hv_host_device));
+ if (!host)
+ return -ENOMEM;
+
+ host_dev = shost_priv(host);
+ memset(host_dev, 0, sizeof(struct hv_host_device));
+
+ host_dev->port = host->host_no;
+ host_dev->dev = device;
+
+
+ stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
+ if (!stor_device) {
+ ret = -ENOMEM;
+ goto err_out0;
+ }
+
+ stor_device->destroy = false;
+ init_waitqueue_head(&stor_device->waiting_to_drain);
+ stor_device->device = device;
+ stor_device->host = host;
+ hv_set_drvdata(device, stor_device);
+
+ stor_device->port_number = host->host_no;
+ ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
+ if (ret)
+ goto err_out1;
+
+ host_dev->path = stor_device->path_id;
+ host_dev->target = stor_device->target_id;
+
+ /* max # of devices per target */
+ host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
+ /* max # of targets per channel */
+ host->max_id = STORVSC_MAX_TARGETS;
+ /* max # of channels */
+ host->max_channel = STORVSC_MAX_CHANNELS - 1;
+ /* max cmd length */
+ host->max_cmd_len = STORVSC_MAX_CMD_LEN;
+
+ /* Register the HBA and start the scsi bus scan */
+ ret = scsi_add_host(host, &device->device);
+ if (ret != 0)
+ goto err_out2;
+
+ if (!dev_is_ide) {
+ scsi_scan_host(host);
+ } else {
+ target = (device->dev_instance.b[5] << 8 |
+ device->dev_instance.b[4]);
+ ret = scsi_add_device(host, 0, target, 0);
+ if (ret) {
+ scsi_remove_host(host);
+ goto err_out2;
+ }
+ }
+ return 0;
+
+err_out2:
+	/*
+	 * Once we have connected with the host, we would need to
+	 * invoke storvsc_dev_remove() to roll back this state; this
+	 * call also frees up the stor_device, hence the jump around
+	 * the err_out1 label.
+	 */
+ storvsc_dev_remove(device);
+ goto err_out0;
+
+err_out1:
+ kfree(stor_device);
+
+err_out0:
+ scsi_host_put(host);
+ return ret;
+}
+
+static int storvsc_remove(struct hv_device *dev)
+{
+ struct storvsc_device *stor_device = hv_get_drvdata(dev);
+ struct Scsi_Host *host = stor_device->host;
+
+ scsi_remove_host(host);
+ storvsc_dev_remove(dev);
+ scsi_host_put(host);
+
+ return 0;
+}
+
+static struct hv_driver storvsc_drv = {
+ .name = KBUILD_MODNAME,
+ .id_table = id_table,
+ .probe = storvsc_probe,
+ .remove = storvsc_remove,
+};
+
+static int __init storvsc_drv_init(void)
+{
+ u32 max_outstanding_req_per_channel;
+
+ /*
+ * Divide the ring buffer data size (which is 1 page less
+ * than the ring buffer size since that page is reserved for
+ * the ring buffer indices) by the max request size (which is
+ * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
+ */
+ max_outstanding_req_per_channel =
+ ((storvsc_ringbuffer_size - PAGE_SIZE) /
+ ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
+ sizeof(struct vstor_packet) + sizeof(u64),
+ sizeof(u64)));
+
+ if (max_outstanding_req_per_channel <
+ STORVSC_MAX_IO_REQUESTS)
+ return -EINVAL;
+
+ return vmbus_driver_register(&storvsc_drv);
+}
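+
+/*
+ * To make the arithmetic above concrete with purely illustrative numbers:
+ * if storvsc_ringbuffer_size were 64 pages of 4 KiB (256 KiB) and the
+ * u64-aligned maximum request size worked out to 344 bytes, then
+ * (262144 - 4096) / 344 = 750 outstanding requests would fit per channel,
+ * comfortably above the STORVSC_MAX_IO_REQUESTS floor checked above.
+ */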
+
+static void __exit storvsc_drv_exit(void)
+{
+ vmbus_driver_unregister(&storvsc_drv);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(HV_DRV_VERSION);
+MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
+module_init(storvsc_drv_init);
+module_exit(storvsc_drv_exit);
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index baf7328de956..6e25889db9d4 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -63,7 +63,6 @@
#include <linux/blkdev.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <asm/sun3ints.h>
#include <asm/dvma.h>
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
index fbba78e5722e..a3dd55d1d2fd 100644
--- a/drivers/scsi/sun3_scsi_vme.c
+++ b/drivers/scsi/sun3_scsi_vme.c
@@ -25,7 +25,6 @@
#include <linux/blkdev.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <asm/sun3ints.h>
#include <asm/dvma.h>
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 012c86edd59f..ac4eca6a5328 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -37,7 +37,6 @@
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <asm/dma.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <linux/blkdev.h>
#include <linux/isapnp.h>
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index 041eaaace2c3..d672d97fb84a 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -106,7 +106,6 @@
* $Log: t128.c,v $
*/
-#include <asm/system.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 90e104d6b558..9c216e563568 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -410,7 +410,6 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
new file mode 100644
index 000000000000..8f27f9d6f91d
--- /dev/null
+++ b/drivers/scsi/ufs/Kconfig
@@ -0,0 +1,49 @@
+#
+# Kernel configuration file for the UFS Host Controller
+#
+# This code is based on drivers/scsi/ufs/Kconfig
+# Copyright (C) 2011 Samsung India Software Operations
+#
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
+
+config SCSI_UFSHCD
+ tristate "Universal Flash Storage host controller driver"
+ depends on PCI && SCSI
+ ---help---
+ This is a generic driver which supports PCIe UFS Host controllers.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
new file mode 100644
index 000000000000..adf7895a6a91
--- /dev/null
+++ b/drivers/scsi/ufs/Makefile
@@ -0,0 +1,2 @@
+# UFSHCD makefile
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
new file mode 100644
index 000000000000..b207529f8d54
--- /dev/null
+++ b/drivers/scsi/ufs/ufs.h
@@ -0,0 +1,207 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufs.h
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef _UFS_H
+#define _UFS_H
+
+#define MAX_CDB_SIZE 16
+
+#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
+ ((byte3 << 24) | (byte2 << 16) |\
+ (byte1 << 8) | (byte0))
+
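+/*
+ * Example of packing a header double word (values taken from the enums
+ * below): a Command UPIU header DW-0 for LUN 0 and task tag 5 with the
+ * READ flag set would be built as
+ *
+ *	UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, UPIU_CMD_FLAGS_READ, 0, 5)
+ *
+ * which evaluates to (0x01 << 24) | (0x40 << 16) | (0x00 << 8) | 0x05,
+ * i.e. 0x01400005.
+ */
+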
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+ UFS_ABORT_TASK = 0x01,
+ UFS_ABORT_TASK_SET = 0x02,
+ UFS_CLEAR_TASK_SET = 0x04,
+ UFS_LOGICAL_RESET = 0x08,
+ UFS_QUERY_TASK = 0x80,
+ UFS_QUERY_TASK_SET = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+ UPIU_TRANSACTION_NOP_OUT = 0x00,
+ UPIU_TRANSACTION_COMMAND = 0x01,
+ UPIU_TRANSACTION_DATA_OUT = 0x02,
+ UPIU_TRANSACTION_TASK_REQ = 0x04,
+ UPIU_TRANSACTION_QUERY_REQ = 0x26,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+ UPIU_TRANSACTION_NOP_IN = 0x20,
+ UPIU_TRANSACTION_RESPONSE = 0x21,
+ UPIU_TRANSACTION_DATA_IN = 0x22,
+ UPIU_TRANSACTION_TASK_RSP = 0x24,
+ UPIU_TRANSACTION_READY_XFER = 0x31,
+ UPIU_TRANSACTION_QUERY_RSP = 0x36,
+};
+
+/* UPIU Read/Write flags */
+enum {
+ UPIU_CMD_FLAGS_NONE = 0x00,
+ UPIU_CMD_FLAGS_WRITE = 0x20,
+ UPIU_CMD_FLAGS_READ = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+ UPIU_TASK_ATTR_SIMPLE = 0x00,
+ UPIU_TASK_ATTR_ORDERED = 0x01,
+ UPIU_TASK_ATTR_HEADQ = 0x02,
+ UPIU_TASK_ATTR_ACA = 0x03,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum {
+ UPIU_QUERY_OPCODE_NOP = 0x0,
+ UPIU_QUERY_OPCODE_READ_DESC = 0x1,
+ UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
+ UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
+ UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
+ UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
+ UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
+ UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
+ UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
+};
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+ UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
+ UPIU_COMMAND_SET_TYPE_UFS = 0x1,
+ UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
+};
+
+enum {
+ MASK_SCSI_STATUS = 0xFF,
+ MASK_TASK_RESPONSE = 0xFF00,
+ MASK_RSP_UPIU_RESULT = 0xFFFF,
+};
+
+/* Task management service response */
+enum {
+ UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
+ UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+ UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
+ UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
+ UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
+};
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+ u32 dword_0;
+ u32 dword_1;
+ u32 dword_2;
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @exp_data_transfer_len: Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+ struct utp_upiu_header header;
+ u32 exp_data_transfer_len;
+ u8 cdb[MAX_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_rsp - Response UPIU structure
+ * @header: UPIU header DW-0 to DW-2
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+struct utp_upiu_rsp {
+ struct utp_upiu_header header;
+ u32 residual_transfer_count;
+ u32 reserved[4];
+ u16 sense_data_len;
+ u8 sense_data[18];
+};
+
+/**
+ * struct utp_upiu_task_req - Task request UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @input_param1: Input parameter 1 DW-3
+ * @input_param2: Input parameter 2 DW-4
+ * @input_param3: Input parameter 3 DW-5
+ * @reserved: Reserved double words DW-6 to DW-7
+ */
+struct utp_upiu_task_req {
+ struct utp_upiu_header header;
+ u32 input_param1;
+ u32 input_param2;
+ u32 input_param3;
+ u32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_task_rsp - Task Management Response UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @output_param1: Output parameter 1 DW-3
+ * @output_param2: Output parameter 2 DW-4
+ * @reserved: Reserved double words DW-5 to DW-7
+ */
+struct utp_upiu_task_rsp {
+ struct utp_upiu_header header;
+ u32 output_param1;
+ u32 output_param2;
+ u32 reserved[3];
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
new file mode 100644
index 000000000000..52b96e8bf92e
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -0,0 +1,1978 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.1"
+
+enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+ UFSHCD_MAX_LUNS = 8,
+ UFSHCD_CMD_PER_LUN = 32,
+ UFSHCD_CAN_QUEUE = 32,
+};
+
+/* UFSHCD states */
+enum {
+ UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_RESET,
+ UFSHCD_STATE_ERROR,
+};
+
+/* Interrupt configuration options */
+enum {
+ UFSHCD_INT_DISABLE,
+ UFSHCD_INT_ENABLE,
+ UFSHCD_INT_CLEAR,
+};
+
+/* Interrupt aggregation options */
+enum {
+ INT_AGGR_RESET,
+ INT_AGGR_CONFIG,
+};
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ int result;
+};
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @pdev: PCI device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @ufs_version: UFS Version to which the controller complies
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
+
+ struct ufshcd_lrb *lrb;
+
+ unsigned long outstanding_tasks;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ int nutmrs;
+ u32 ufs_version;
+
+ struct uic_command active_uic_cmd;
+ wait_queue_head_t ufshcd_tm_wait_queue;
+ unsigned long tm_condition;
+
+ u32 ufshcd_state;
+ u32 int_enable_mask;
+
+ /* Work Queues */
+ struct work_struct uic_workq;
+ struct work_struct feh_workq;
+
+ /* HBA Errors */
+ u32 errors;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_cmd *ucd_cmd_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ struct scsi_cmnd *cmd;
+ u8 *sense_buffer;
+ unsigned int sense_bufflen;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ unsigned int lun;
+};
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba: Pointer to adapter instance
+ *
+ * Returns UFSHCI version supported by the controller
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+ return readl(hba->mmio_base + REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device is connected to
+ * the host controller
+ * @reg_hcs: host controller status register value
+ *
+ * Returns 0 if device present, non-zero if no device detected
+ */
+static inline int ufshcd_is_device_present(u32 reg_hcs)
+{
+ return (DEVICE_PRESENT & reg_hcs) ? 0 : -1;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrbp: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from the UTRD.
+ * Returns the OCS field in the UTRD.
+ */
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+ return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from the UTMRD.
+ * Returns the OCS field in the UTMRD.
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+ return task_req_descp->header.dword_2 & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ *
+ * Returns the first free slot number, or the maximum number of task
+ * management request slots if the task management queue is full
+ */
+static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
+{
+ return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
+ writel(~(1 << pos),
+ (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR));
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns integer, 0 on Success and positive value if failed
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+ /*
+ * The mask 0xFF is for the following HCS register bits
+ * Bit Description
+ * 0 Device Present
+ * 1 UTRLRDY
+ * 2 UTMRLRDY
+ * 3 UCRDY
+ * 4 HEI
+ * 5 DEI
+ * 6-7 reserved
+ */
+ return (((reg) & (0xFF)) >> 1) ^ (0x07);
+}
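+
+/*
+ * Worked example: a fully ready controller might report HCS = 0x0F
+ * (DP = UTRLRDY = UTMRLRDY = UCRDY = 1, HEI = DEI = 0), giving
+ * ((0x0F & 0xFF) >> 1) ^ 0x07 = 0x07 ^ 0x07 = 0, i.e. success.
+ * If, say, UCRDY were still 0, the result would be 0x03 ^ 0x07 = 0x04.
+ */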
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+ return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) &
+ MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_free_hba_memory - Free allocated memory for LRB, request
+ * and task lists
+ * @hba: Pointer to adapter instance
+ */
+static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
+{
+ size_t utmrdl_size, utrdl_size, ucdl_size;
+
+ kfree(hba->lrb);
+
+ if (hba->utmrdl_base_addr) {
+ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+ dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+ hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
+ }
+
+ if (hba->utrdl_base_addr) {
+ utrdl_size =
+ (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+ dma_free_coherent(&hba->pdev->dev, utrdl_size,
+ hba->utrdl_base_addr, hba->utrdl_dma_addr);
+ }
+
+ if (hba->ucdl_base_addr) {
+ ucdl_size =
+ (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ dma_free_coherent(&hba->pdev->dev, ucdl_size,
+ hba->ucdl_base_addr, hba->ucdl_dma_addr);
+ }
+}
+
+/**
+ * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function checks the response UPIU for a valid transaction type
+ * in the response field.
+ * Returns 0 on success, non-zero on failure
+ */
+static inline int
+ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
+ UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
+}
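+
+/*
+ * For instance, a well-formed Response UPIU carries
+ * UPIU_TRANSACTION_RESPONSE (0x21) in the top byte of header DW-0,
+ * so dword_0 >> 24 == 0x21 and the check above returns 0.
+ */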
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/**
+ * ufshcd_config_int_aggr - Configure interrupt aggregation values.
+ * Currently there is no use case where we want to configure
+ * interrupt aggregation dynamically, so the compile-time constants
+ * INT_AGGR_COUNTER_THRESHOLD_VALUE and INT_AGGR_TIMEOUT_VALUE are
+ * used instead.
+ * @hba: per adapter instance
+ * @option: Interrupt aggregation option
+ */
+static inline void
+ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+{
+ switch (option) {
+ case INT_AGGR_RESET:
+ writel((INT_AGGR_ENABLE |
+ INT_AGGR_COUNTER_AND_TIMER_RESET),
+ (hba->mmio_base +
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
+ break;
+ case INT_AGGR_CONFIG:
+ writel((INT_AGGR_ENABLE |
+ INT_AGGR_PARAM_WRITE |
+ INT_AGGR_COUNTER_THRESHOLD_VALUE |
+ INT_AGGR_TIMEOUT_VALUE),
+ (hba->mmio_base +
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
+ break;
+ }
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers.
+ * Setting the run-stop registers to 1 indicates to the
+ * host controller that it can process requests
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+ writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+ (hba->mmio_base +
+ REG_UTP_TASK_REQ_LIST_RUN_STOP));
+ writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+ (hba->mmio_base +
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
+}
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+ writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+	writel(CONTROLLER_ENABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns zero if controller is active, 1 otherwise
+ */
+static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+ return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+}
+
+/**
+ * ufshcd_send_command - Send SCSI or device management commands
+ * @hba: per adapter instance
+ * @task_tag: Task tag of the command
+ */
+static inline
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ writel((1 << task_tag),
+ (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL));
+}
+
+/**
+ * ufshcd_copy_sense_data - Copy sense data in case of check condition
+ * @lrbp: pointer to local reference block
+ */
+static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+{
+ int len;
+ if (lrbp->sense_buffer) {
+ len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
+ memcpy(lrbp->sense_buffer,
+ lrbp->ucd_rsp_ptr->sense_data,
+ min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ }
+}
+
+/**
+ * ufshcd_hba_capabilities - Read controller capabilities
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
+{
+ hba->capabilities =
+ readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES);
+
+ /* nutrs and nutmrs are 0 based values */
+ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+ hba->nutmrs =
+ ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+}
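+
+/*
+ * For example, assuming the NUTRS field occupies bits 4:0 and the NUTMRS
+ * field bits 18:16 as in the UFSHCI spec: a capabilities value of
+ * 0x0007001F would yield nutrs = 0x1F + 1 = 32 transfer request slots
+ * and nutmrs = 0x7 + 1 = 8 task management slots.
+ */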
+
+/**
+ * ufshcd_send_uic_command - Send UIC commands to unipro layers
+ * @hba: per adapter instance
+ * @uic_command: UIC command
+ */
+static inline void
+ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
+{
+ /* Write Args */
+ writel(uic_cmnd->argument1,
+ (hba->mmio_base + REG_UIC_COMMAND_ARG_1));
+ writel(uic_cmnd->argument2,
+ (hba->mmio_base + REG_UIC_COMMAND_ARG_2));
+ writel(uic_cmnd->argument3,
+ (hba->mmio_base + REG_UIC_COMMAND_ARG_3));
+
+ /* Write UIC Cmd */
+ writel((uic_cmnd->command & COMMAND_OPCODE_MASK),
+ (hba->mmio_base + REG_UIC_COMMAND));
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+{
+ struct ufshcd_sg_entry *prd_table;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmd;
+ int sg_segments;
+ int i;
+
+ cmd = lrbp->cmd;
+ sg_segments = scsi_dma_map(cmd);
+ if (sg_segments < 0)
+ return sg_segments;
+
+ if (sg_segments) {
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
+
+ prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+
+ scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ prd_table[i].size =
+ cpu_to_le32(((u32) sg_dma_len(sg))-1);
+ prd_table[i].base_addr =
+ cpu_to_le32(lower_32_bits(sg->dma_address));
+ prd_table[i].upper_addr =
+ cpu_to_le32(upper_32_bits(sg->dma_address));
+ }
+ } else {
+ lrbp->utr_descriptor_ptr->prd_table_length = 0;
+ }
+
+ return 0;
+}
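+
+/*
+ * Example PRDT entry, with an illustrative address: a 4 KiB segment at
+ * DMA address 0x123456000 is recorded as size = 4096 - 1 = 0x0FFF,
+ * base_addr = 0x23456000 and upper_addr = 0x1.
+ */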
+
+/**
+ * ufshcd_int_config - enable/disable interrupts
+ * @hba: per adapter instance
+ * @option: interrupt option
+ */
+static void ufshcd_int_config(struct ufs_hba *hba, u32 option)
+{
+ switch (option) {
+ case UFSHCD_INT_ENABLE:
+ writel(hba->int_enable_mask,
+ (hba->mmio_base + REG_INTERRUPT_ENABLE));
+ break;
+ case UFSHCD_INT_DISABLE:
+ if (hba->ufs_version == UFSHCI_VERSION_10)
+ writel(INTERRUPT_DISABLE_MASK_10,
+ (hba->mmio_base + REG_INTERRUPT_ENABLE));
+ else
+ writel(INTERRUPT_DISABLE_MASK_11,
+ (hba->mmio_base + REG_INTERRUPT_ENABLE));
+ break;
+ }
+}
+
+/**
+ * ufshcd_compose_upiu - form UFS Protocol Information Unit (UPIU)
+ * @lrbp: pointer to local reference block
+ */
+static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
+{
+ struct utp_transfer_req_desc *req_desc;
+ struct utp_upiu_cmd *ucd_cmd_ptr;
+ u32 data_direction;
+ u32 upiu_flags;
+
+ ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
+ req_desc = lrbp->utr_descriptor_ptr;
+
+ switch (lrbp->command_type) {
+ case UTP_CMD_TYPE_SCSI:
+ if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 =
+ cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
+
+		/*
+		 * Assign an invalid value to the command status; the
+		 * controller updates the OCS with the actual status
+		 * on command completion.
+		 */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* command descriptor fields */
+ ucd_cmd_ptr->header.dword_0 =
+ cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
+ upiu_flags,
+ lrbp->lun,
+ lrbp->task_tag));
+ ucd_cmd_ptr->header.dword_1 =
+ cpu_to_be32(
+ UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
+ 0,
+ 0,
+ 0));
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_cmd_ptr->header.dword_2 = 0;
+
+ ucd_cmd_ptr->exp_data_transfer_len =
+ cpu_to_be32(lrbp->cmd->transfersize);
+
+ memcpy(ucd_cmd_ptr->cdb,
+ lrbp->cmd->cmnd,
+ (min_t(unsigned short,
+ lrbp->cmd->cmd_len,
+ MAX_CDB_SIZE)));
+ break;
+ case UTP_CMD_TYPE_DEV_MANAGE:
+ /* For query function implementation */
+ break;
+ case UTP_CMD_TYPE_UFS:
+ /* For UFS native command implementation */
+ break;
+ } /* end of switch */
+}
+
+/**
+ * ufshcd_queuecommand - main entry point for SCSI requests
+ * @host: SCSI host pointer
+ * @cmd: command from SCSI Midlayer
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct ufshcd_lrb *lrbp;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ int tag;
+ int err = 0;
+
+ hba = shost_priv(host);
+
+ tag = cmd->request->tag;
+
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ lrbp = &hba->lrb[tag];
+
+ lrbp->cmd = cmd;
+ lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_buffer = cmd->sense_buffer;
+ lrbp->task_tag = tag;
+ lrbp->lun = cmd->device->lun;
+
+ lrbp->command_type = UTP_CMD_TYPE_SCSI;
+
+ /* form UPIU before issuing the command */
+ ufshcd_compose_upiu(lrbp);
+ err = ufshcd_map_sg(lrbp);
+ if (err)
+ goto out;
+
+ /* issue command to the controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ * @hba: per adapter instance
+ *
+ * 1. Allocate DMA memory for Command Descriptor array
+ * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
+ * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
+ * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
+ * (UTMRDL)
+ * 4. Allocate memory for local reference block(lrb).
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+ size_t utmrdl_size, utrdl_size, ucdl_size;
+
+ /* Allocate memory for UTP command descriptors */
+ ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ ucdl_size,
+ &hba->ucdl_dma_addr,
+ GFP_KERNEL);
+
+	/*
+	 * UFSHCI requires UTP command descriptors to be 128-byte aligned.
+	 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is,
+	 * then it is aligned to 128 bytes as well.
+	 */
+ if (!hba->ucdl_base_addr ||
+ WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(&hba->pdev->dev,
+ "Command Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Transfer descriptors
+ * UFSHCI requires 1024 byte alignment of UTRD
+ */
+ utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+ hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ utrdl_size,
+ &hba->utrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utrdl_base_addr ||
+ WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(&hba->pdev->dev,
+ "Transfer Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /*
+ * Allocate memory for UTP Task Management descriptors
+ * UFSHCI requires 1024 byte alignment of UTMRD
+ */
+ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+ hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ utmrdl_size,
+ &hba->utmrdl_dma_addr,
+ GFP_KERNEL);
+ if (!hba->utmrdl_base_addr ||
+ WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ dev_err(&hba->pdev->dev,
+ "Task Management Descriptor Memory allocation failed\n");
+ goto out;
+ }
+
+ /* Allocate memory for local reference block */
+ hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
+ if (!hba->lrb) {
+ dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+ goto out;
+ }
+ return 0;
+out:
+ ufshcd_free_hba_memory(hba);
+ return -ENOMEM;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ * memory offsets
+ * @hba: per adapter instance
+ *
+ * Configure Host memory space
+ * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
+ * address.
+ * 2. Update each UTRD with Response UPIU offset, Response UPIU length
+ * and PRDT offset.
+ * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
+ * into local reference block.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+ struct utp_transfer_cmd_desc *cmd_descp;
+ struct utp_transfer_req_desc *utrdlp;
+ dma_addr_t cmd_desc_dma_addr;
+ dma_addr_t cmd_desc_element_addr;
+ u16 response_offset;
+ u16 prdt_offset;
+ int cmd_desc_size;
+ int i;
+
+ utrdlp = hba->utrdl_base_addr;
+ cmd_descp = hba->ucdl_base_addr;
+
+ response_offset =
+ offsetof(struct utp_transfer_cmd_desc, response_upiu);
+ prdt_offset =
+ offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_dma_addr = hba->ucdl_dma_addr;
+
+ for (i = 0; i < hba->nutrs; i++) {
+ /* Configure UTRD with command descriptor base address */
+ cmd_desc_element_addr =
+ (cmd_desc_dma_addr + (cmd_desc_size * i));
+ utrdlp[i].command_desc_base_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
+ utrdlp[i].command_desc_base_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+
+		/* Response UPIU and PRDT offsets are in double words */
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16((response_offset >> 2));
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16((prdt_offset >> 2));
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+
+ hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+ hba->lrb[i].ucd_cmd_ptr =
+ (struct utp_upiu_cmd *)(cmd_descp + i);
+ hba->lrb[i].ucd_rsp_ptr =
+ (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ hba->lrb[i].ucd_prdt_ptr =
+ (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ }
+}
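+
+/*
+ * To make the double-word conversion above concrete: if response_upiu
+ * happened to start at byte offset 128 within utp_transfer_cmd_desc,
+ * the UTRD would be programmed with response_upiu_offset = 128 >> 2 = 32.
+ */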
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ * @hba: per adapter instance
+ *
+ * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
+ * in order to initialize the Unipro link startup procedure.
+ * Once the Unipro links are up, the device connected to the controller
+ * is detected.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+ struct uic_command *uic_cmd;
+ unsigned long flags;
+
+ /* check if controller is ready to accept UIC commands */
+ if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
+ UIC_COMMAND_READY) == 0x0) {
+ dev_err(&hba->pdev->dev,
+ "Controller not ready"
+ " to accept UIC commands\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* form UIC command */
+ uic_cmd = &hba->active_uic_cmd;
+ uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
+ uic_cmd->argument1 = 0;
+ uic_cmd->argument2 = 0;
+ uic_cmd->argument3 = 0;
+
+ /* enable UIC related interrupts */
+ hba->int_enable_mask |= UIC_COMMAND_COMPL;
+ ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+
+ /* sending UIC commands to controller */
+ ufshcd_send_uic_command(hba, uic_cmd);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ * @hba: per adapter instance
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Check if device is present
+ * 2. Configure run-stop-registers
+ * 3. Enable required interrupts
+ * 4. Configure interrupt aggregation
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 reg;
+
+ /* check if device present */
+ reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
+ if (ufshcd_is_device_present(reg)) {
+ dev_err(&hba->pdev->dev, "cc: Device not present\n");
+ err = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * UCRDY, UTMRLDY and UTRLRDY bits must be 1
+ * DEI, HEI bits must be 0
+ */
+ if (!(ufshcd_get_lists_status(reg))) {
+ ufshcd_enable_run_stop_reg(hba);
+ } else {
+ dev_err(&hba->pdev->dev,
+ "Host controller not ready to process requests");
+ err = -EIO;
+ goto out;
+ }
+
+ /* Enable required interrupts */
+ hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
+ UIC_ERROR |
+ UTP_TASK_REQ_COMPL |
+ DEVICE_FATAL_ERROR |
+ CONTROLLER_FATAL_ERROR |
+ SYSTEM_BUS_FATAL_ERROR);
+ ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+
+ /* Configure interrupt aggregation */
+ ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ scsi_unblock_requests(hba->host);
+
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ scsi_scan_host(hba->host);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_hba_enable - initialize the controller
+ * @hba: per adapter instance
+ *
+ * The controller resets itself and controller firmware initialization
+ * sequence kicks off. When controller is ready it will set
+ * the Host Controller Enable bit to 1.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+ int retry;
+
+	/*
+	 * The msleep(1) and msleep(5) calls in this function might end up
+	 * sleeping for as long as 20 ms, but they were necessary to send
+	 * the UFS FPGA to reset mode during development and testing of
+	 * this driver. msleep can be changed to mdelay and the retry count
+	 * can be reduced based on the controller.
+	 */
+ if (!ufshcd_is_hba_active(hba)) {
+
+ /* change controller state to "reset state" */
+ ufshcd_hba_stop(hba);
+
+ /*
+ * This delay is based on the testing done with UFS host
+ * controller FPGA. The delay can be changed based on the
+ * host controller used.
+ */
+ msleep(5);
+ }
+
+ /* start controller initialization sequence */
+ ufshcd_hba_start(hba);
+
+	/*
+	 * To initialize a UFS host controller the HCE bit must be set to 1.
+	 * During initialization the HCE bit value changes from 1->0->1.
+	 * When the host controller completes the initialization sequence
+	 * it sets the HCE bit back to 1; the same bit is read back to check
+	 * whether the controller has completed initialization. Without this
+	 * delay, the HCE = 1 just written by the previous instruction might
+	 * be read back immediately.
+	 * This delay can be changed based on the controller.
+	 */
+ msleep(1);
+
+ /* wait for the host controller to complete initialization */
+ retry = 10;
+ while (ufshcd_is_hba_active(hba)) {
+ if (retry) {
+ retry--;
+ } else {
+ dev_err(&hba->pdev->dev,
+ "Controller enable failed\n");
+ return -EIO;
+ }
+ msleep(5);
+ }
+ return 0;
+}
+
+/**
+ * ufshcd_initialize_hba - start the initialization process
+ * @hba: per adapter instance
+ *
+ * 1. Enable the controller via ufshcd_hba_enable.
+ * 2. Program the Transfer Request List Address with the starting address of
+ * UTRDL.
+ * 3. Program the Task Management Request List Address with starting address
+ * of UTMRDL.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int ufshcd_initialize_hba(struct ufs_hba *hba)
+{
+ if (ufshcd_hba_enable(hba))
+ return -EIO;
+
+ /* Configure UTRL and UTMRL base address registers */
+	writel(lower_32_bits(hba->utrdl_dma_addr),
+		(hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
+	writel(upper_32_bits(hba->utrdl_dma_addr),
+		(hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
+	writel(lower_32_bits(hba->utmrdl_dma_addr),
+		(hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
+	writel(upper_32_bits(hba->utmrdl_dma_addr),
+		(hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
+
+ /* Initialize unipro link startup procedure */
+ return ufshcd_dme_link_startup(hba);
+}
+
+/**
+ * ufshcd_do_reset - reset the host controller
+ * @hba: per adapter instance
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_do_reset(struct ufs_hba *hba)
+{
+ struct ufshcd_lrb *lrbp;
+ unsigned long flags;
+ int tag;
+
+ /* block commands from midlayer */
+ scsi_block_requests(hba->host);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+
+ /* send controller to reset state */
+ ufshcd_hba_stop(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* abort outstanding commands */
+ for (tag = 0; tag < hba->nutrs; tag++) {
+ if (test_bit(tag, &hba->outstanding_reqs)) {
+ lrbp = &hba->lrb[tag];
+ scsi_dma_unmap(lrbp->cmd);
+ lrbp->cmd->result = DID_RESET << 16;
+ lrbp->cmd->scsi_done(lrbp->cmd);
+ lrbp->cmd = NULL;
+ }
+ }
+
+ /* clear outstanding request/task bit maps */
+ hba->outstanding_reqs = 0;
+ hba->outstanding_tasks = 0;
+
+ /* start the initialization process */
+ if (ufshcd_initialize_hba(hba)) {
+ dev_err(&hba->pdev->dev,
+ "Reset: Controller initialization failed\n");
+ return FAILED;
+ }
+ return SUCCESS;
+}
+
+/**
+ * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * @sdev: pointer to SCSI device
+ *
+ * Returns success
+ */
+static int ufshcd_slave_alloc(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+ sdev->tagged_supported = 1;
+
+ /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
+ sdev->use_10_for_ms = 1;
+ scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
+
+	/*
+	 * Inform the SCSI midlayer that the LUN queue depth is the same as
+	 * the controller queue depth. If a LUN queue depth is less than the
+	 * controller queue depth and the LUN reports
+	 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
+	 * with scsi_adjust_queue_depth.
+	 */
+ scsi_activate_tcq(sdev, hba->nutrs);
+ return 0;
+}
+
+/**
+ * ufshcd_slave_destroy - remove SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static void ufshcd_slave_destroy(struct scsi_device *sdev)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+ scsi_deactivate_tcq(sdev, hba->nutrs);
+}
+
+/**
+ * ufshcd_task_req_compl - handle task management request completion
+ * @hba: per adapter instance
+ * @index: index of the completed request
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_rsp *task_rsp_upiup;
+ unsigned long flags;
+ int ocs_value;
+ int task_result;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* Clear completed tasks from outstanding_tasks */
+ __clear_bit(index, &hba->outstanding_tasks);
+
+ task_req_descp = hba->utmrdl_base_addr;
+ ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
+
+ if (ocs_value == OCS_SUCCESS) {
+ task_rsp_upiup = (struct utp_upiu_task_rsp *)
+ task_req_descp[index].task_rsp_upiu;
+ task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
+ task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
+
+		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
+		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
+ task_result = FAILED;
+ } else {
+ task_result = FAILED;
+ dev_err(&hba->pdev->dev,
+ "trc: Invalid ocs = %x\n", ocs_value);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return task_result;
+}
+
+/**
+ * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
+ * SAM_STAT_TASK_SET_FULL SCSI command status.
+ * @cmd: pointer to SCSI command
+ */
+static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
+{
+ struct ufs_hba *hba;
+ int i;
+ int lun_qdepth = 0;
+
+ hba = shost_priv(cmd->device->host);
+
+ /*
+ * LUN queue depth can be obtained by counting outstanding commands
+ * on the LUN.
+ */
+ for (i = 0; i < hba->nutrs; i++) {
+ if (test_bit(i, &hba->outstanding_reqs)) {
+
+ /*
+ * Check if the outstanding command belongs
+ * to the LUN which reported SAM_STAT_TASK_SET_FULL.
+ */
+ if (cmd->device->lun == hba->lrb[i].lun)
+ lun_qdepth++;
+ }
+ }
+
+	/*
+	 * The new LUN queue depth is the total number of outstanding
+	 * commands, excluding the command for which the LUN reported
+	 * SAM_STAT_TASK_SET_FULL.
+	 */
+ scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
+}
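+
+/*
+ * Worked example: if five commands are outstanding on the LUN that
+ * reported SAM_STAT_TASK_SET_FULL (including the failed one), the loop
+ * above counts lun_qdepth = 5 and the depth is adjusted to 4.
+ */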
+
+/**
+ * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
+ * @lrbp: pointer to local reference block of completed command
+ * @scsi_status: SCSI command status
+ *
+ * Returns a value based on the SCSI command status
+ */
+static inline int
+ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+{
+ int result = 0;
+
+ switch (scsi_status) {
+ case SAM_STAT_GOOD:
+ result |= DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ result |= DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ ufshcd_copy_sense_data(lrbp);
+ break;
+ case SAM_STAT_BUSY:
+ result |= SAM_STAT_BUSY;
+ break;
+ case SAM_STAT_TASK_SET_FULL:
+
+ /*
+ * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
+ * depth needs to be adjusted to the exact number of
+ * outstanding commands the LUN can handle at any given time.
+ */
+ ufshcd_adjust_lun_qdepth(lrbp->cmd);
+ result |= SAM_STAT_TASK_SET_FULL;
+ break;
+ case SAM_STAT_TASK_ABORTED:
+ result |= SAM_STAT_TASK_ABORTED;
+ break;
+ default:
+ result |= DID_ERROR << 16;
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_transfer_rsp_status - Get overall status of the response
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block of completed command
+ *
+ * Returns result of the command to notify SCSI midlayer
+ */
+static inline int
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int result = 0;
+ int scsi_status;
+ int ocs;
+
+ /* overall command status of utrd */
+ ocs = ufshcd_get_tr_ocs(lrbp);
+
+ switch (ocs) {
+ case OCS_SUCCESS:
+
+ /* check if the returned transfer response is valid */
+ result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
+ if (result) {
+ dev_err(&hba->pdev->dev,
+ "Invalid response = %x\n", result);
+ break;
+ }
+
+ /*
+ * get the response UPIU result to extract
+ * the SCSI command status
+ */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ /*
+ * get the result based on SCSI status response
+ * to notify the SCSI midlayer of the command status
+ */
+ scsi_status = result & MASK_SCSI_STATUS;
+ result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+ break;
+ case OCS_ABORTED:
+ result |= DID_ABORT << 16;
+ break;
+ case OCS_INVALID_CMD_TABLE_ATTR:
+ case OCS_INVALID_PRDT_ATTR:
+ case OCS_MISMATCH_DATA_BUF_SIZE:
+ case OCS_MISMATCH_RESP_UPIU_SIZE:
+ case OCS_PEER_COMM_FAILURE:
+ case OCS_FATAL_ERROR:
+ default:
+ result |= DID_ERROR << 16;
+ dev_err(&hba->pdev->dev,
+ "OCS error from controller = %x\n", ocs);
+ break;
+ } /* end of switch */
+
+ return result;
+}
+
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+ struct ufshcd_lrb *lrb;
+ unsigned long completed_reqs;
+ u32 tr_doorbell;
+ int result;
+ int index;
+
+ lrb = hba->lrb;
+ tr_doorbell =
+ readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+ for (index = 0; index < hba->nutrs; index++) {
+ if (test_bit(index, &completed_reqs)) {
+
+ result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
+
+ if (lrb[index].cmd) {
+ scsi_dma_unmap(lrb[index].cmd);
+ lrb[index].cmd->result = result;
+ lrb[index].cmd->scsi_done(lrb[index].cmd);
+
+ /* Mark completed command as NULL in LRB */
+ lrb[index].cmd = NULL;
+ }
+ } /* end of if */
+ } /* end of for */
+
+ /* clear corresponding bits of completed commands */
+ hba->outstanding_reqs ^= completed_reqs;
+
+ /* Reset interrupt aggregation counters */
+ ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+}
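+
+/*
+ * The doorbell XOR above works as follows, with illustrative bit
+ * patterns: if outstanding_reqs = 0b1011 (tags 0, 1 and 3 issued) and
+ * the doorbell reads 0b0001 (only tag 0 still pending), then
+ * completed_reqs = 0b0001 ^ 0b1011 = 0b1010, i.e. tags 1 and 3 are done.
+ */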
+
+/**
+ * ufshcd_uic_cc_handler - handle UIC command completion
+ * @work: pointer to a work queue structure
+ */
+static void ufshcd_uic_cc_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+
+ hba = container_of(work, struct ufs_hba, uic_workq);
+
+ if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
+ !(ufshcd_get_uic_cmd_result(hba))) {
+
+ if (ufshcd_make_hba_operational(hba))
+ dev_err(&hba->pdev->dev,
+				"cc: hba not in operational state\n");
+ return;
+ }
+}
+
+/**
+ * ufshcd_fatal_err_handler - handle fatal errors
+ * @work: pointer to a work queue structure
+ */
+static void ufshcd_fatal_err_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ hba = container_of(work, struct ufs_hba, feh_workq);
+
+ /* check if reset is already in progress */
+ if (hba->ufshcd_state != UFSHCD_STATE_RESET)
+ ufshcd_do_reset(hba);
+}
+
+/**
+ * ufshcd_err_handler - Check for fatal errors
+ * @hba: per adapter instance
+ */
+static void ufshcd_err_handler(struct ufs_hba *hba)
+{
+ u32 reg;
+
+ if (hba->errors & INT_FATAL_ERRORS)
+ goto fatal_eh;
+
+ if (hba->errors & UIC_ERROR) {
+
+ reg = readl(hba->mmio_base +
+ REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ goto fatal_eh;
+ }
+ return;
+fatal_eh:
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ schedule_work(&hba->feh_workq);
+}
+
+/**
+ * ufshcd_tmc_handler - handle task management function completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_tmc_handler(struct ufs_hba *hba)
+{
+ u32 tm_doorbell;
+
+ tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL);
+ hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+ wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
+}
+
+/**
+ * ufshcd_sl_intr - Interrupt service routine
+ * @hba: per adapter instance
+ * @intr_status: contains interrupts generated by the controller
+ */
+static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+{
+ hba->errors = UFSHCD_ERROR_MASK & intr_status;
+ if (hba->errors)
+ ufshcd_err_handler(hba);
+
+ if (intr_status & UIC_COMMAND_COMPL)
+ schedule_work(&hba->uic_workq);
+
+ if (intr_status & UTP_TASK_REQ_COMPL)
+ ufshcd_tmc_handler(hba);
+
+ if (intr_status & UTP_TRANSFER_REQ_COMPL)
+ ufshcd_transfer_req_compl(hba);
+}
+
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Returns IRQ_HANDLED if a valid interrupt was handled,
+ * IRQ_NONE otherwise
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ u32 intr_status;
+ irqreturn_t retval = IRQ_NONE;
+ struct ufs_hba *hba = __hba;
+
+ spin_lock(hba->host->host_lock);
+ intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS);
+
+ if (intr_status) {
+ ufshcd_sl_intr(hba, intr_status);
+
+		/* If UFSHCI 1.0, clear the interrupt status register */
+ if (hba->ufs_version == UFSHCI_VERSION_10)
+ writel(intr_status,
+ (hba->mmio_base + REG_INTERRUPT_STATUS));
+ retval = IRQ_HANDLED;
+ }
+ spin_unlock(hba->host->host_lock);
+ return retval;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ * @tm_function: task management function opcode
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int
+ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp,
+ u8 tm_function)
+{
+ struct utp_task_req_desc *task_req_descp;
+ struct utp_upiu_task_req *task_req_upiup;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int free_slot = 0;
+ int err;
+
+ host = hba->host;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ /* If task management queue is full */
+ free_slot = ufshcd_get_tm_free_slot(hba);
+ if (free_slot >= hba->nutmrs) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ dev_err(&hba->pdev->dev, "Task management queue full\n");
+ err = FAILED;
+ goto out;
+ }
+
+ task_req_descp = hba->utmrdl_base_addr;
+ task_req_descp += free_slot;
+
+ /* Configure task request descriptor */
+ task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ task_req_descp->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
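+	/*
+	 * dword_2 is seeded with an invalid OCS value so that a stale
+	 * status is never mistaken for a real completion result; the
+	 * controller overwrites it when the task finishes.
+	 */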
+
+ /* Configure task request UPIU */
+ task_req_upiup =
+ (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+ task_req_upiup->header.dword_0 =
+ cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+ lrbp->lun, lrbp->task_tag));
+ task_req_upiup->header.dword_1 =
+ cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
+
+	task_req_upiup->input_param1 = cpu_to_be32(lrbp->lun);
+	task_req_upiup->input_param2 = cpu_to_be32(lrbp->task_tag);
+
+ /* send command to the controller */
+ __set_bit(free_slot, &hba->outstanding_tasks);
+ writel((1 << free_slot),
+ (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL));
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ /* wait until the task management command is completed */
+ err =
+ wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
+ (test_bit(free_slot,
+ &hba->tm_condition) != 0),
+ 60 * HZ);
+ if (!err) {
+ dev_err(&hba->pdev->dev,
+ "Task management command timed-out\n");
+ err = FAILED;
+ goto out;
+ }
+ clear_bit(free_slot, &hba->tm_condition);
+ return ufshcd_task_req_compl(hba, free_slot);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_device_reset - reset device and abort all the pending commands
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned int tag;
+ u32 pos;
+ int err;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
+ if (err)
+ goto out;
+
+ for (pos = 0; pos < hba->nutrs; pos++) {
+ if (test_bit(pos, &hba->outstanding_reqs) &&
+ (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
+
+ /* clear the respective UTRLCLR register bit */
+ ufshcd_utrl_clear(hba, pos);
+
+ clear_bit(pos, &hba->outstanding_reqs);
+
+ if (hba->lrb[pos].cmd) {
+ scsi_dma_unmap(hba->lrb[pos].cmd);
+ hba->lrb[pos].cmd->result =
+ DID_ABORT << 16;
+				hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
+ hba->lrb[pos].cmd = NULL;
+ }
+ }
+ } /* end of for */
+out:
+ return err;
+}
+
+/**
+ * ufshcd_host_reset - Main reset function registered with scsi layer
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_host_reset(struct scsi_cmnd *cmd)
+{
+ struct ufs_hba *hba;
+
+ hba = shost_priv(cmd->device->host);
+
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ return SUCCESS;
+
+ return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED;
+}
+
+/**
+ * ufshcd_abort - abort a specific command
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ unsigned long flags;
+ unsigned int tag;
+ int err;
+
+ host = cmd->device->host;
+ hba = shost_priv(host);
+ tag = cmd->request->tag;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ /* check if command is still pending */
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
+ err = FAILED;
+ spin_unlock_irqrestore(host->host_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
+ if (err)
+ goto out;
+
+ scsi_dma_unmap(cmd);
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ /* clear the respective UTRLCLR register bit */
+ ufshcd_utrl_clear(hba, tag);
+
+ __clear_bit(tag, &hba->outstanding_reqs);
+ hba->lrb[tag].cmd = NULL;
+ spin_unlock_irqrestore(host->host_lock, flags);
+out:
+ return err;
+}
+
+static struct scsi_host_template ufshcd_driver_template = {
+ .module = THIS_MODULE,
+ .name = UFSHCD,
+ .proc_name = UFSHCD,
+ .queuecommand = ufshcd_queuecommand,
+ .slave_alloc = ufshcd_slave_alloc,
+ .slave_destroy = ufshcd_slave_destroy,
+ .eh_abort_handler = ufshcd_abort,
+ .eh_device_reset_handler = ufshcd_device_reset,
+ .eh_host_reset_handler = ufshcd_host_reset,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = UFSHCD_CMD_PER_LUN,
+ .can_queue = UFSHCD_CAN_QUEUE,
+};
+
+/**
+ * ufshcd_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_shutdown(struct pci_dev *pdev)
+{
+	ufshcd_hba_stop(pci_get_drvdata(pdev));
+}
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ /*
+ * TODO:
+ * 1. Block SCSI requests from SCSI midlayer
+ * 2. Change the internal driver state to non operational
+ * 3. Set UTRLRSR and UTMRLRSR bits to zero
+ * 4. Wait until outstanding commands are completed
+ * 5. Set HCE to zero to send the UFS host controller to reset state
+ */
+
+ return -ENOSYS;
+}
+
+/**
+ * ufshcd_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_resume(struct pci_dev *pdev)
+{
+ /*
+ * TODO:
+ * 1. Set HCE to 1, to start the UFS host controller
+ * initialization process
+ * 2. Set UTRLRSR and UTMRLRSR bits to 1
+ * 3. Change the internal driver state to operational
+ * 4. Unblock SCSI requests from SCSI midlayer
+ */
+
+ return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_hba_free - free allocated memory for host memory space data structures
+ * @hba: per adapter instance
+ */
+static void ufshcd_hba_free(struct ufs_hba *hba)
+{
+ iounmap(hba->mmio_base);
+ ufshcd_free_hba_memory(hba);
+ pci_release_regions(hba->pdev);
+}
+
+/**
+ * ufshcd_remove - de-allocate PCI/SCSI host and host memory space data structures
+ * @pdev: pointer to PCI handle
+ */
+static void ufshcd_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ /* disable interrupts */
+ ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
+ free_irq(pdev->irq, hba);
+
+ ufshcd_hba_stop(hba);
+ ufshcd_hba_free(hba);
+
+ scsi_remove_host(hba->host);
+ scsi_host_put(hba->host);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
+ int err;
+ u64 dma_mask;
+
+ /*
+ * If controller supports 64 bit addressing mode, then set the DMA
+ * mask to 64-bit, else set the DMA mask to 32-bit
+ */
+ if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
+ dma_mask = DMA_BIT_MASK(64);
+ else
+ dma_mask = DMA_BIT_MASK(32);
+
+ err = pci_set_dma_mask(hba->pdev, dma_mask);
+ if (err)
+ return err;
+
+ err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
+
+ return err;
+}
+
+/**
+ * ufshcd_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int __devinit
+ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto out_error;
+ }
+
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&ufshcd_driver_template,
+ sizeof(struct ufs_hba));
+ if (!host) {
+ dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ err = -ENOMEM;
+ goto out_disable;
+ }
+ hba = shost_priv(host);
+
+ err = pci_request_regions(pdev, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request regions failed\n");
+ goto out_disable;
+ }
+
+ hba->mmio_base = pci_ioremap_bar(pdev, 0);
+ if (!hba->mmio_base) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ hba->host = host;
+ hba->pdev = pdev;
+
+ /* Read capabilities registers */
+ ufshcd_hba_capabilities(hba);
+
+ /* Get UFS version supported by the controller */
+ hba->ufs_version = ufshcd_get_ufs_version(hba);
+
+ err = ufshcd_set_dma_mask(hba);
+ if (err) {
+ dev_err(&pdev->dev, "set dma mask failed\n");
+ goto out_iounmap;
+ }
+
+ /* Allocate memory for host memory space */
+ err = ufshcd_memory_alloc(hba);
+ if (err) {
+ dev_err(&pdev->dev, "Memory allocation failed\n");
+ goto out_iounmap;
+ }
+
+ /* Configure LRB */
+ ufshcd_host_memory_configure(hba);
+
+ host->can_queue = hba->nutrs;
+ host->cmd_per_lun = hba->nutrs;
+ host->max_id = UFSHCD_MAX_ID;
+ host->max_lun = UFSHCD_MAX_LUNS;
+ host->max_channel = UFSHCD_MAX_CHANNEL;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = MAX_CDB_SIZE;
+
+	/* Initialize wait queue for task management */
+ init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
+
+ /* Initialize work queues */
+ INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
+ INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+
+ /* IRQ registration */
+ err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ if (err) {
+ dev_err(&pdev->dev, "request irq failed\n");
+ goto out_lrb_free;
+ }
+
+ /* Enable SCSI tag mapping */
+ err = scsi_init_shared_tag_map(host, host->can_queue);
+ if (err) {
+ dev_err(&pdev->dev, "init shared queue failed\n");
+ goto out_free_irq;
+ }
+
+ pci_set_drvdata(pdev, hba);
+
+ err = scsi_add_host(host, &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "scsi_add_host failed\n");
+ goto out_free_irq;
+ }
+
+ /* Initialization routine */
+ err = ufshcd_initialize_hba(hba);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ goto out_free_irq;
+ }
+
+ return 0;
+
+out_free_irq:
+ free_irq(pdev->irq, hba);
+out_lrb_free:
+ ufshcd_free_hba_memory(hba);
+out_iounmap:
+ iounmap(hba->mmio_base);
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable:
+ scsi_host_put(host);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+out_error:
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+ { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+ .name = UFSHCD,
+ .id_table = ufshcd_pci_tbl,
+ .probe = ufshcd_probe,
+ .remove = __devexit_p(ufshcd_remove),
+ .shutdown = ufshcd_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ufshcd_suspend,
+ .resume = ufshcd_resume,
+#endif
+};
+
+/**
+ * ufshcd_init - Driver registration routine
+ */
+static int __init ufshcd_init(void)
+{
+ return pci_register_driver(&ufshcd_pci_driver);
+}
+module_init(ufshcd_init);
+
+/**
+ * ufshcd_exit - Driver exit clean-up routine
+ */
+static void __exit ufshcd_exit(void)
+{
+ pci_unregister_driver(&ufshcd_pci_driver);
+}
+module_exit(ufshcd_exit);
+
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
+ "Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
new file mode 100644
index 000000000000..6e3510f71167
--- /dev/null
+++ b/drivers/scsi/ufs/ufshci.h
@@ -0,0 +1,376 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshci.h
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef _UFSHCI_H
+#define _UFSHCI_H
+
+enum {
+ TASK_REQ_UPIU_SIZE_DWORDS = 8,
+ TASK_RSP_UPIU_SIZE_DWORDS = 8,
+ ALIGNED_UPIU_SIZE = 128,
+};
+
+/* UFSHCI Registers */
+enum {
+ REG_CONTROLLER_CAPABILITIES = 0x00,
+ REG_UFS_VERSION = 0x08,
+ REG_CONTROLLER_DEV_ID = 0x10,
+ REG_CONTROLLER_PROD_ID = 0x14,
+ REG_INTERRUPT_STATUS = 0x20,
+ REG_INTERRUPT_ENABLE = 0x24,
+ REG_CONTROLLER_STATUS = 0x30,
+ REG_CONTROLLER_ENABLE = 0x34,
+ REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
+ REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
+ REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
+ REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
+ REG_UIC_ERROR_CODE_DME = 0x48,
+ REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
+ REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
+ REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+ REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
+ REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
+ REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
+ REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
+ REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
+ REG_UIC_COMMAND = 0x90,
+ REG_UIC_COMMAND_ARG_1 = 0x94,
+ REG_UIC_COMMAND_ARG_2 = 0x98,
+ REG_UIC_COMMAND_ARG_3 = 0x9C,
+};
+
+/* Controller capability masks */
+enum {
+ MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
+ MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
+ MASK_64_ADDRESSING_SUPPORT = 0x01000000,
+ MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
+ MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+};
+
+/* UFS Version 08h */
+#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
+#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
+
+/* Controller UFSHCI version */
+enum {
+ UFSHCI_VERSION_10 = 0x00010000,
+ UFSHCI_VERSION_11 = 0x00010100,
+};
+
+/*
+ * HCDDID - Host Controller Identification Descriptor
+ * - Device ID and Device Class 10h
+ */
+#define DEVICE_CLASS UFS_MASK(0xFFFF, 0)
+#define DEVICE_ID UFS_MASK(0xFF, 24)
+
+/*
+ * HCPMID - Host Controller Identification Descriptor
+ * - Product/Manufacturer ID 14h
+ */
+#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
+#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
+
+#define UFS_BIT(x) (1L << (x))
+
+#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0)
+#define UIC_DME_END_PT_RESET UFS_BIT(1)
+#define UIC_ERROR UFS_BIT(2)
+#define UIC_TEST_MODE UFS_BIT(3)
+#define UIC_POWER_MODE UFS_BIT(4)
+#define UIC_HIBERNATE_EXIT UFS_BIT(5)
+#define UIC_HIBERNATE_ENTER UFS_BIT(6)
+#define UIC_LINK_LOST UFS_BIT(7)
+#define UIC_LINK_STARTUP UFS_BIT(8)
+#define UTP_TASK_REQ_COMPL UFS_BIT(9)
+#define UIC_COMMAND_COMPL UFS_BIT(10)
+#define DEVICE_FATAL_ERROR UFS_BIT(11)
+#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
+#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
+
+#define UFSHCD_ERROR_MASK (UIC_ERROR |\
+ DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR)
+
+#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
+ SYSTEM_BUS_FATAL_ERROR)
+
+/* HCS - Host Controller Status 30h */
+#define DEVICE_PRESENT UFS_BIT(0)
+#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1)
+#define UTP_TASK_REQ_LIST_READY UFS_BIT(2)
+#define UIC_COMMAND_READY UFS_BIT(3)
+#define HOST_ERROR_INDICATOR UFS_BIT(4)
+#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
+#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
+
+/* HCE - Host Controller Enable 34h */
+#define CONTROLLER_ENABLE UFS_BIT(0)
+#define CONTROLLER_DISABLE 0x0
+
+/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
+#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+
+/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
+#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
+
+/* UECN - Host UIC Error Code Network Layer 40h */
+#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
+#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+
+/* UECT - Host UIC Error Code Transport Layer 44h */
+#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31)
+#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+
+/* UECDME - Host UIC Error Code DME 48h */
+#define UIC_DME_ERROR UFS_BIT(31)
+#define UIC_DME_ERROR_CODE_MASK 0x1
+
+#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
+#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
+#define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16)
+#define INT_AGGR_STATUS_BIT UFS_BIT(20)
+#define INT_AGGR_PARAM_WRITE UFS_BIT(24)
+#define INT_AGGR_ENABLE UFS_BIT(31)
+
+/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+
+/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+
+/* UICCMD - UIC Command */
+#define COMMAND_OPCODE_MASK 0xFF
+#define GEN_SELECTOR_INDEX_MASK 0xFFFF
+
+#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
+#define RESET_LEVEL 0xFF
+
+#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
+#define CONFIG_RESULT_CODE_MASK 0xFF
+#define GENERIC_ERROR_CODE_MASK 0xFF
+
+/* UIC Commands */
+enum {
+ UIC_CMD_DME_GET = 0x01,
+ UIC_CMD_DME_SET = 0x02,
+ UIC_CMD_DME_PEER_GET = 0x03,
+ UIC_CMD_DME_PEER_SET = 0x04,
+ UIC_CMD_DME_POWERON = 0x10,
+ UIC_CMD_DME_POWEROFF = 0x11,
+ UIC_CMD_DME_ENABLE = 0x12,
+ UIC_CMD_DME_RESET = 0x14,
+ UIC_CMD_DME_END_PT_RST = 0x15,
+ UIC_CMD_DME_LINK_STARTUP = 0x16,
+ UIC_CMD_DME_HIBER_ENTER = 0x17,
+ UIC_CMD_DME_HIBER_EXIT = 0x18,
+ UIC_CMD_DME_TEST_MODE = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+ UIC_CMD_RESULT_SUCCESS = 0x00,
+ UIC_CMD_RESULT_INVALID_ATTR = 0x01,
+ UIC_CMD_RESULT_FAILURE = 0x01,
+ UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
+ UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
+ UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
+ UIC_CMD_RESULT_BAD_INDEX = 0x05,
+ UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
+ UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
+ UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
+ UIC_CMD_RESULT_BUSY = 0x09,
+ UIC_CMD_RESULT_DME_FAILURE = 0x0A,
+};
+
+#define MASK_UIC_COMMAND_RESULT 0xFF
+
+#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8)
+#define INT_AGGR_TIMEOUT_VALUE (0x02)
+
+/* Interrupt disable masks */
+enum {
+ /* Interrupt disable mask for UFSHCI v1.0 */
+ INTERRUPT_DISABLE_MASK_10 = 0xFFFF,
+
+ /* Interrupt disable mask for UFSHCI v1.1 */
+ INTERRUPT_DISABLE_MASK_11 = 0x0,
+};
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+ UTP_CMD_TYPE_SCSI = 0x0,
+ UTP_CMD_TYPE_UFS = 0x1,
+ UTP_CMD_TYPE_DEV_MANAGE = 0x2,
+};
+
+enum {
+ UTP_SCSI_COMMAND = 0x00000000,
+ UTP_NATIVE_UFS_COMMAND = 0x10000000,
+ UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
+ UTP_REQ_DESC_INT_CMD = 0x01000000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+ UTP_NO_DATA_TRANSFER = 0x00000000,
+ UTP_HOST_TO_DEVICE = 0x02000000,
+ UTP_DEVICE_TO_HOST = 0x04000000,
+};
+
+/* Overall command status values */
+enum {
+ OCS_SUCCESS = 0x0,
+ OCS_INVALID_CMD_TABLE_ATTR = 0x1,
+ OCS_INVALID_PRDT_ATTR = 0x2,
+ OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
+ OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
+ OCS_PEER_COMM_FAILURE = 0x5,
+ OCS_ABORTED = 0x6,
+ OCS_FATAL_ERROR = 0x7,
+ OCS_INVALID_COMMAND_STATUS = 0x0F,
+ MASK_OCS = 0x0F,
+};
+
+/**
+ * struct ufshcd_sg_entry - UFSHCI PRD Entry
+ * @base_addr: Lower 32bit physical address DW-0
+ * @upper_addr: Upper 32bit physical address DW-1
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+struct ufshcd_sg_entry {
+ u32 base_addr;
+ u32 upper_addr;
+ u32 reserved;
+ u32 size;
+};
+
+/**
+ * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * @command_upiu: Command UPIU Frame address
+ * @response_upiu: Response UPIU Frame address
+ * @prd_table: Physical Region Descriptor
+ */
+struct utp_transfer_cmd_desc {
+ u8 command_upiu[ALIGNED_UPIU_SIZE];
+ u8 response_upiu[ALIGNED_UPIU_SIZE];
+ struct ufshcd_sg_entry prd_table[SG_ALL];
+};
+
+/**
+ * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
+ * @dword_0: Descriptor Header DW0
+ * @dword_1: Descriptor Header DW1
+ * @dword_2: Descriptor Header DW2
+ * @dword_3: Descriptor Header DW3
+ */
+struct request_desc_header {
+ u32 dword_0;
+ u32 dword_1;
+ u32 dword_2;
+ u32 dword_3;
+};
+
+/**
+ * struct utp_transfer_req_desc - UTRD structure
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+struct utp_transfer_req_desc {
+
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-5*/
+ u32 command_desc_base_addr_lo;
+ u32 command_desc_base_addr_hi;
+
+ /* DW 6 */
+ u16 response_upiu_length;
+ u16 response_upiu_offset;
+
+ /* DW 7 */
+ u16 prd_table_length;
+ u16 prd_table_offset;
+};
+
+/**
+ * struct utp_task_req_desc - UTMRD structure
+ * @header: UTMRD header DW-0 to DW-3
+ * @task_req_upiu: task request UPIU, DW-4 to DW-11
+ * @task_rsp_upiu: task response UPIU, DW-12 to DW-19
+ */
+struct utp_task_req_desc {
+
+ /* DW 0-3 */
+ struct request_desc_header header;
+
+ /* DW 4-11 */
+ u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+
+ /* DW 12-19 */
+ u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 7e22b737dfd8..14e0c40a68c9 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -141,7 +141,6 @@
#include <linux/delay.h>
#include <asm/io.h>
-#include <asm/system.h>
#include <asm/dma.h>
#define ULTRASTOR_PRIVATE /* Get the private stuff from ultrastor.h */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
new file mode 100644
index 000000000000..efccd72c4a3e
--- /dev/null
+++ b/drivers/scsi/virtio_scsi.c
@@ -0,0 +1,594 @@
+/*
+ * Virtio SCSI HBA driver
+ *
+ * Copyright IBM Corp. 2010
+ * Copyright Red Hat, Inc. 2011
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#define VIRTIO_SCSI_MEMPOOL_SZ 64
+
+/* Command queue element */
+struct virtio_scsi_cmd {
+ struct scsi_cmnd *sc;
+ struct completion *comp;
+ union {
+ struct virtio_scsi_cmd_req cmd;
+ struct virtio_scsi_ctrl_tmf_req tmf;
+ struct virtio_scsi_ctrl_an_req an;
+ } req;
+ union {
+ struct virtio_scsi_cmd_resp cmd;
+ struct virtio_scsi_ctrl_tmf_resp tmf;
+ struct virtio_scsi_ctrl_an_resp an;
+ struct virtio_scsi_event evt;
+ } resp;
+} ____cacheline_aligned_in_smp;
+
+/* Driver instance state */
+struct virtio_scsi {
+ /* Protects ctrl_vq, req_vq and sg[] */
+ spinlock_t vq_lock;
+
+ struct virtio_device *vdev;
+ struct virtqueue *ctrl_vq;
+ struct virtqueue *event_vq;
+ struct virtqueue *req_vq;
+
+ /* For sglist construction when adding commands to the virtqueue. */
+ struct scatterlist sg[];
+};
+
+static struct kmem_cache *virtscsi_cmd_cache;
+static mempool_t *virtscsi_cmd_pool;
+
+static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
+{
+ return vdev->priv;
+}
+
+static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
+{
+ if (!resid)
+ return;
+
+ if (!scsi_bidi_cmnd(sc)) {
+ scsi_set_resid(sc, resid);
+ return;
+ }
+
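+	/*
+	 * Bidirectional command: charge the residual to the data-in
+	 * buffer first and attribute any remainder to data-out.
+	 */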
+ scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
+ scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
+}
+
+/**
+ * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
+ *
+ * Called with vq_lock held.
+ */
+static void virtscsi_complete_cmd(void *buf)
+{
+ struct virtio_scsi_cmd *cmd = buf;
+ struct scsi_cmnd *sc = cmd->sc;
+ struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
+
+ dev_dbg(&sc->device->sdev_gendev,
+ "cmd %p response %u status %#02x sense_len %u\n",
+ sc, resp->response, resp->status, resp->sense_len);
+
+ sc->result = resp->status;
+ virtscsi_compute_resid(sc, resp->resid);
+ switch (resp->response) {
+ case VIRTIO_SCSI_S_OK:
+ set_host_byte(sc, DID_OK);
+ break;
+ case VIRTIO_SCSI_S_OVERRUN:
+ set_host_byte(sc, DID_ERROR);
+ break;
+ case VIRTIO_SCSI_S_ABORTED:
+ set_host_byte(sc, DID_ABORT);
+ break;
+ case VIRTIO_SCSI_S_BAD_TARGET:
+ set_host_byte(sc, DID_BAD_TARGET);
+ break;
+ case VIRTIO_SCSI_S_RESET:
+ set_host_byte(sc, DID_RESET);
+ break;
+ case VIRTIO_SCSI_S_BUSY:
+ set_host_byte(sc, DID_BUS_BUSY);
+ break;
+ case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
+ set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+ break;
+ case VIRTIO_SCSI_S_TARGET_FAILURE:
+ set_host_byte(sc, DID_TARGET_FAILURE);
+ break;
+ case VIRTIO_SCSI_S_NEXUS_FAILURE:
+ set_host_byte(sc, DID_NEXUS_FAILURE);
+ break;
+ default:
+		scmd_printk(KERN_WARNING, sc, "Unknown response %d\n",
+ resp->response);
+ /* fall through */
+ case VIRTIO_SCSI_S_FAILURE:
+ set_host_byte(sc, DID_ERROR);
+ break;
+ }
+
+ WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
+ if (sc->sense_buffer) {
+ memcpy(sc->sense_buffer, resp->sense,
+ min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
+ if (resp->sense_len)
+ set_driver_byte(sc, DRIVER_SENSE);
+ }
+
+ mempool_free(cmd, virtscsi_cmd_pool);
+ sc->scsi_done(sc);
+}
+
+static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ void *buf;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vscsi->vq_lock, flags);
+
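+	/*
+	 * Disable callbacks while draining; if more buffers arrive
+	 * before callbacks are re-enabled, virtqueue_enable_cb()
+	 * returns false and we drain again, so no completion is lost.
+	 */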
+ do {
+ virtqueue_disable_cb(vq);
+ while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
+ fn(buf);
+ } while (!virtqueue_enable_cb(vq));
+
+ spin_unlock_irqrestore(&vscsi->vq_lock, flags);
+}
+
+static void virtscsi_req_done(struct virtqueue *vq)
+{
+ virtscsi_vq_done(vq, virtscsi_complete_cmd);
+}
+
+static void virtscsi_complete_free(void *buf)
+{
+ struct virtio_scsi_cmd *cmd = buf;
+
+ if (cmd->comp)
+ complete_all(cmd->comp);
+ mempool_free(cmd, virtscsi_cmd_pool);
+}
+
+static void virtscsi_ctrl_done(struct virtqueue *vq)
+{
+ virtscsi_vq_done(vq, virtscsi_complete_free);
+}
+
+static void virtscsi_event_done(struct virtqueue *vq)
+{
+ virtscsi_vq_done(vq, virtscsi_complete_free);
+}
+
+static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
+ struct scsi_data_buffer *sdb)
+{
+ struct sg_table *table = &sdb->table;
+ struct scatterlist *sg_elem;
+ unsigned int idx = *p_idx;
+ int i;
+
+ for_each_sg(table->sgl, sg_elem, table->nents, i)
+ sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length);
+
+ *p_idx = idx;
+}
+
+/**
+ * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
+ * @vscsi : virtio_scsi state
+ * @cmd : command structure
+ * @out_num : number of read-only elements
+ * @in_num : number of write-only elements
+ * @req_size : size of the request buffer
+ * @resp_size : size of the response buffer
+ *
+ * Called with vq_lock held.
+ */
+static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
+ struct virtio_scsi_cmd *cmd,
+ unsigned *out_num, unsigned *in_num,
+ size_t req_size, size_t resp_size)
+{
+ struct scsi_cmnd *sc = cmd->sc;
+ struct scatterlist *sg = vscsi->sg;
+ unsigned int idx = 0;
+
+ if (sc) {
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
+
+ /* TODO: check feature bit and fail if unsupported? */
+ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
+ }
+
+ /* Request header. */
+ sg_set_buf(&sg[idx++], &cmd->req, req_size);
+
+ /* Data-out buffer. */
+ if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
+ virtscsi_map_sgl(sg, &idx, scsi_out(sc));
+
+ *out_num = idx;
+
+ /* Response header. */
+ sg_set_buf(&sg[idx++], &cmd->resp, resp_size);
+
+ /* Data-in buffer */
+ if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
+ virtscsi_map_sgl(sg, &idx, scsi_in(sc));
+
+ *in_num = idx - *out_num;
+}
+
+static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
+ struct virtio_scsi_cmd *cmd,
+ size_t req_size, size_t resp_size, gfp_t gfp)
+{
+ unsigned int out_num, in_num;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vscsi->vq_lock, flags);
+
+ virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);
+
+ ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp);
+ if (ret >= 0)
+ virtqueue_kick(vq);
+
+ spin_unlock_irqrestore(&vscsi->vq_lock, flags);
+ return ret;
+}
+
+static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ struct virtio_scsi_cmd *cmd;
+ int ret;
+
+ dev_dbg(&sc->device->sdev_gendev,
+ "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
+
+ ret = SCSI_MLQUEUE_HOST_BUSY;
+ cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
+ if (!cmd)
+ goto out;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sc = sc;
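+	/*
+	 * Encode the destination as a virtio-scsi 8-byte LUN: byte 0
+	 * selects the single-level format, byte 1 is the target and
+	 * bytes 2-3 carry the LUN in SAM flat-space addressing (hence
+	 * the 0x40 in byte 2).
+	 */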
+ cmd->req.cmd = (struct virtio_scsi_cmd_req){
+ .lun[0] = 1,
+ .lun[1] = sc->device->id,
+ .lun[2] = (sc->device->lun >> 8) | 0x40,
+ .lun[3] = sc->device->lun & 0xff,
+ .tag = (unsigned long)sc,
+ .task_attr = VIRTIO_SCSI_S_SIMPLE,
+ .prio = 0,
+ .crn = 0,
+ };
+
+ BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
+ memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+
+ if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd,
+ sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
+ GFP_ATOMIC) >= 0)
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
+{
+ DECLARE_COMPLETION_ONSTACK(comp);
+ int ret;
+
+ cmd->comp = &comp;
+ ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
+ GFP_NOIO);
+ if (ret < 0)
+ return FAILED;
+
+ wait_for_completion(&comp);
+ if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK &&
+ cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
+ return FAILED;
+
+ return SUCCESS;
+}
+
+static int virtscsi_device_reset(struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sc->device->host);
+ struct virtio_scsi_cmd *cmd;
+
+ sdev_printk(KERN_INFO, sc->device, "device reset\n");
+ cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
+ if (!cmd)
+ return FAILED;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sc = sc;
+ cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
+ .type = VIRTIO_SCSI_T_TMF,
+ .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
+ .lun[0] = 1,
+ .lun[1] = sc->device->id,
+ .lun[2] = (sc->device->lun >> 8) | 0x40,
+ .lun[3] = sc->device->lun & 0xff,
+ };
+ return virtscsi_tmf(vscsi, cmd);
+}
+
+static int virtscsi_abort(struct scsi_cmnd *sc)
+{
+ struct virtio_scsi *vscsi = shost_priv(sc->device->host);
+ struct virtio_scsi_cmd *cmd;
+
+ scmd_printk(KERN_INFO, sc, "abort\n");
+ cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
+ if (!cmd)
+ return FAILED;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->sc = sc;
+ cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
+ .type = VIRTIO_SCSI_T_TMF,
+ .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
+ .lun[0] = 1,
+ .lun[1] = sc->device->id,
+ .lun[2] = (sc->device->lun >> 8) | 0x40,
+ .lun[3] = sc->device->lun & 0xff,
+ .tag = (unsigned long)sc,
+ };
+ return virtscsi_tmf(vscsi, cmd);
+}
+
+static struct scsi_host_template virtscsi_host_template = {
+ .module = THIS_MODULE,
+ .name = "Virtio SCSI HBA",
+ .proc_name = "virtio_scsi",
+ .queuecommand = virtscsi_queuecommand,
+ .this_id = -1,
+ .eh_abort_handler = virtscsi_abort,
+ .eh_device_reset_handler = virtscsi_device_reset,
+
+ .can_queue = 1024,
+ .dma_boundary = UINT_MAX,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
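+/*
+ * Accessors for one field of the virtio-scsi config space; the local
+ * temporary is typed after the field so the size passed to the config
+ * get/set hook matches the device-visible layout.
+ * Typical use: shost->max_sectors = virtscsi_config_get(vdev, max_sectors);
+ */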
+#define virtscsi_config_get(vdev, fld) \
+ ({ \
+ typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+ vdev->config->get(vdev, \
+ offsetof(struct virtio_scsi_config, fld), \
+ &__val, sizeof(__val)); \
+ __val; \
+ })
+
+#define virtscsi_config_set(vdev, fld, val) \
+ (void)({ \
+ typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+ vdev->config->set(vdev, \
+ offsetof(struct virtio_scsi_config, fld), \
+ &__val, sizeof(__val)); \
+ })
+
+static int virtscsi_init(struct virtio_device *vdev,
+ struct virtio_scsi *vscsi)
+{
+ int err;
+ struct virtqueue *vqs[3];
+ vq_callback_t *callbacks[] = {
+ virtscsi_ctrl_done,
+ virtscsi_event_done,
+ virtscsi_req_done
+ };
+ const char *names[] = {
+ "control",
+ "event",
+ "request"
+ };
+
+ /* Discover virtqueues and write information to configuration. */
+ err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
+ if (err)
+ return err;
+
+ vscsi->ctrl_vq = vqs[0];
+ vscsi->event_vq = vqs[1];
+ vscsi->req_vq = vqs[2];
+
+ virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
+ virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
+ return 0;
+}
+
+static int __devinit virtscsi_probe(struct virtio_device *vdev)
+{
+ struct Scsi_Host *shost;
+ struct virtio_scsi *vscsi;
+ int err;
+ u32 sg_elems;
+ u32 cmd_per_lun;
+
+	/* We need to know how many segments there are before we allocate.
+	 * Two extra sg elements are needed, one at the head for the
+	 * request header and one at the tail for the response header.
+ */
+ sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
+
+ /* Allocate memory and link the structs together. */
+ shost = scsi_host_alloc(&virtscsi_host_template,
+ sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));
+
+ if (!shost)
+ return -ENOMEM;
+
+ shost->sg_tablesize = sg_elems;
+ vscsi = shost_priv(shost);
+ vscsi->vdev = vdev;
+ vdev->priv = shost;
+
+	/* Initialize the lock and scatterlist table. */
+ spin_lock_init(&vscsi->vq_lock);
+ sg_init_table(vscsi->sg, sg_elems + 2);
+
+ err = virtscsi_init(vdev, vscsi);
+ if (err)
+ goto virtscsi_init_failed;
+
+ cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
+ shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
+ shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
+ shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
+ shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
+ shost->max_channel = 0;
+ shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+ err = scsi_add_host(shost, &vdev->dev);
+ if (err)
+ goto scsi_add_host_failed;
+
+ scsi_scan_host(shost);
+
+ return 0;
+
+scsi_add_host_failed:
+ vdev->config->del_vqs(vdev);
+virtscsi_init_failed:
+ scsi_host_put(shost);
+ return err;
+}
+
+static void virtscsi_remove_vqs(struct virtio_device *vdev)
+{
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
+ vdev->config->del_vqs(vdev);
+}
+
+static void __devexit virtscsi_remove(struct virtio_device *vdev)
+{
+ struct Scsi_Host *shost = virtio_scsi_host(vdev);
+
+ scsi_remove_host(shost);
+
+ virtscsi_remove_vqs(vdev);
+ scsi_host_put(shost);
+}
+
+#ifdef CONFIG_PM
+static int virtscsi_freeze(struct virtio_device *vdev)
+{
+ virtscsi_remove_vqs(vdev);
+ return 0;
+}
+
+static int virtscsi_restore(struct virtio_device *vdev)
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
+ return virtscsi_init(vdev, vscsi);
+}
+#endif
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_scsi_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtscsi_probe,
+#ifdef CONFIG_PM
+ .freeze = virtscsi_freeze,
+ .restore = virtscsi_restore,
+#endif
+ .remove = __devexit_p(virtscsi_remove),
+};
+
+static int __init init(void)
+{
+ int ret = -ENOMEM;
+
+ virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
+ if (!virtscsi_cmd_cache) {
+ printk(KERN_ERR "kmem_cache_create() for "
+ "virtscsi_cmd_cache failed\n");
+ goto error;
+ }
+
+
+ virtscsi_cmd_pool =
+ mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
+ virtscsi_cmd_cache);
+ if (!virtscsi_cmd_pool) {
+ printk(KERN_ERR "mempool_create() for"
+ "virtscsi_cmd_pool failed\n");
+ goto error;
+ }
+ ret = register_virtio_driver(&virtio_scsi_driver);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ if (virtscsi_cmd_pool) {
+ mempool_destroy(virtscsi_cmd_pool);
+ virtscsi_cmd_pool = NULL;
+ }
+ if (virtscsi_cmd_cache) {
+ kmem_cache_destroy(virtscsi_cmd_cache);
+ virtscsi_cmd_cache = NULL;
+ }
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ unregister_virtio_driver(&virtio_scsi_driver);
+ mempool_destroy(virtscsi_cmd_pool);
+ kmem_cache_destroy(virtscsi_cmd_cache);
+}
+module_init(init);
+module_exit(fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio SCSI HBA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 7264116185d5..4411d4224401 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*
*/
@@ -1178,11 +1178,67 @@ static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
return 0;
}
+/*
+ * Query the device, fetch the config info and return the
+ * maximum number of targets on the adapter. On any failure,
+ * return the default of 16.
+ */
+static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
+{
+ struct PVSCSICmdDescConfigCmd cmd;
+ struct PVSCSIConfigPageHeader *header;
+ struct device *dev;
+ dma_addr_t configPagePA;
+ void *config_page;
+ u32 numPhys = 16;
+
+ dev = pvscsi_dev(adapter);
+ config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+ &configPagePA);
+ if (!config_page) {
+ dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
+ goto exit;
+ }
+ BUG_ON(configPagePA & ~PAGE_MASK);
+
+ /* Fetch config info from the device. */
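+	/* The page-address type goes in the upper 32 bits; the controller
+	 * page needs no bus/target address in the lower half.
+	 */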
+ cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
+ cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
+ cmd.cmpAddr = configPagePA;
+ cmd._pad = 0;
+
+ /*
+ * Mark the completion page header with error values. If the device
+ * completes the command successfully, it sets the status values to
+ * indicate success.
+ */
+ header = config_page;
+ memset(header, 0, sizeof *header);
+ header->hostStatus = BTSTAT_INVPARAM;
+ header->scsiStatus = SDSTAT_CHECK;
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
+
+ if (header->hostStatus == BTSTAT_SUCCESS &&
+ header->scsiStatus == SDSTAT_GOOD) {
+ struct PVSCSIConfigPageController *config;
+
+ config = config_page;
+ numPhys = config->numPhys;
+ } else
+ dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
+ header->hostStatus, header->scsiStatus);
+ pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+exit:
+ return numPhys;
+}
+
static int __devinit pvscsi_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pvscsi_adapter *adapter;
struct Scsi_Host *host;
+ struct device *dev;
unsigned int i;
unsigned long flags = 0;
int error;
@@ -1272,6 +1328,13 @@ static int __devinit pvscsi_probe(struct pci_dev *pdev,
}
/*
+ * Ask the device for max number of targets.
+ */
+ host->max_id = pvscsi_get_max_targets(adapter);
+ dev = pvscsi_dev(adapter);
+ dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
+
+ /*
* From this point on we should reset the adapter if anything goes
* wrong.
*/
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 62e36e75715e..3546e8662e30 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*
*/
@@ -26,7 +26,7 @@
#include <linux/types.h>
-#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.2.0-k"
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
@@ -39,28 +39,45 @@
* host adapter status/error codes
*/
enum HostBusAdapterStatus {
- BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
- BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
- BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
- BTSTAT_DATA_UNDERRUN = 0x0c,
- BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
- BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
- BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
- BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
- BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
- BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
- BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
- BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
- BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
- BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
- BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
- BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
- BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
- BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
- BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
- BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
- BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
- BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
+ BTSTAT_DATA_UNDERRUN = 0x0c,
+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence
+ * requested by target */
+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from
+ * first CCB */
+ BTSTAT_INVPARAM = 0x1a, /* invalid parameter in CCB or segment
+ * list */
+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message
+ * rejected by target */
+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the
+ * host adapter */
+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN,
+ * sent a SCSI RST */
+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI
+ * RST */
+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly
+ * (w/o tag) */
+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+};
+
+/*
+ * SCSI device status values.
+ */
+enum ScsiDeviceStatus {
+ SDSTAT_GOOD = 0x00, /* No errors. */
+ SDSTAT_CHECK = 0x02, /* Check condition. */
};
/*
@@ -114,6 +131,29 @@ struct PVSCSICmdDescResetDevice {
} __packed;
/*
+ * Command descriptor for PVSCSI_CMD_CONFIG --
+ */
+
+struct PVSCSICmdDescConfigCmd {
+ u64 cmpAddr;
+ u64 configPageAddress;
+ u32 configPageNum;
+ u32 _pad;
+} __packed;
+
+enum PVSCSIConfigPageType {
+ PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
+ PVSCSI_CONFIG_PAGE_PHY = 0x1959,
+ PVSCSI_CONFIG_PAGE_DEVICE = 0x195a,
+};
+
+enum PVSCSIConfigPageAddressType {
+ PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120,
+ PVSCSI_CONFIG_BUSTARGET_ADDRESS = 0x2121,
+ PVSCSI_CONFIG_PHY_ADDRESS = 0x2122,
+};
+
+/*
* Command descriptor for PVSCSI_CMD_ABORT_CMD --
*
* - currently does not support specifying the LUN.
@@ -332,6 +372,27 @@ struct PVSCSIRingCmpDesc {
u32 _pad[2];
} __packed;
+struct PVSCSIConfigPageHeader {
+ u32 pageNum;
+ u16 numDwords;
+ u16 hostStatus;
+ u16 scsiStatus;
+ u16 reserved[3];
+} __packed;
+
+struct PVSCSIConfigPageController {
+ struct PVSCSIConfigPageHeader header;
+ u64 nodeWWN; /* Device name as defined in the SAS spec. */
+ u16 manufacturer[64];
+ u16 serialNumber[64];
+ u16 opromVersion[32];
+ u16 hwVersion[32];
+ u16 firmwareVersion[32];
+ u32 numPhys;
+ u8 useConsecutivePhyWWNs;
+ u8 reserved[3];
+} __packed;
+
/*
* Interrupt status / IRQ bits.
*/
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 9ee0afef2d16..d89a5dfd3ade 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -179,7 +179,6 @@
#include <linux/stat.h>
#include <linux/io.h>
-#include <asm/system.h>
#include <asm/dma.h>
#include <scsi/scsi.h>